From 04c7e18d8107a58d44dd3958a3d2cc762b43427b Mon Sep 17 00:00:00 2001
From: Little-Wallace
Date: Mon, 27 Mar 2023 14:59:31 +0800
Subject: [PATCH] Squashed commit of the following:
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 96ff27fa9f234769e35a1a940aa853353f1c469f
Author: August
Date:   Mon Mar 27 13:24:29 2023 +0800
    feat: support alter rename relations including table/mview/view/sink/index (#7745)

commit 0069678da5188ae0a4c77db8cbdaf1d10c44dfbf
Author: Runji Wang
Date:   Mon Mar 27 11:29:49 2023 +0800
    feat: add more string functions (#8767)
    Signed-off-by: Runji Wang

commit f4e2bdccd8ab2a725ebd36d47f5537257ba80420
Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Date:   Sat Mar 25 12:12:53 2023 +0800
    chore(deps): bump openssl from 0.10.47 to 0.10.48 (#8769)
    Signed-off-by: dependabot[bot]
    Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

commit 59920173e86ebc739fe9d50275b8e30b74e28276
Author: Richard Chien
Date:   Fri Mar 24 20:22:10 2023 +0800
    refactor(hash agg): directly use `AggGroup` instead of `Box` (#8745)
    Signed-off-by: Richard Chien

commit 5f41727aa284d38e312947dafcd9185a9da17ba2
Author: Renjie Liu
Date:   Fri Mar 24 20:07:17 2023 +0800
    fix: Use debug log to record simulation time (#8768)

commit 730242436b57583292ed2fdd604df674e0157e24
Author: Dylan
Date:   Fri Mar 24 18:34:47 2023 +0800
    feat(meta): support group notification (#8741)

commit be9723e3095e79fcd78f7917a45a587e936cbd85
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Fri Mar 24 18:25:00 2023 +0800
    fix(state clean): state clean should not delete NULL (#8737)

commit 4b03a93790f972f2f26a8669edfc4e84d5353578
Author: Yuhao Su <31772373+yuhao-su@users.noreply.github.com>
Date:   Fri Mar 24 17:11:15 2023 +0800
    feat: introduce generated columns (#8700)

commit 98ea76a6b92001c29ee3cac8f6843c669636f824
Author: Noel Kwan <47273164+kwannoel@users.noreply.github.com>
Date:   Fri Mar 24 16:57:19 2023 +0800
    feat(optimizer): Add `StreamProjectMergeRule` (#8753)
    Co-authored-by: lmatz

commit 380e1045a5ee4d6fff9f7639ab9341114dba36d7
Author: stonepage <40830455+st1page@users.noreply.github.com>
Date:   Fri Mar 24 16:39:31 2023 +0800
    perf(stream): add simple strategy for if the stream project compact the chunk (#8758)

commit 8bedb3cb243502507db26bcf5fed6c7dd4424035
Author: Eric Fu
Date:   Fri Mar 24 16:21:12 2023 +0800
    feat: generate example.toml from default config (#8735)
    Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

commit 2493fc189e0426d3dd17cedcf2e1427d6aa12643
Author: stonepage <40830455+st1page@users.noreply.github.com>
Date:   Fri Mar 24 15:15:48 2023 +0800
    fix(bench): shorten the benchmark id (#8754)

commit 44191a78630eb2bb2558930de3d67c40ad574500
Author: Runji Wang
Date:   Fri Mar 24 15:01:58 2023 +0800
    refactor(expr): generate build-from-prost with procedural macros (#8499)
    Signed-off-by: Runji Wang

commit 58f29a6717106c331c771a1ab2493e4555730097
Author: Shanicky Chen
Date:   Fri Mar 24 14:47:37 2023 +0800
    fix: correctly handle graceful shutdown after losing leader (#8734)

commit 1c6b2465e6848deddfb4490e09de757df00defc2
Author: zwang28 <70626450+zwang28@users.noreply.github.com>
Date:   Fri Mar 24 11:45:29 2023 +0800
    chore(test): fix config for compaction test (#8752)

commit e951cde55e395a07b26969264ea25999ee3a3287
Author: Kamalesh Palanisamy
Date:   Thu Mar 23 15:40:56 2023 -0500
    feat: Add support for array_length function in psql (#8636)
    Co-authored-by: xxchan
commit aeddef3ebb071c118a0b9294df12ae5bd174bdcb
Author: Bohan Zhang
Date:   Thu Mar 23 20:11:44 2023 +0800
    fix: avro timestamp_mills and timestamp_micros parse to timestamptz (#8730)
    Signed-off-by: tabVersion

commit 24ecd4d633cc7d11cd80f4cba73c58b7918ba4a2
Author: Bohan Zhang
Date:   Thu Mar 23 18:19:17 2023 +0800
    chore: fix unstable unit test (#8674)
    Signed-off-by: tabVersion

commit d967fcc5bc6e83521447cd49461f820d79785456
Author: Shanicky Chen
Date:   Thu Mar 23 17:53:45 2023 +0800
    chore: use a separate option for etcd election client (#8726)
    Signed-off-by: Shanicky Chen

commit 92584b6e521dbc1bef65c9ba7b1213166b81d33b
Author: Wallace
Date:   Thu Mar 23 17:01:14 2023 +0800
    fix(meta): fix trivial move failed because of no member table ids (#8725)
    Signed-off-by: Little-Wallace

commit 1df800a0d0b387976a0594be9ab1495429955861
Author: stonepage <40830455+st1page@users.noreply.github.com>
Date:   Thu Mar 23 16:49:55 2023 +0800
    perf(hashkey): add benchmark for hash key ser/deser (#8733)

commit 96aa23dd0dc04c869a2ea5fdb4b7b1fc8fbcbf6d
Author: Noel Kwan <47273164+kwannoel@users.noreply.github.com>
Date:   Thu Mar 23 16:16:49 2023 +0800
    docs(common): add more docs for `DataChunk` (#8736)

commit f6ccfd51f83bee4b489478caf5678918749f13a1
Author: lmatz
Date:   Thu Mar 23 16:13:15 2023 +0800
    perf(connector): use Vec instead of Bytes and &[u8] (#8732)

commit 0e8d81f65331a35b1fff54f33259c88f0d866e85
Author: Yuhao Su <31772373+yuhao-su@users.noreply.github.com>
Date:   Thu Mar 23 16:02:52 2023 +0800
    fix: evict hash join cache every n messages. (#8731)

commit 8261a30d626bbf179e3a85c68bb533a6a368c1e9
Author: Zhidong Guo <52783948+Gun9niR@users.noreply.github.com>
Date:   Thu Mar 23 16:02:45 2023 +0800
    fix(risedev): disallow in memory state store when there is a compactor (#8729)

commit 9d64e500bfd804e3c674fec981b1041ae824762c
Author: ZENOTME <43447882+ZENOTME@users.noreply.github.com>
Date:   Thu Mar 23 14:27:19 2023 +0800
    test: add e2e extended mode test (#8710)

commit 7bac23939c3c3f27e1fdea51df70c6224538d6f2
Author: Bugen Zhao
Date:   Thu Mar 23 13:57:21 2023 +0800
    feat(meta): support scaling delta join (#8694)
    Signed-off-by: Bugen Zhao

commit 837d43aa160148fcd09be880b19508469cfedebd
Author: odysa
Date:   Wed Mar 22 22:45:57 2023 -0700
    fix: integration tests telemetry env (#8705)

commit 3e8ce312f5d65bb65db99c75ed6b2dc4cd984467
Author: Renjie Liu
Date:   Thu Mar 23 13:36:03 2023 +0800
    chore: Add time for each slt record in simulation test (#8724)

commit ea72ed7fa960accea46f1357233caab6dc7d33f5
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Thu Mar 23 12:11:42 2023 +0800
    fix(watermark): fix watermark derivation in stream dynamic filter in … (#8719)

commit 1b008f443b2fad415e61897e5bc3def2f47cc696
Author: Renjie Liu
Date:   Thu Mar 23 11:59:01 2023 +0800
    chore: Remove unnecessary spawn (#8722)

commit a1d084d50ef845b1a566b9e6c5ab733c2ad40509
Author: Bugen Zhao
Date:   Thu Mar 23 11:21:33 2023 +0800
    fix(streaming): also enable schema check in release profile (#8711)
    Signed-off-by: Bugen Zhao

commit ad61a71cb99c35e23d539360c2bc002de37eeeea
Author: Eric Fu
Date:   Thu Mar 23 11:16:45 2023 +0800
    test: add integration test of tidb/ticdc (#8708)

commit 3a0ee3d49d2042cc2724b8f2fa3e8d9a1f205240
Author: lmatz
Date:   Thu Mar 23 08:50:15 2023 +0800
    perf(parser): do to_ascii_lowercase only once (#8718)

commit 669087ea1a7cb79b668e584877ef2122b0d0706d
Author: Renjie Liu
Date:   Wed Mar 22 20:15:36 2023 +0800
    fix: Dashboard main should also setup protoc (#8717)

commit c34ffc164c64ca6b3fea0293f604456b34087f88
Author: Renjie Liu
Date:   Wed Mar 22 19:36:28 2023 +0800
    chore: Remove proto gen in dashboard (#8716)
commit 815a51ad67c82da89c7c2d72626958733d28e50e
Author: Renjie Liu
Date:   Wed Mar 22 17:52:39 2023 +0800
    fix(batch): Cancel task should not propagate error. (#8675)

commit c78468396c1290913a0a901ef405b116a2ca307a
Author: idx0-dev <124041366+idx0-dev@users.noreply.github.com>
Date:   Wed Mar 22 16:32:47 2023 +0800
    fix: csv empty cell should be parsed to null (#8709)

commit 359fbf5b8622765b1c9005d2bf4b6ea7a32e921b
Author: Wallace
Date:   Wed Mar 22 15:43:50 2023 +0800
    feat(meta): support move state-table between compaction-group (#8390)
    Signed-off-by: Little-Wallace

commit 52caa658738e111e3e23d365316969232aa0a282
Author: lmatz
Date:   Wed Mar 22 15:32:32 2023 +0800
    revert: feat(streaming): call may_exist when insert cache miss in join executor (#7957) (#8655)

commit 86ffe994cc32f3032443fce8498272c75d1b0180
Author: Zhidong Guo <52783948+Gun9niR@users.noreply.github.com>
Date:   Wed Mar 22 14:45:24 2023 +0800
    deprecate(config): deprecate state store url on worker nodes (#8704)

commit 9e774bb2c792776e7f455071a067f25ce22ffb4f
Author: Richard Chien
Date:   Wed Mar 22 14:33:00 2023 +0800
    refactor(hash agg): split building change and applying change (#8706)
    Signed-off-by: Richard Chien

commit b474059a74ea2a2cbb5fa60d6af6da307c599ea8
Author: WillyKidd <57129289+WillyKidd@users.noreply.github.com>
Date:   Wed Mar 22 12:29:22 2023 +0800
    test(connector): add test cases for postgres validation permission checks (#8662)

commit 4738ee98a635c337617e9670e122b3c4c527b60f
Author: Eric Fu
Date:   Wed Mar 22 12:07:54 2023 +0800
    fix(ci): fix github workflow syntax error (#8702)

commit 655d2bd2e9985a222c3471a33f2bb9bae08663de
Author: Zhanxiang (Patrick) Huang
Date:   Wed Mar 22 11:55:38 2023 +0800
    feat(sst-dump): support more features (#8580)

commit 8648ff8ddee52e4a8fa9dcf13cb01c4bbf9510b2
Author: Bugen Zhao
Date:   Wed Mar 22 11:35:40 2023 +0800
    refactor(simulation): interface of running multiple queries in the same session (#8697)
    Signed-off-by: Bugen Zhao

commit 6313c4276e7ae5049922ad966468b3d23693be91
Author: TennyZhuang
Date:   Wed Mar 22 11:26:49 2023 +0800
    refactor(dashboard): ignore `dashboard/proto/gen` and gen them during `npm i` (#8695)
    Signed-off-by: TennyZhuang

commit 3a06226b683956b1778940328b0e3fbabc113ac8
Author: xxchan
Date:   Tue Mar 21 12:02:55 2023 +0100
    fix(stream): careful cache invalidation for TopN (#8659)
    Co-authored-by: st1page <1245835950@qq.com>

commit 9abe5dca505a103847730defa64bc5ae71c68c28
Author: Richard Chien
Date:   Tue Mar 21 18:18:39 2023 +0800
    perf(hash agg): use `get_mut` instead of `pop`+`put` pattern (#8691)
    Signed-off-by: Richard Chien

commit bf954734311b4eb791f23e5be634ce1f0a0879eb
Author: Eric Fu
Date:   Tue Mar 21 18:14:20 2023 +0800
    fix(connector): support more data types in JDBC sink (#8678)
    Signed-off-by: tabVersion
    Co-authored-by: tabVersion

commit 694c4465170fe8fce12e3a6e195b81954c8da65e
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Tue Mar 21 17:50:40 2023 +0800
    fix(watermark): avoid panic in watermark derivation (close #8689) (#8690)

commit 870ba34fc37bf29a63cfa9ee68cfd8c681aea48d
Author: TennyZhuang
Date:   Tue Mar 21 17:20:06 2023 +0800
    chore(deps): bump minitrace,toml,auto_enums,bit_flags,... (#8682)
    Signed-off-by: TennyZhuang
commit 0e8f518637fcd4d63b2f8a707430588b5e15c041
Author: TennyZhuang
Date:   Tue Mar 21 16:59:02 2023 +0800
    perf(array): avoid double bounds check in ArrayIterator (#8685)
    Signed-off-by: TennyZhuang

commit 16c970858d890d7e9fb6098d2bd5502e33b0705b
Author: Bugen Zhao
Date:   Tue Mar 21 16:22:57 2023 +0800
    fix(streaming): drop subtask on another blocking thread (#8672)
    Signed-off-by: Bugen Zhao

commit 83057e5c79748cb99640ace42a3ec7f7f0edbe03
Author: congyi wang <58715567+wcy-fdu@users.noreply.github.com>
Date:   Tue Mar 21 15:46:31 2023 +0800
    feat(storage): support azblob (#8257)

commit dd34989183300ee5607ba7cf217d94a39b80c688
Author: Bohan Zhang
Date:   Tue Mar 21 14:40:15 2023 +0800
    chore: fix docker compose test (#8666)
    Signed-off-by: tabVersion

commit 02b3ea21293d9556144da8e23d2e1d464a003fa3
Author: odysa
Date:   Mon Mar 20 22:44:59 2023 -0700
    feat: telemetry (#7384)

commit 3d4bca7eec43299a40b924502275703193dbff0e
Author: Bugen Zhao
Date:   Tue Mar 21 13:24:36 2023 +0800
    fix: also enable jemalloc on macOS (#8665)
    Signed-off-by: Bugen Zhao

commit 30794414c25fbdf358d0e2c31b7cbeb61e71be2d
Author: xiangjinwu <17769960+xiangjinwu@users.noreply.github.com>
Date:   Tue Mar 21 13:16:49 2023 +0800
    fix(optimizer): `PlanCorrelatedIdFinder` should be aware of agg filter (#8667)

commit d557a6ce5c7e74ec97c12f80a16c1fc4b615f3ef
Author: Dylan
Date:   Tue Mar 21 13:05:15 2023 +0800
    fix(meta): fix alter table add/drop column with indexes (#8664)

commit 59a09479567121baee2a122c32f7b6ddecafe045
Author: Yuanxin Cao <60498509+xx01cyx@users.noreply.github.com>
Date:   Tue Mar 21 11:17:54 2023 +0800
    fix(sink): pass downstream pk when starting a sink (#8660)

commit 8c5489efc062c0f5d4e1a9d3b2bb94c99e18ddc7
Author: Bugen Zhao
Date:   Tue Mar 21 11:06:04 2023 +0800
    fix(meta): correctly resolve update of vnode mapping after scaling (#8652)
    Signed-off-by: Bugen Zhao

commit cc6e687d0de24750a38a09fdad96947ba16e7855
Author: Clearlove <52417396+Eurekaaw@users.noreply.github.com>
Date:   Mon Mar 20 12:51:14 2023 -0400
    feat(optimizer): optimize always-false filter for batch (#8629)
    Signed-off-by: Clearlove

commit ae99e5506eff3f504f1d845b33f557ddd5ea6064
Author: Li0k
Date:   Mon Mar 20 20:38:58 2023 +0800
    feat(storage): basic compactor scheduler policy (#6986)
    Co-authored-by: Yuhao Su <31772373+yuhao-su@users.noreply.github.com>
    Co-authored-by: Dylan
    Co-authored-by: Yuanxin Cao <60498509+xx01cyx@users.noreply.github.com>
    Co-authored-by: ZENOTME <43447882+ZENOTME@users.noreply.github.com>
    Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
    Co-authored-by: congyi wang <58715567+wcy-fdu@users.noreply.github.com>

commit eddb2fcbee764868636d718fabfc8003318d641a
Author: xiangjinwu <17769960+xiangjinwu@users.noreply.github.com>
Date:   Mon Mar 20 19:41:21 2023 +0800
    fix(optimizer): `ApplyAggTransposeRule` should handle `CorrelatedInputRef` in agg filter (#8650)

commit 32f4925eaab27bd1283ba21c60fb10129211893a
Author: WillyKidd <57129289+WillyKidd@users.noreply.github.com>
Date:   Mon Mar 20 18:08:45 2023 +0800
    fix(connector): add postgres permission checks in validation phase (#8525)

commit ce9e51992f7481532846e9435dd57e476bf2853f
Author: Dylan
Date:   Mon Mar 20 18:01:00 2023 +0800
    feat(frontend): Enhance parsing error reporting by providing location information. (#8646)
commit 7fbab014406291f5483090723a4b91f296c257ff
Author: Bugen Zhao
Date:   Mon Mar 20 16:23:14 2023 +0800
    chore: remove accidentally pushed files (#8645)
    Signed-off-by: Bugen Zhao

commit 44943ec6ede6c968947f473a299ba2e4291feba9
Author: Bohan Zhang
Date:   Mon Mar 20 16:06:31 2023 +0800
    fix: display error message on the frontend (#8638)
    Signed-off-by: tabVersion
    Co-authored-by: lmatz

commit 1dee8247aacd9b4de3978278883df194ff22ef30
Author: xxchan
Date:   Mon Mar 20 08:27:14 2023 +0100
    doc: add notes for `.cargo/config.toml` (#8632)

commit 13641c88e1c3279addad5152941e865af2fd7c1d
Author: congyi wang <58715567+wcy-fdu@users.noreply.github.com>
Date:   Mon Mar 20 15:16:30 2023 +0800
    feat(test): test opendal fs engine on main cron with large dataset (#8637)

commit 014f6db973113b25eee485d8e07fa371213a6703
Author: Shanicky Chen
Date:   Mon Mar 20 13:55:14 2023 +0800
    feat: remove the exchange after append_only source (#8532)

commit 19b1be33bc85fcf2ce5ee7e6a9ce11183edd0d57
Author: Eric Fu
Date:   Mon Mar 20 13:25:06 2023 +0800
    fix(risedev): connector-node start-up script (#8590)
    Co-authored-by: lmatz

commit da57717d3b41647049bfa2d7c4a5963336989f35
Author: Bugen Zhao
Date:   Mon Mar 20 13:08:44 2023 +0800
    fix(streaming): drop actor on another blocking thread (#8624)
    Signed-off-by: Bugen Zhao

commit f111bfb7453cab2d5136f530cfe4aeb1f536ff3a
Author: Zhidong Guo <52783948+Gun9niR@users.noreply.github.com>
Date:   Mon Mar 20 11:39:15 2023 +0800
    fix(config): bring back system params to config file (#8623)

commit 3789023ab40d1dd33e90d2d49920a152d399f861
Author: xxchan
Date:   Mon Mar 20 03:56:27 2023 +0100
    fix: fix connector start in risedev (#8587)

commit 41323a9eccadcb02f9bc471bfb07fb4c332d7448
Author: Richard Chien
Date:   Mon Mar 20 01:04:51 2023 +0800
    feat(common): support `NULLS {FIRST | LAST}` (#8485)
    Signed-off-by: Richard Chien

commit 6940ae43eed2118300041375cab527a051a05dc8
Author: Tesla Zhang
Date:   Sat Mar 18 16:57:04 2023 -0400
    feat(pretty): introduce the proposed pretty printer in stream_graph_formatter (#8576)

commit 99f4604e84b8e98051ea43097fa14df6cc616eeb
Author: Bohan Zhang
Date:   Sat Mar 18 16:09:40 2023 +0800
    chore: add timeout to fix main ci (#8627)
    Signed-off-by: tabVersion

commit 9a818b8e32e1a2b0066a6d84871cfe224253178e
Author: xiangjinwu <17769960+xiangjinwu@users.noreply.github.com>
Date:   Sat Mar 18 14:53:22 2023 +0800
    fix(common): interval multiplication and division (#8620)

commit 53261c5aec42d7cf89945aeed54037572916d9d3
Author: Kevin Axel
Date:   Sat Mar 18 11:47:22 2023 +0800
    feat: Bushy tree join ordering (#8316)
    Signed-off-by: Kevin Axel

commit a3dc882c118e14f71c13bee59e7ac8aa63293c5b
Author: StrikeW
Date:   Fri Mar 17 23:52:09 2023 +0800
    fix(compaction-test): add --advertise-addr to meta's opts (#8611)

commit 99501d174f394f385df433d8cd7ed26a14337365
Author: Yufan Song <33971064+yufansong@users.noreply.github.com>
Date:   Fri Mar 17 10:39:28 2023 -0400
    feat(connector-node): support stream chunk payload in connector node (#8548)
    Co-authored-by: William Wen

commit 8c95702c98f543bf31415e92f9c36932eb2daf2a
Author: Eric Fu
Date:   Fri Mar 17 21:06:36 2023 +0800
    fix(sink): remove `unimplemented` (#8622)
    Signed-off-by: tabVersion
    Co-authored-by: tabVersion

commit 7cd7c9db303be69e295c4dc7973585fffcc095c8
Author: William Wen <44139337+wenym1@users.noreply.github.com>
Date:   Fri Mar 17 20:47:53 2023 +0800
    fix(connector-node): do not store sink row inside upsert iceberg sink (#8625)

commit 6fd8821f2e053957b183d648bea9c95b6703941f
Author: Runji Wang
Date:   Fri Mar 17 20:19:22 2023 +0800
    chore: replace all `ProstXxx` with `PbXxx` (#8621)
    Signed-off-by: Runji Wang
commit d90165a9831384e3223a4ae9504cb803edadff65
Author: StrikeW
Date:   Fri Mar 17 18:41:37 2023 +0800
    refactor(connector): use config file to initiate a debezium source connector (#8539)

commit bba43baf3df6fba1aad3f77c38927941e2876613
Author: Bohan Zhang
Date:   Fri Mar 17 16:49:00 2023 +0800
    feat: make gen timestamp deterministic (#8619)
    Signed-off-by: tabVersion

commit 18863e0ce9d996a652be17c0e16ce60dbe8423d3
Author: congyi wang <58715567+wcy-fdu@users.noreply.github.com>
Date:   Fri Mar 17 16:25:10 2023 +0800
    refactor(frontend): replace dist_key_indices with dist_key_in_pk_indices in frontend (#8617)

commit 961e342baa91c62948bb37d659dc483f1106d4c5
Author: Yuanxin Cao <60498509+xx01cyx@users.noreply.github.com>
Date:   Fri Mar 17 16:10:39 2023 +0800
    feat(sink): enable user-defined primary key for upsert sink (#8610)

commit 88aa6a410caecb5b446e8a4bb4c4056eae4cfeeb
Author: Bugen Zhao
Date:   Fri Mar 17 16:00:45 2023 +0800
    fix(test): use correct type for nexmark source planner test (#8618)
    Signed-off-by: Bugen Zhao

commit 9be17af74ad82be7879f360b51172607870a982b
Author: xiangjinwu <17769960+xiangjinwu@users.noreply.github.com>
Date:   Fri Mar 17 15:07:54 2023 +0800
    fix(common): interval input/output overflow and negative handling (#8613)

commit 1b208bbcd53d24f6c4f3caee5d94165854b6552c
Author: Bugen Zhao
Date:   Fri Mar 17 14:57:38 2023 +0800
    feat(streaming): separate `BarrierRecv` executor (#8595)
    Signed-off-by: Bugen Zhao

commit 78ddbce59cd8318cb9f983a37f06b6ff08953015
Author: William Wen <44139337+wenym1@users.noreply.github.com>
Date:   Fri Mar 17 14:12:06 2023 +0800
    feat(connector-node): specify sink payload format in start sink and call close for iterator and sink row (#8585)

commit f4a2f8d6bd833356d7d4d58732473a8c43ecbcf4
Author: congyi wang <58715567+wcy-fdu@users.noreply.github.com>
Date:   Fri Mar 17 13:49:55 2023 +0800
    refactor(storage): sstable iter compare fullkey struct instead of encoded key to avoid memory allocation (#8607)

commit ab701c78ec5de4d8015dbcee24294533f7bf5cbb
Author: Wallace
Date:   Fri Mar 17 13:48:25 2023 +0800
    fix(meta): limit file count and l0 compact score (#8563)
    Signed-off-by: Little-Wallace

commit 4f430ac11faa18fc8d7a0030c85c78863b6e0a9b
Author: StrikeW
Date:   Fri Mar 17 13:34:25 2023 +0800
    feat(source): support private link for kafka connector (#8247)
    Signed-off-by: Runji Wang
    Co-authored-by: Runji Wang

commit 7b5ffb4ea4f3f8e52dcb7f5f92b4fa983b38259f
Author: Yuanxin Cao <60498509+xx01cyx@users.noreply.github.com>
Date:   Fri Mar 17 12:33:19 2023 +0800
    feat(connector): validate sink primary key and sink type on connector node (#8599)

commit 08fc246b0ae6425f2adf30b864eff01b575fbd9f
Author: Zhanxiang (Patrick) Huang
Date:   Thu Mar 16 22:33:56 2023 +0800
    feat(source): store source split state as jsonb (#8602)

commit cfc03492985fef3903a07cd095fdfa5a2647f8c0
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Thu Mar 16 22:19:45 2023 +0800
    perf(compaction): avoid duplicate data in LSM (#8489)

commit ad7e21bd82a66cada6bcbb90ab803510ea06fc91
Author: Clearlove <52417396+Eurekaaw@users.noreply.github.com>
Date:   Thu Mar 16 10:08:18 2023 -0400
    refactor(optimizer): replace `StreamIndexScan` by `StreamTableScan` on logical index scan (#8567)
    Signed-off-by: Clearlove

commit 5efe089d0b775990521757fec5ff7a2d6e073835
Author: Clearlove <52417396+Eurekaaw@users.noreply.github.com>
Date:   Thu Mar 16 09:42:52 2023 -0400
    feat(frontend): ban `update` statements modifying pk columns (#8569)
    Signed-off-by: Clearlove
commit 25a912767301c744ec1dc91f1ade4d63cef627df
Author: ZENOTME <43447882+ZENOTME@users.noreply.github.com>
Date:   Thu Mar 16 21:04:50 2023 +0800
    feat(frontend): support extended query protocol handle (#8565)

commit 018fe9e4c0f184eb571ca3a51c3d962a1f8c9a5d
Author: Runji Wang
Date:   Thu Mar 16 20:09:27 2023 +0800
    doc(udf): improve UDF documentation (#8597)
    Signed-off-by: Runji Wang

commit 2eaea50369fa1a1ec905b829bc98c076e3c40351
Author: TennyZhuang
Date:   Thu Mar 16 19:59:45 2023 +0800
    test(stream): add join and temporal_filter state-cleaning test (#8596)
    Signed-off-by: TennyZhuang
    Co-authored-by: Yuhao Su <31772373+yuhao-su@users.noreply.github.com>

commit a4bd8775c7319e581d9966cdc95075a3c03e0b77
Author: congyi wang <58715567+wcy-fdu@users.noreply.github.com>
Date:   Thu Mar 16 19:57:17 2023 +0800
    chore: remove dist_key_indices in state table and storage table (#8601)

commit c683098c18d2cc41ced29ca57a0cf6462bc71818
Author: zwang28 <70626450+zwang28@users.noreply.github.com>
Date:   Thu Mar 16 19:09:16 2023 +0800
    fix(test): fix test config (#8586)

commit bcb324b36a126132dab0005ad537ffbebc773223
Author: Dylan
Date:   Thu Mar 16 18:38:46 2023 +0800
    feat(meta): add internal table to pg_class (#8594)

commit 632423ae6b18ffb8284d15466f3ba454c3bf42a3
Author: Dylan
Date:   Thu Mar 16 18:08:13 2023 +0800
    fix(optimizer): fix hash join distribution (#8598)

commit 582307d3f16504c1591be0bf7c5df66152ef4f2d
Author: Bugen Zhao
Date:   Thu Mar 16 18:05:16 2023 +0800
    refactor(playground): refinements on connector node (#8582)
    Signed-off-by: Bugen Zhao
    Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>

commit 9b89bb0946874ccb5553c9d82ec3e4ac5fb654da
Author: stonepage <40830455+st1page@users.noreply.github.com>
Date:   Thu Mar 16 17:47:55 2023 +0800
    fix(optimizer): projectSet && overAgg should call input's predicate push down && prune col (#8588)

commit 14bfc62b95d0a5f5dda4a7e3122829d5f3d31f99
Author: Shanicky Chen
Date:   Thu Mar 16 17:42:50 2023 +0800
    chore: add is_visible in column for connector (#8592)

commit 075d50a4938bab239b09fadb5b5e5f1963db6139
Author: ZENOTME <43447882+ZENOTME@users.noreply.github.com>
Date:   Thu Mar 16 17:29:52 2023 +0800
    feat(frontend): support BATCH_PARALLELISM (#8552)

commit bd9d1563c3942dff859ca5531b9aa34c699f0bdf
Author: Bohan Zhang
Date:   Thu Mar 16 17:28:09 2023 +0800
    fix: load pk from the downstream instead of Risingwave (#8457)
    Signed-off-by: tabVersion

commit 25a4809a0a6099743a4af9a418ae019ca9edb5ad
Author: Bugen Zhao
Date:   Thu Mar 16 16:16:25 2023 +0800
    feat(streaming): only output required columns for MV-on-MV (#8555)
    Signed-off-by: Bugen Zhao

commit 64a5f88fd2cb3c629264e442a908292911478d77
Author: congyi wang <58715567+wcy-fdu@users.noreply.github.com>
Date:   Thu Mar 16 15:13:09 2023 +0800
    feat(storage): do not compress table_id (#8512)

commit 7641b15f5508fd95e9be60fc5c2d9826a838b55d
Author: congyi wang <58715567+wcy-fdu@users.noreply.github.com>
Date:   Thu Mar 16 15:12:01 2023 +0800
    feat(test): add e2e test for OpenDAL fs backend (#8528)

commit 65a641dbe4f94372d16307ff1e46cf248a5a6396
Author: TennyZhuang
Date:   Thu Mar 16 15:00:39 2023 +0800
    feat(frontend): support SET TIME ZONE (#8572)
    Signed-off-by: TennyZhuang

commit c19fc724312528aa634a82432b69ae8c94e07797
Author: Eridanus <45489268+Eridanus117@users.noreply.github.com>
Date:   Thu Mar 16 14:29:43 2023 +0800
    feat: Support optional parameter `offset` in `tumble` and `hop` (#8490)

commit 777e8367dcb344c36b4422f179e18494706afbed
Author: stonepage <40830455+st1page@users.noreply.github.com>
Date:   Thu Mar 16 14:09:19 2023 +0800
    perf(streaming): add the missed read prefix hint on state table (#8545)
commit e61af5aff1767a58c4de5b0d3723e5407ad08542
Author: xiangjinwu <17769960+xiangjinwu@users.noreply.github.com>
Date:   Thu Mar 16 14:08:58 2023 +0800
    fix(common): interval overflow panic / wrap during comparison and justify (#8556)

commit 04a2885c1b9cf20f23f34e62114e793c482f1233
Author: lmatz
Date:   Thu Mar 16 14:05:30 2023 +0800
    fix: add --advertise-addr to playground meta's opts (#8581)

commit 6060672f8a450719aac49669a90435541d31ffae
Author: lmatz
Date:   Thu Mar 16 13:51:41 2023 +0800
    deprecate: remove --host and --client-address args (#8575)

commit 30a8946c9d2689f19c92dcf2a6a1753b5b8da944
Author: odysa
Date:   Wed Mar 15 22:07:44 2023 -0700
    fix(common): decimal round digits test (#8568)
    Co-authored-by: Noel Kwan <47273164+kwannoel@users.noreply.github.com>

commit 88550e402b998d9964ae3554926478dbed13e015
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Thu Mar 16 13:07:42 2023 +0800
    fix(hash join): avoid emitting chunks that violate `UpdateDelete` ass… (#8579)

commit dc76ad780cdf5a2997affe1ca2be0002915f9927
Author: lmatz
Date:   Thu Mar 16 12:39:41 2023 +0800
    deprecate: remove host cmd line argument when starting meta node (#8574)

commit b0f276b572a793b184a71859a823edc007db34c2
Author: Zhidong Guo <52783948+Gun9niR@users.noreply.github.com>
Date:   Thu Mar 16 01:41:51 2023 +0800
    fix(config): remove system params from config file (#8366)

commit 24fe1e876c72f346b64519e3ecf1df14873d74d4
Author: jon-chuang <9093549+jon-chuang@users.noreply.github.com>
Date:   Wed Mar 15 23:35:00 2023 +0800
    fix(batch, source): Propagate user errors (#8493)
    Signed-off-by: Runji Wang
    Co-authored-by: jon-chuang
    Co-authored-by: Runji Wang

commit 81b4d59ba2bd04712a0b6923ac000dd11e02fdf1
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Wed Mar 15 21:11:24 2023 +0800
    perf(prefetch): enable prefetch for hash join with degree table (#8566)

commit 7bd208de44e0c5714e7b6fca06a4d0f62e850667
Author: stonepage <40830455+st1page@users.noreply.github.com>
Date:   Wed Mar 15 20:41:28 2023 +0800
    fix(explain): add missed fields in explain streaming dist plan (#8544)

commit 03cc2aebbc8dddaac135bdcb77b2b4395dd2a8af
Author: Runji Wang
Date:   Wed Mar 15 16:52:47 2023 +0800
    refactor(expr): make evaluation async (#8229)
    Signed-off-by: Runji Wang

commit 1a11c3fc75ef180642069d3698418b6641c18197
Author: ZENOTME <43447882+ZENOTME@users.noreply.github.com>
Date:   Wed Mar 15 15:58:03 2023 +0800
    feat(frontend): support bind parameter (#8543)

commit f92d7f6034b2cbe06ca0fb206bffeac688b4a9dd
Author: Runji Wang
Date:   Wed Mar 15 14:39:07 2023 +0800
    feat(udf): support user-defined table function (UDTF) (#8255)
    Signed-off-by: Runji Wang
    Co-authored-by: xxchan

commit 61191c2e1091c2d569e72dbd49800614989db817
Author: Shanicky Chen
Date:   Wed Mar 15 14:36:56 2023 +0800
    feat(common): Add support for DataType::Serial (#8392)

commit f907452796c5923af2799b2968aae50c72f369eb
Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Date:   Wed Mar 15 13:56:34 2023 +0800
    chore(deps): bump golang.org/x/net from 0.0.0-20220927171203-f486391704dc to 0.7.0 in /integration_tests/datagen (#8521)
    Signed-off-by: dependabot[bot]
    Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
    Co-authored-by: Yuhao Su

commit 1449439964f75d9e40d9a3b3c108abcf1b0388ed
Author: Bugen Zhao
Date:   Wed Mar 15 13:47:22 2023 +0800
    refactor(fragmenter): remove `is_singleton` workarounds on `Chain` (#8536)
    Signed-off-by: Bugen Zhao
commit 9f68cef5ec1d7f6e16f77f307a7ee11fa2df2820
Author: broccoliSpicy <93440049+broccoliSpicy@users.noreply.github.com>
Date:   Wed Mar 15 12:13:52 2023 +0800
    feat(expr): support builtin function pi. (#8509)

commit 8be47342d2c78eee4f41367d1b9fd7101a3c310e
Author: William Wen <44139337+wenym1@users.noreply.github.com>
Date:   Wed Mar 15 12:12:44 2023 +0800
    feat(java-binding): support java binding on stream chunk (#8517)

commit 8be23f4b14f1fff11b32ef2f150618564108e626
Author: Noel Kwan <47273164+kwannoel@users.noreply.github.com>
Date:   Wed Mar 15 12:03:41 2023 +0800
    feat(sqlsmith): add statement-level reducer (#8507)
    Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

commit 320d755578505832134d653b918d4e43db442301
Author: TennyZhuang
Date:   Wed Mar 15 12:01:24 2023 +0800
    test(stream): add state cleaning test (#8546)
    Signed-off-by: TennyZhuang
    Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

commit 8b09f5e4e641e70c9d96d3880d622ad05ea8b7ce
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Tue Mar 14 22:14:47 2023 +0800
    feat(sst id): avoid SST ID gap when generating (#8542)

commit 53c6a4cd9ce59bfc528a2f85783b2688e69619e7
Author: Bugen Zhao
Date:   Tue Mar 14 20:55:17 2023 +0800
    fix(expr): do not construct error for extracting time subfield (#8538)
    Signed-off-by: Bugen Zhao

commit 53eaa241d102fc95148c987a748db686b9bd1b94
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Tue Mar 14 20:47:57 2023 +0800
    refactor(compactor): remove useless code (#8514)

commit 18b04a4b86f447f35a94d697078cf6df4972fa64
Author: Li0k
Date:   Tue Mar 14 20:34:52 2023 +0800
    feat(storage): flexible KeyPrefix encoding for Block (#8379)

commit 0c53349624d0c1daf15c0a08dce233c0779b52da
Author: Zhanxiang (Patrick) Huang
Date:   Tue Mar 14 19:38:52 2023 +0800
    fix: move sst meta offset to the end of the object (#8524)

commit d49a4c5c42bb05c603be65bcf0cbd60b81319bb6
Author: stonepage <40830455+st1page@users.noreply.github.com>
Date:   Tue Mar 14 19:35:05 2023 +0800
    refactor(optimizer): move fd derive into core (#8540)

commit 305c86497520dad866c1daa951d53168378b27af
Author: ZENOTME <43447882+ZENOTME@users.noreply.github.com>
Date:   Tue Mar 14 19:31:56 2023 +0800
    feat(frontend): support infer param in binder (#8453)

commit 9d5ff78bddf2ba615e9bc3cb687611ac1c9b9251
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Tue Mar 14 19:23:36 2023 +0800
    refactor(storage): distinguish SST id and object id (close #8434) (#8436)

commit 6d28cf6eac7b2449d167da0c130781382026106d
Author: Bugen Zhao
Date:   Tue Mar 14 18:24:16 2023 +0800
    refactor(streaming): only scan necessary columns in backfill (#8533)
    Signed-off-by: Bugen Zhao

commit 42f17a6322aaa51240669b5d1ea39616fbf16e08
Author: ZENOTME <43447882+ZENOTME@users.noreply.github.com>
Date:   Tue Mar 14 17:21:22 2023 +0800
    fix(pgwire): process empty query correctly (#8535)

commit 84a9831fc1d097eee0740c0b0630b661a051ee84
Author: William Wen <44139337+wenym1@users.noreply.github.com>
Date:   Tue Mar 14 17:20:04 2023 +0800
    feat(connector): unify and simplify path config of minio and s3 (#8508)

commit 0e939984f745ff00184c4001782a982e99c1cad1
Author: August
Date:   Tue Mar 14 16:38:53 2023 +0800
    fix: avoid panic when upstream input is closed for lookup (#8529)

commit 338ace9dd6b928a65a2cc9a00ebdff9730528f31
Author: Dylan
Date:   Tue Mar 14 15:41:25 2023 +0800
    chore(batch): reuse common logical for index selection (#8531)

commit eb56b9f0d526d334ea97ce3cf4e94762d2433d37
Author: Noel Kwan <47273164+kwannoel@users.noreply.github.com>
Date:   Tue Mar 14 15:23:36 2023 +0800
    fix(ci): run entire snapshot in `main`, `pr` and `main-cron` workflows (#8523)
commit d06674b2c1c4a859abbc9bb70fddb5acdc430a14
Author: Shanicky Chen
Date:   Tue Mar 14 14:53:40 2023 +0800
    refactor: move row_id to common (#8513)

commit 9bf2e8c96fb47ef08c12d42a73bf121ec52e5a7a
Author: Bugen Zhao
Date:   Tue Mar 14 14:47:15 2023 +0800
    fix(ci): increase scaling test timeout for nexmark recovery test (#8526)
    Signed-off-by: Bugen Zhao

commit a4afac3d374958ece7f8921df33f3f94fceb5a65
Author: Yuanxin Cao <60498509+xx01cyx@users.noreply.github.com>
Date:   Tue Mar 14 14:21:29 2023 +0800
    refactor: refine conditional compilation for mem control on different OSs (#8504)
    Co-authored-by: Bugen Zhao

commit 9564db0c2f3c10adbb75190d7a98c4e646cd5b99
Author: Dylan
Date:   Tue Mar 14 14:12:55 2023 +0800
    feat(batch): Support index selection for sort aggregation with a descending ordering (#8515)

commit 97b021d3e3a3816358792f922171f71f488aef4a
Author: stonepage <40830455+st1page@users.noreply.github.com>
Date:   Tue Mar 14 14:00:59 2023 +0800
    fix(streaming): ignore null stream key from full outer join to workaround (#8520)

commit 53da2e3246adfdc3b59d60186013279858bcb110
Author: Yuanxin Cao <60498509+xx01cyx@users.noreply.github.com>
Date:   Tue Mar 14 13:58:49 2023 +0800
    fix(ci): exclude `go.mod` and `go.sum` files for typos (#8527)

commit 8183b41565afcbd3ab3e2dd0b279b183973ceaca
Author: Yuanxin Cao <60498509+xx01cyx@users.noreply.github.com>
Date:   Tue Mar 14 12:49:45 2023 +0800
    feat(meta): introduce sink validation in meta (#8417)

commit fbcd407fcb3cbc0ea4377388ada5b467b165ea20
Author: Bugen Zhao
Date:   Tue Mar 14 12:05:02 2023 +0800
    refactor: use u16 representation for virtual node (#8385)
    Signed-off-by: Bugen Zhao
    Co-authored-by: Renjie Liu

commit 4f34adeaa51c1260640c7de20c58fffccfb26024
Author: Eric Fu
Date:   Tue Mar 14 11:13:27 2023 +0800
    chore: migrate demo projects here as integration tests (#8502)
    Co-authored-by: TennyZhuang

commit c11fb641a119d87d63b6ad2624ca18715d24f948
Author: Dylan
Date:   Tue Mar 14 10:41:24 2023 +0800
    fix(optimizer): ban scalar subquery for project set (#8519)

commit 428354d3471351b75c1483c7b404fa7490bca78a
Author: zwang28 <70626450+zwang28@users.noreply.github.com>
Date:   Mon Mar 13 19:08:46 2023 +0800
    feat(backup): support mutating backup config (#8505)
    Co-authored-by: Zhidong Guo <52783948+Gun9niR@users.noreply.github.com>

commit 85e450dc26eae658a928ded350149a8d91a4cf41
Author: Bugen Zhao
Date:   Mon Mar 13 18:42:27 2023 +0800
    fix(streaming): map watermark in dispatcher with output indices (#8506)
    Signed-off-by: Bugen Zhao

commit cdaa8cf63d75498aef56a4b5a94b40aebed8b011
Author: xiangjinwu <17769960+xiangjinwu@users.noreply.github.com>
Date:   Mon Mar 13 16:59:20 2023 +0800
    fix(common): interval should have microsecond precision (#8501)

commit b235f689c8193c08c43ace25232c02f27951110e
Author: Renjie Liu
Date:   Mon Mar 13 16:42:34 2023 +0800
    refactor: ExchangeWriter should report error through status (#8478)

commit 2db01f9dad55d9ebb6d306412938e4d41e0570bb
Author: Dylan
Date:   Mon Mar 13 16:01:35 2023 +0800
    feat(streaming): support temporal join part 3 (#8480)
    Co-authored-by: Bugen Zhao
    Co-authored-by: st1page <1245835950@qq.com>

commit f36bf0b3c4b51c46af9b8cbac50c7ca95e224de1
Author: William Wen <44139337+wenym1@users.noreply.github.com>
Date:   Mon Mar 13 14:37:17 2023 +0800
    feat(frontend): support SET SESSION CHARACTERISTICS AS TRANSACTION (#8497)

commit 79a37866b641b2fe5ff745cd660adafa2b07f044
Author: Bugen Zhao
Date:   Mon Mar 13 14:36:33 2023 +0800
    fix(common): only warn on dropping non-empty builder (#8494)
    Signed-off-by: Bugen Zhao
commit e1ae04ea8f8485e640558f732f3f65d87b3db570
Author: stonepage <40830455+st1page@users.noreply.github.com>
Date:   Mon Mar 13 14:34:24 2023 +0800
    fix(streaming): hop executor handle watermark (#8498)

commit c0aa78b039aba5ae2ac9ca652a64aa80903634de
Author: congyi wang <58715567+wcy-fdu@users.noreply.github.com>
Date:   Mon Mar 13 13:59:17 2023 +0800
    refactor(storage): sstable and block level interfaces use fullkey struct (#8419)

commit 32100f31863e058845124ac40379a7953febbc0a
Author: William Wen <44139337+wenym1@users.noreply.github.com>
Date:   Mon Mar 13 13:05:23 2023 +0800
    feat(sink): set parallelism of iceberg sink to 1 (#8476)

commit a6c8c86ba6eab237ace63f3cdacf8b422e3ec63d
Author: Yuanxin Cao <60498509+xx01cyx@users.noreply.github.com>
Date:   Mon Mar 13 11:59:26 2023 +0800
    feat: user configurable memory control policy (#8475)

commit 28c539c8cc30946e3dc021b95ed80912a255bf56
Author: Bugen Zhao
Date:   Sun Mar 12 15:52:17 2023 +0800
    refactor: use a macro for logical vs physical type matching (#8479)
    Signed-off-by: Bugen Zhao

commit e51f6392719f68324516db128396f45300e6d95e
Author: Eric Fu
Date:   Sun Mar 12 14:22:16 2023 +0800
    fix: improve CDC connector param check (#8450)

commit b7c46d4f4cad63a195633caa817b92f59bf3f011
Author: zwang28 <70626450+zwang28@users.noreply.github.com>
Date:   Sun Mar 12 11:50:53 2023 +0800
    feat(storage): limit hummock write based on LSM tree stats (#8383)

commit 4b49428fc961f5fbe30c3cac48d45c99aa70d0c1
Author: TennyZhuang
Date:   Sat Mar 11 23:44:56 2023 +0800
    feat(parser): report nearby tokens when parse failed (#8465)
    Signed-off-by: TennyZhuang

commit 00ea62aeefc1b6447c5b87465b77d59dca5b3a7e
Author: TennyZhuang
Date:   Sat Mar 11 23:13:11 2023 +0800
    chore(sqlparser): add do-apply-parser-test (#8486)
    Signed-off-by: TennyZhuang

commit a77f6ccb69e47eeb99fe49c81796c918a0df8530
Author: StrikeW
Date:   Sat Mar 11 21:27:45 2023 +0800
    fix(connector): fix logging after bump to log4j2 (#8487)

commit d07f2bbacf648e3e6d18f7274b582e4e5e1a2ad8
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Sat Mar 11 13:13:12 2023 +0800
    fix(compaction group): trivial adjust should also increase divide ver… (#8484)

commit 67979049bb49029ae012b8c84c9535ffb10ee250
Author: Xuanwo
Date:   Fri Mar 10 22:09:00 2023 +0800
    chore: Bump OpenDAL to latest version (#8481)
    Signed-off-by: Xuanwo

commit 38edae844104170897ef50e77d7879af8548be55
Author: Dylan
Date:   Fri Mar 10 21:10:22 2023 +0800
    feat(streaming): Temporal join part2 (#8466)

commit e18f243dba6ca68d9344999afbe1f5cac07b9dfb
Author: idx0-dev <124041366+idx0-dev@users.noreply.github.com>
Date:   Fri Mar 10 20:33:44 2023 +0800
    feat(connector): unified csv parser (#8463)

commit 2ae019d93fe469024d9a570e0097b2c490d394fc
Author: Runji Wang
Date:   Fri Mar 10 18:19:30 2023 +0800
    test(recovery): add recovery test for nexmark stream (#7623)
    Signed-off-by: Runji Wang
    Co-authored-by: Liang <44948473+soundOfDestiny@users.noreply.github.com>

commit 25499e39fb9ea2939f3758239a4f3ce05be38ef5
Author: Liang <44948473+soundOfDestiny@users.noreply.github.com>
Date:   Fri Mar 10 17:54:11 2023 +0800
    fix(recovery): wait_epoch should be called in recovery (close #8467) (#8468)

commit 70f46f1eb5900e9e1461280810f9f07b5db9fc65
Author: Renjie Liu
Date:   Fri Mar 10 16:59:25 2023 +0800
    refactor: Remove state reporter for local mode (#8477)

commit eb24cda639954b44c4379f041c0513e6e90d3682
Author: Bugen Zhao
Date:   Fri Mar 10 16:43:59 2023 +0800
    fix: revert extension of scaling test timeout (#8462)
    Signed-off-by: Bugen Zhao
commit 8d0e869197a0e092d29a084ba4a4c6e1b127c36d
Author: Richard Chien
Date:   Fri Mar 10 16:00:53 2023 +0800
    refactor(common): unify order-related types (#8449)
    Signed-off-by: Richard Chien
    Co-authored-by: Bugen Zhao

commit 4b008ac26cb2b7aac5922bcd732a44ece6bedbea
Author: WillyKidd <57129289+WillyKidd@users.noreply.github.com>
Date:   Fri Mar 10 15:21:52 2023 +0800
    test(connector): add test cases for debezium json test (#8334)
    Co-authored-by: StrikeW

commit 79b499ce1895e1eaedb40c3b34e0ea0deef7bb2b
Author: stonepage <40830455+st1page@users.noreply.github.com>
Date:   Fri Mar 10 15:02:19 2023 +0800
    refactor(optimizer): move some methods into core struct && refactor the join's predicate push down (#8455)

commit 64d80d2068465f6a3258c1c6cd18fd073fea16c6
Author: August
Date:   Fri Mar 10 14:22:46 2023 +0800
    feat(test): optimize set stmts in simulation to avoid duplicate replay (#8420)

commit b6244d7aac8212782361596f669bbbe057d78f92
Author: Eric Fu
Date:   Fri Mar 10 13:43:09 2023 +0800
    chore: update and add connector-node to docker-compose.yml (#8427)

commit 0a7a47e8766ea5baacfa20bd60866afe98ef1757
Author: Bohan Zhang
Date:   Fri Mar 10 11:21:18 2023 +0700
    chore: add license check path (#8459)
    Signed-off-by: tabVersion
    Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

commit 614b6c5e1c6cedfe3ed5184eecc18d365606439c
Author: William Wen <44139337+wenym1@users.noreply.github.com>
Date:   Fri Mar 10 11:41:18 2023 +0800
    fix(sink): fix connector node sink json payload serialization (#8461)

commit 2f626d9681e70304e1c709d6fa2f3a0b9604372e
Author: xiangjinwu <17769960+xiangjinwu@users.noreply.github.com>
Date:   Fri Mar 10 07:40:09 2023 +0800
    refactor(common): make certain `IntervalUnit` constructors test-only (#8464)

commit 86188efc148dad2621ce40e0efc1c927276cb25f
Author: xxchan
Date:   Thu Mar 9 23:20:59 2023 +0100
    chore: change storage log level to WARN for playground (#8406)

commit 6f3eb54ebfb325b513e399a6bfa393ce369dec21
Author: Shuxian Wang
Date:   Thu Mar 9 06:38:47 2023 -0800
    feat(temporal-join): Temporal join executor (#8412)
    Co-authored-by: Dylan Chen
    Co-authored-by: Dylan

commit 5294f430bb94cc6f5c0accf9359d51f4f8b28207
Author: xiangjinwu <17769960+xiangjinwu@users.noreply.github.com>
Date:   Thu Mar 9 20:50:54 2023 +0800
    refactor(common): cleanup unused methods on `IntervalUnit` (#8456)

commit f12b263d03f10b6b95f0a5516393ef710598e082
Author: Li0k
Date:   Thu Mar 9 20:06:49 2023 +0800
    feat(storage): monitor avg_key_size and avg_epoch_count (#8297)

commit 953e4b23dd518f6d78c59ebfc9af50039b1858af
Author: ZENOTME <43447882+ZENOTME@users.noreply.github.com>
Date:   Thu Mar 9 17:38:57 2023 +0800
    feat(common): support from_binary and from_text in ScalarImpl (#8421)
---
 .dockerignore | 2 +-
 .gitattributes | 2 -
 .github/labeler.yml | 2 +
 .github/pr-title-checker-config.json | 2 +-
 .../workflows/connector-node-integration.yml | 2 +-
 .github/workflows/dashboard_main.yml | 8 +-
 .github/workflows/intergration_tests.yml | 113 +
 .github/workflows/typo.yml | 2 +-
 .gitignore | 4 +-
 .licenserc.yaml | 3 +
 .typos.toml | 2 +
 Cargo.lock | 839 +-
 Cargo.toml | 5 +
 Makefile.toml | 74 +-
 ci/scripts/build.sh | 3 +-
 ci/scripts/common.env.sh | 2 +
 ci/scripts/deterministic-e2e-test.sh | 2 +-
 ci/scripts/e2e-iceberg-sink-test.sh | 22 +-
 ci/scripts/e2e-sink-test.sh | 30 +-
 ci/scripts/e2e-source-test.sh | 14 +-
 ci/scripts/e2e-test-parallel-for-opendal.sh | 63 +
 ci/scripts/e2e-test-parallel-in-memory.sh | 2 +-
 ci/scripts/java-binding-test.sh | 3 +
 ci/scripts/run-e2e-test.sh | 8 +-
 .../s3-source-test-for-opendal-fs-engine.sh | 56 +
 ci/scripts/s3-source-test.sh | 9 +-
 ci/workflows/main-cron.yml | 44 +-
 ci/workflows/main.yml | 4 +-
 ci/workflows/pull-request.yml | 18 +-
 dashboard/.gitignore | 5 +-
 dashboard/README.md | 38 +-
 dashboard/package-lock.json | 1 +
 dashboard/package.json | 3 +-
 dashboard/proto/gen/backup_service.ts | 390 --
 dashboard/proto/gen/batch_plan.ts | 2472 ---------
 dashboard/proto/gen/catalog.ts | 1314 -----
 dashboard/proto/gen/common.ts | 659 ---
 dashboard/proto/gen/compactor.ts | 100 -
 dashboard/proto/gen/compute.ts | 74 -
 dashboard/proto/gen/connector_service.ts | 1066 ----
 dashboard/proto/gen/data.ts | 958 ----
 dashboard/proto/gen/ddl_service.ts | 1516 ------
 dashboard/proto/gen/expr.ts | 1200 -----
 dashboard/proto/gen/health.ts | 117 -
 dashboard/proto/gen/hummock.ts | 4564 -----------------
 dashboard/proto/gen/java_binding.ts | 237 -
 dashboard/proto/gen/meta.ts | 2637 ----------
 dashboard/proto/gen/monitor_service.ts | 274 -
 dashboard/proto/gen/order.ts | 128 -
 dashboard/proto/gen/plan_common.ts | 414 --
 dashboard/proto/gen/source.ts | 162 -
 dashboard/proto/gen/stream_plan.ts | 4432 ----------------
 dashboard/proto/gen/stream_service.ts | 725 ---
 dashboard/proto/gen/task_service.ts | 544 --
 dashboard/proto/gen/user.ts | 889 ----
 dashboard/scripts/generate_proto.sh | 6 +-
 docker/Dockerfile | 8 +-
 docker/docker-compose.yml | 63 +-
 docker/prometheus.yaml | 14 +-
 docs/developer-guide.md | 6 +-
 e2e_test/batch/aggregate/array_agg.slt.part | 23 +-
 e2e_test/batch/aggregate/sum.slt.part | 2 +-
 e2e_test/batch/basic/dml.slt.part | 21 +
 e2e_test/batch/basic/generate_series.slt.part | 4 +
 e2e_test/batch/basic/index.slt.part | 59 +-
 e2e_test/batch/basic/join.slt.part | 26 +-
 e2e_test/batch/basic/order_by.slt.part | 54 +-
 e2e_test/batch/basic/time_window.slt.part | 117 +
 e2e_test/batch/explain.slt | 2 +-
 e2e_test/batch/functions/pi.slt.part | 32 +
 e2e_test/batch/types/interval.slt.part | 67 +-
 .../batch/types/temporal_arithmetic.slt.part | 10 +-
 e2e_test/ddl/alter_rename_relation.slt | 148 +
 e2e_test/ddl/alter_table_column.slt | 47 +-
 e2e_test/ddl/invalid_operation.slt | 2 +-
 e2e_test/ddl/show.slt | 2 +-
 e2e_test/ddl/table.slt | 259 +-
 e2e_test/ddl/table/generated_columns.slt.part | 41 +
 e2e_test/ddl/table/table.slt.part | 261 +
 .../basic.slt | 27 +-
 e2e_test/extended_mode/type.slt | 28 +
 e2e_test/s3/run_csv.py | 155 +
 e2e_test/sink/append_only_sink.slt | 38 +-
 e2e_test/sink/iceberg_sink.slt | 27 +-
 e2e_test/sink/remote/jdbc.check.pg.slt | 10 +-
 e2e_test/sink/remote/jdbc.load.slt | 24 +-
 e2e_test/sink/remote/mysql_create_table.sql | 11 +
 .../sink/remote/mysql_expected_result.tsv | 5 +
 e2e_test/sink/remote/pg_create_table.sql | 11 +
 e2e_test/source/basic/kafka.slt | 4 +-
 e2e_test/source/cdc/cdc.load.slt | 1 -
 e2e_test/source/cdc/cdc.validate.postgres.slt | 3 -
 e2e_test/streaming/array_agg.slt | 17 +-
 e2e_test/streaming/bug_fixes/issue_8084.slt | 24 +
 e2e_test/streaming/bug_fixes/issue_8570.slt | 54 +
 e2e_test/streaming/order_by.slt | 58 +-
 e2e_test/streaming/temporal_join.slt | 64 +
 e2e_test/udf/python.slt | 27 +
 e2e_test/udf/test.py | 17 +-
 grafana/risingwave-dashboard.dashboard.py | 40 +-
 grafana/risingwave-dashboard.json | 2 +-
 integration_tests/README.md | 46 +
 integration_tests/ad-click/create_mv.sql | 13 +
 integration_tests/ad-click/create_source.sql | 13 +
 integration_tests/ad-click/data_check | 1 +
 integration_tests/ad-click/docker-compose.yml | 62 +
 integration_tests/ad-click/query.sql | 6 +
 integration_tests/ad-ctr/create_mv.sql | 64 +
 integration_tests/ad-ctr/create_source.sql | 20 +
 integration_tests/ad-ctr/data_check | 1 +
 integration_tests/ad-ctr/docker-compose.yml | 62 +
 integration_tests/ad-ctr/query.sql | 6 +
 integration_tests/cdn-metrics/create_mv.sql | 79 +
 .../cdn-metrics/create_source.sql | 26 +
 integration_tests/cdn-metrics/data_check | 1 +
 .../cdn-metrics/docker-compose.yml | 62 +
 integration_tests/cdn-metrics/query.sql | 8 +
 integration_tests/clickstream/create_mv.sql | 34 +
 .../clickstream/create_source.sql | 14 +
 integration_tests/clickstream/data_check | 1 +
 .../clickstream/docker-compose.yml | 62 +
 integration_tests/clickstream/query.sql | 19 +
 integration_tests/datagen/.gitignore | 1 +
 integration_tests/datagen/.goreleaser.yaml | 21 +
 integration_tests/datagen/Dockerfile | 8 +
 .../datagen/ad_click/ad_click.go | 58 +
 integration_tests/datagen/ad_ctr/ad_ctr.go | 111 +
 .../datagen/cdn_metrics/cdn_metrics.go | 32 +
 integration_tests/datagen/cdn_metrics/nics.go | 108 +
 integration_tests/datagen/cdn_metrics/tcp.go | 88 +
 .../datagen/clickstream/clickstream.go | 153 +
 .../datagen/delivery/delivery.go | 73 +
 .../datagen/ecommerce/ecommerce.go | 139 +
 integration_tests/datagen/gen/generator.go | 96 +
 integration_tests/datagen/go.mod | 78 +
 integration_tests/datagen/go.sum | 836 +++
 .../datagen/livestream/livestream.go | 147 +
 .../datagen/livestream/proto/livestream.pb.go | 256 +
 integration_tests/datagen/load_gen.go | 133 +
 integration_tests/datagen/main.go | 214 +
 integration_tests/datagen/nexmark/auction.go | 67 +
 integration_tests/datagen/sink/kafka/kafka.go | 139 +
 .../datagen/sink/kinesis/kinesis.go | 52 +
 integration_tests/datagen/sink/mysql/mysql.go | 51 +
 .../datagen/sink/postgres/postgres.go | 47 +
 .../datagen/sink/pulsar/pulsar.go | 60 +
 integration_tests/datagen/sink/sink.go | 63 +
 integration_tests/datagen/twitter/avro.go | 45 +
 .../datagen/twitter/proto/twitter.pb.go | 347 ++
 integration_tests/datagen/twitter/twitter.go | 167 +
 .../datagen/twitter/twitter_example.json | 14 +
 integration_tests/delivery/delivery.sql | 25 +
 integration_tests/delivery/docker-compose.yml | 60 +
 integration_tests/ecommerce/ecommerce.sql | 60 +
 integration_tests/iceberg-sink/README.md | 36 +
 integration_tests/iceberg-sink/create_mv.sql | 7 +
 .../iceberg-sink/create_sink.sql | 13 +
 .../iceberg-sink/create_source.sql | 19 +
 .../iceberg-sink/docker-compose.yml | 98 +
 .../iceberg-sink/iceberg-query.sql | 1 +
 .../iceberg-sink/mysql_prepare.sql | 15 +
 .../presto-with-iceberg/Dockerfile | 5 +
 .../presto-with-iceberg/hadoop-catalog.xml | 22 +
 .../presto-with-iceberg/iceberg.properties | 6 +
 .../presto-with-iceberg/log.properties | 2 +
 .../iceberg-sink/spark-script/.gitignore | 3 +
 .../spark-script/create-table.sql | 11 +
 .../iceberg-sink/spark-script/query-table.sql | 1 +
 .../iceberg-sink/spark-script/run-sql-file.sh | 13 +
 integration_tests/livestream/create_mv.sql | 69 +
 .../livestream/create_source.sql | 26 +
 integration_tests/livestream/data_check | 1 +
 .../livestream/docker-compose.yml | 62 +
 integration_tests/livestream/livestream.proto | 19 +
 integration_tests/livestream/pb/create_mv.sql | 62 +
 .../livestream/pb/create_source.sql | 6 +
 integration_tests/livestream/query.sql | 19 +
 integration_tests/livestream/schema | 18 +
 integration_tests/mysql-cdc/create_mv.sql | 8 +
 integration_tests/mysql-cdc/create_source.sql | 18 +
 integration_tests/mysql-cdc/data_check | 1 +
 .../mysql-cdc/docker-compose.yml | 78 +
 integration_tests/mysql-cdc/mysql_prepare.sql | 28 +
 integration_tests/mysql-cdc/query.sql | 6 +
 integration_tests/mysql-sink/create_mv.sql | 16 +
 .../mysql-sink/create_source.sql | 14 +
 integration_tests/mysql-sink/data_check | 1 +
 .../mysql-sink/docker-compose.yml | 93 +
 .../mysql-sink/mysql_prepare.sql | 4 +
 integration_tests/mysql-sink/query.sql | 6 +
 integration_tests/postgres-cdc/create_mv.sql | 28 +
 .../postgres-cdc/create_source.sql | 41 +
 integration_tests/postgres-cdc/data_check | 1 +
 .../postgres-cdc/docker-compose.yml | 96 +
 .../postgres-cdc/postgres_prepare.sql | 112 +
 integration_tests/postgres-cdc/query.sql | 6 +
 integration_tests/postgres-sink/README.md | 16 +
 integration_tests/postgres-sink/create_mv.sql | 16 +
 .../postgres-sink/create_source.sql | 14 +
 integration_tests/postgres-sink/data_check | 1 +
 .../postgres-sink/docker-compose.yml | 96 +
 .../postgres-sink/postgres_prepare.sql | 4 +
 integration_tests/postgres-sink/query.sql | 6 +
 integration_tests/prometheus/create_mv.sql | 16 +
 .../prometheus/create_source.sql | 13 +
 integration_tests/prometheus/create_user.sql | 6 +
 integration_tests/prometheus/data_check | 1 +
 .../prometheus/docker-compose.yml | 94 +
 integration_tests/prometheus/prometheus.yaml | 37 +
 integration_tests/prometheus/query.sql | 8 +
 .../schema-registry/create_mv.sql | 10 +
 .../schema-registry/create_source.sql | 8 +
 integration_tests/schema-registry/data_check | 1 +
 integration_tests/schema-registry/datagen.py | 144 +
 .../schema-registry/docker-compose.yml | 68 +
 integration_tests/schema-registry/query.sql | 6 +
 integration_tests/schema-registry/readme.md | 95 +
 integration_tests/scripts/.gitignore | 4 +
 integration_tests/scripts/check_data.py | 49 +
 integration_tests/scripts/gen_pb_compose.py | 48 +
 integration_tests/scripts/run_demos.py | 111 +
 integration_tests/superset/create_mv.sql | 13 +
 integration_tests/superset/create_source.sql | 26 +
 integration_tests/superset/docker-compose.yml | 137 +
 .../superset/docker/.env-non-dev | 46 +
 .../superset/docker/docker-bootstrap.sh | 51 +
 .../superset/docker/docker-init.sh | 79 +
 .../superset/docker/pythonpath_dev/.gitignore | 23 +
 .../docker/pythonpath_dev/superset_config.py | 124 +
 .../superset/docker/requirements-local.txt | 1 +
 .../superset/docker/run-server.sh | 33 +
 integration_tests/superset/query.sql | 6 +
 .../tidb-cdc-sink/config/changefeed.toml | 4 +
 .../tidb-cdc-sink/config/pd.toml | 86 +
 .../tidb-cdc-sink/config/tidb.toml | 239 +
 .../tidb-cdc-sink/config/tikv.toml | 497 ++
 integration_tests/tidb-cdc-sink/create_mv.sql | 31 +
 .../tidb-cdc-sink/create_source.sql | 27 +
 integration_tests/tidb-cdc-sink/data_check | 1 +
 .../tidb-cdc-sink/docker-compose.yml | 232 +
 integration_tests/tidb-cdc-sink/query.sql | 8 +
 .../tidb-cdc-sink/tidb_create_tables.sql | 25 +
 .../twitter-pulsar/create_mv.sql | 34 +
 .../twitter-pulsar/create_source.sql | 19 +
 .../twitter-pulsar/docker-compose.yml | 67 +
 integration_tests/twitter-pulsar/query.sql | 8 +
 integration_tests/twitter/avro.json | 27 +
 integration_tests/twitter/avro/create_mv.sql | 21 +
 .../twitter/avro/create_source.sql | 6 +
 integration_tests/twitter/create_mv.sql | 21 +
 integration_tests/twitter/create_source.sql | 19 +
 integration_tests/twitter/data_check | 1 +
 integration_tests/twitter/docker-compose.yml | 62 +
 integration_tests/twitter/pb/create_mv.sql | 21 +
 .../twitter/pb/create_source.sql | 6 +
 integration_tests/twitter/query.sql | 8 +
 integration_tests/twitter/schema | 19 +
 integration_tests/twitter/twitter.proto | 24 +
 java/com_risingwave_java_binding_Binding.h | 44 +-
 .../com/risingwave/java/utils/MetaClient.java | 14 +
 .../risingwave/java/utils/MinioUrlParser.java | 14 +
 .../risingwave/java/utils/VnodeHelper.java | 14 +
 .../connector/utils/MinioUrlParserTest.java | 14 +
 .../connector/utils/VnodeHelperTest.java | 14 +
 .../assembly/scripts/start-service.sh | 5 +-
 .../connector/api/PkComparator.java | 14 +
 .../risingwave/connector/api/TableSchema.java | 28 +
 .../connector/api/sink/ArraySinkRow.java | 45 +
 .../connector/api/sink/ArraySinkrow.java | 28 -
 .../connector/api/sink/CloseableIterator.java | 21 +
 .../connector/api/sink/Deserializer.java | 22 +
 .../risingwave/connector/api/sink/Sink.java | 14 +
 .../connector/api/sink/SinkBase.java | 14 +
 .../connector/api/sink/SinkFactory.java | 17 +-
 .../connector/api/sink/SinkRow.java | 22 +-
 .../api/sink/TrivialCloseIterator.java | 41 +
 .../connector/api/source/CdcEngine.java | 14 +
 .../connector/api/source/CdcEngineRunner.java | 14 +
 .../connector/api/source/ConnectorConfig.java | 37 -
 .../connector/api/source/SourceConfig.java | 14 +
 .../connector/api/source/SourceHandler.java | 16 +-
 .../connector/api/source/SourceTypeE.java | 14 +
 .../python-client/integration_tests.py | 37 +-
 .../python-client/pyspark-util.py | 15 +
 .../risingwave-connector-service/pom.xml | 17 +-
 .../connector/ConnectorService.java | 14 +
 .../connector/ConnectorServiceImpl.java | 14 +
 .../risingwave/connector/Deserializer.java | 8 -
 .../com/risingwave/connector/FileSink.java | 55 +-
 .../risingwave/connector/FileSinkFactory.java | 21 +-
 .../connector/JsonDeserializer.java | 189 +-
 .../com/risingwave/connector/PrintSink.java | 33 +-
 .../connector/PrintSinkFactory.java | 18 +-
 .../connector/SinkStreamObserver.java | 75 +-
 .../com/risingwave/connector/SinkUtils.java | 14 +
 .../connector/SinkValidationHandler.java | 23 +-
 .../deserializer/StreamChunkDeserializer.java | 215 +
 .../metrics/ConnectorNodeMetrics.java | 14 +
 .../metrics/MonitoredRowIterator.java | 14 +
 .../sourcenode/SourceRequestHandler.java | 221 +-
 .../sourcenode/common/DbzConnectorConfig.java | 133 +
 .../sourcenode/common/DebeziumCdcUtils.java | 24 -
 ...efaultCdcEngine.java => DbzCdcEngine.java} | 39 +-
 ...ineRunner.java => DbzCdcEngineRunner.java} | 56 +-
 ...Consumer.java => DbzCdcEventConsumer.java} | 20 +-
 ...urceHandler.java => DbzSourceHandler.java} | 45 +-
 .../sourcenode/core/SourceHandlerFactory.java | 33 +-
 .../sourcenode/mysql/MySqlSourceConfig.java | 95 -
 .../postgres/PostgresSourceConfig.java | 110 -
 .../src/main/resources/debezium.properties | 8 +
 .../src/main/resources/log4j.properties | 14 -
 .../src/main/resources/log4j2.properties | 12 +
 .../src/main/resources/mysql.properties | 21 +
 .../src/main/resources/postgres.properties | 30 +
 .../main/resources/validate_sql.properties | 25 +
 .../connector/DeserializerTest.java | 21 +-
 .../risingwave/connector/FileSinkTest.java | 24 +-
 .../risingwave/connector/PrintSinkTest.java | 24 +-
 .../connector/SinkStreamObserverTest.java | 19 +-
 .../risingwave-sink-deltalake/pom.xml | 4 +-
 .../risingwave/connector/DeltaLakeSink.java | 53 +-
 .../connector/DeltaLakeSinkFactory.java | 28 +-
 .../connector/DeltaLakeSinkUtil.java | 14 +
 .../connector/DeltaLakeLocalSinkTest.java | 24 +-
 .../connector/DeltaLakeSinkFactoryTest.java | 14 +
 .../risingwave-sink-iceberg/pom.xml | 4 +-
 .../com/risingwave/connector/IcebergSink.java | 123 +-
 .../connector/IcebergSinkFactory.java | 209 +-
 .../com/risingwave/connector/SinkRowMap.java | 26 +-
 .../com/risingwave/connector/SinkRowOp.java | 32 +-
 .../connector/UpsertIcebergSink.java | 119 +-
 .../connector/IcebergSinkFactoryTest.java | 21 +-
 .../connector/IcebergSinkLocalTest.java | 24 +-
 .../connector/IcebergSinkPartitionTest.java | 24 +-
 .../risingwave/connector/SinkRowMapTest.java | 121 +-
 .../connector/UpsertIcebergSinkLocalTest.java | 36 +-
 .../UpsertIcebergSinkPartitionTest.java | 36 +-
 .../risingwave-sink-jdbc/pom.xml | 4 +-
 .../com/risingwave/connector/JDBCSink.java | 168 +-
 .../risingwave/connector/JDBCSinkFactory.java | 91 +-
 .../risingwave/connector/JDBCSinkTest.java | 30 +-
 .../converters/DatetimeTypeConverter.java | 19 +-
 .../risingwave-source-test/pom.xml | 91 +
 .../risingwave/connector/MySQLSourceTest.java | 205 +
 .../connector/PostgresSourceTest.java | 257 +
 .../connector/SourceTestClient.java | 210 +
 .../src/test/resources/my.cnf | 7 +
 .../src/test/resources/orders.tbl | 0
 .../test/resources/stored_queries.properties | 3 +
 .../{Demo.java => HummockReadDemo.java} | 59 +-
 .../java/binding/StreamChunkDemo.java | 43 +
 .../com/risingwave/java/binding/Utils.java | 56 +
 .../com/risingwave/java/binding/BaseRow.java | 65 +
 .../com/risingwave/java/binding/Binding.java | 31 +-
 .../java/binding/HummockIterator.java | 43 +
 .../com/risingwave/java/binding/Iterator.java | 29 -
 .../com/risingwave/java/binding/KeyedRow.java | 64 +-
 .../java/binding/StreamChunkIterator.java | 41 +
 .../java/binding/StreamChunkRow.java | 27 +
 java/pom.xml | 20 +-
 java/tools/maven/checkstyle.xml | 5 -
 proto/catalog.proto | 32 +-
 proto/common.proto | 14 +-
 proto/connector_service.proto | 17 +-
 proto/data.proto | 3 +-
 proto/ddl_service.proto | 49 +
 proto/expr.proto | 24 +
 proto/hummock.proto | 70 +-
 proto/meta.proto | 38 +-
 proto/plan_common.proto | 9 +-
 proto/stream_plan.proto | 54 +-
 proto/task_service.proto | 9 +-
 risedev.yml | 107 +-
 src/batch/Cargo.toml | 2 +-
 src/batch/benches/expand.rs | 4 +-
 src/batch/benches/filter.rs | 10 +-
 src/batch/benches/hash_agg.rs | 4 +-
 src/batch/benches/hash_join.rs | 12 +-
 src/batch/benches/limit.rs | 4 +-
 src/batch/benches/nested_loop_join.rs | 10 +-
 src/batch/benches/sort.rs | 18 +-
 src/batch/benches/top_n.rs | 18 +-
 src/batch/src/error.rs | 7 +
 src/batch/src/executor/filter.rs | 6 +-
 src/batch/src/executor/generic_exchange.rs | 14 +-
 src/batch/src/executor/group_top_n.rs | 38 +-
 src/batch/src/executor/hash_agg.rs | 10 +-
 src/batch/src/executor/hop_window.rs | 117 +-
 src/batch/src/executor/insert.rs | 11 +-
 .../executor/join/distributed_lookup_join.rs | 8 +-
 src/batch/src/executor/join/hash_join.rs | 89 +-
 .../src/executor/join/local_lookup_join.rs | 99 +-
 src/batch/src/executor/join/mod.rs | 22 +-
 .../src/executor/join/nested_loop_join.rs | 36 +-
 src/batch/src/executor/merge_sort_exchange.rs | 32 +-
 src/batch/src/executor/order_by.rs | 156 +-
 src/batch/src/executor/project.rs | 10 +-
 src/batch/src/executor/project_set.rs | 11 +-
 src/batch/src/executor/row_seq_scan.rs | 21 +-
 src/batch/src/executor/sort_agg.rs | 61 +-
 src/batch/src/executor/table_function.rs | 2 +-
 src/batch/src/executor/test_utils.rs | 8 +-
 src/batch/src/executor/top_n.rs | 50 +-
 src/batch/src/executor/update.rs | 10 +-
 src/batch/src/executor/values.rs | 2 +-
 src/batch/src/rpc/service/exchange.rs | 16 +-
 src/batch/src/rpc/service/task_service.rs | 19 +-
 .../task/consistent_hash_shuffle_channel.rs | 28 +-
 src/batch/src/task/data_chunk_in_channel.rs | 6 +-
 src/batch/src/task/task_execution.rs | 123 +-
 src/batch/src/task/task_manager.rs | 35 +-
 src/bench/Cargo.toml | 2 +-
 src/cmd/Cargo.toml | 2 +-
 src/cmd/src/bin/compactor.rs | 4 +-
 src/cmd/src/bin/compute_node.rs | 4 +-
 src/cmd/src/bin/ctl.rs | 4 +-
+- src/cmd/src/bin/frontend_node.rs | 4 +- src/cmd/src/bin/meta_node.rs | 4 +- src/cmd_all/Cargo.toml | 4 +- src/cmd_all/src/bin/risingwave.rs | 6 +- src/cmd_all/src/playground.rs | 38 +- src/common/Cargo.toml | 38 +- src/common/benches/bench_encoding.rs | 19 +- src/common/benches/bench_hash_key_encoding.rs | 224 + src/common/benches/bench_row.rs | 4 +- .../common_service/src/observer_manager.rs | 12 +- src/common/src/array/arrow.rs | 6 +- src/common/src/array/bool_array.rs | 6 +- src/common/src/array/bytes_array.rs | 6 +- src/common/src/array/column.rs | 22 +- src/common/src/array/column_proto_readers.rs | 14 +- src/common/src/array/data_chunk.rs | 61 +- src/common/src/array/error.rs | 10 +- src/common/src/array/interval_array.rs | 4 +- src/common/src/array/iterator.rs | 3 +- src/common/src/array/jsonb_array.rs | 36 +- src/common/src/array/list_array.rs | 22 +- src/common/src/array/mod.rs | 49 +- src/common/src/array/primitive_array.rs | 6 +- src/common/src/array/serial_array.rs | 6 + src/common/src/array/stream_chunk.rs | 35 +- src/common/src/array/struct_array.rs | 20 +- src/common/src/array/utf8_array.rs | 6 +- src/common/src/array/vis.rs | 8 +- src/common/src/bin/default_config.rs | 21 + src/common/src/buffer/bitmap.rs | 12 +- src/common/src/catalog/column.rs | 24 +- src/common/src/catalog/mod.rs | 3 +- src/common/src/catalog/physical_table.rs | 32 +- src/common/src/catalog/schema.rs | 21 +- src/common/src/catalog/test_utils.rs | 1 + src/common/src/config.rs | 187 +- src/common/src/constants.rs | 2 + src/common/src/error.rs | 6 +- src/common/src/field_generator/mod.rs | 32 +- src/common/src/field_generator/numeric.rs | 14 +- src/common/src/field_generator/timestamp.rs | 18 +- .../src/hash/consistent_hash/mapping.rs | 17 +- src/common/src/hash/consistent_hash/vnode.rs | 89 +- src/common/src/hash/dispatcher.rs | 6 +- src/common/src/hash/key.rs | 61 +- src/common/src/hash/mod.rs | 2 +- src/common/src/jemalloc.rs | 8 +- src/common/src/lib.rs | 3 +- src/common/src/monitor/mod.rs | 8 + src/common/src/monitor/process_linux.rs | 30 +- src/common/src/row/mod.rs | 22 + src/common/src/row/owned_row.rs | 67 +- src/common/src/session_config/mod.rs | 45 +- src/common/src/system_param/local_manager.rs | 6 + src/common/src/system_param/mod.rs | 18 +- src/common/src/system_param/reader.rs | 24 +- src/common/src/telemetry/manager.rs | 163 + src/common/src/telemetry/mod.rs | 226 + src/common/src/telemetry/report.rs | 112 + src/common/src/test_utils/rand_array.rs | 18 +- src/common/src/types/chrono_wrapper.rs | 25 +- src/common/src/types/decimal.rs | 6 + src/common/src/types/interval.rs | 934 ++-- src/common/src/types/mod.rs | 569 +- src/common/src/types/postgres_type.rs | 3 + src/common/src/util/addr.rs | 33 +- src/common/src/util/chunk_coalesce.rs | 13 + src/common/src/util/column_index_mapping.rs | 8 +- .../src/util/encoding_for_comparison.rs | 144 - src/common/src/util/memcmp_encoding.rs | 558 ++ src/common/src/util/mod.rs | 3 +- src/common/src/util/ordered/mod.rs | 153 - src/common/src/util/ordered/serde.rs | 167 +- src/{source/src => common/src/util}/row_id.rs | 9 + src/common/src/util/scan_range.rs | 52 +- src/common/src/util/schema_check.rs | 49 +- src/common/src/util/sort_util.rs | 424 +- .../column_aware_row_encoding.rs | 7 +- src/common/src/util/value_encoding/mod.rs | 10 +- src/compute/Cargo.toml | 2 + src/compute/src/lib.rs | 44 +- .../src/memory_management/memory_manager.rs | 25 +- src/compute/src/memory_management/mod.rs | 97 + src/compute/src/memory_management/policy.rs | 48 +- 
src/compute/src/rpc/service/stream_service.rs | 2 +- src/compute/src/server.rs | 78 +- src/compute/src/telemetry.rs | 76 + src/compute/tests/integration_tests.rs | 26 +- src/config/ci-compaction-test-meta.toml | 18 +- src/config/ci-compaction-test.toml | 14 +- src/config/ci-iceberg-test.toml | 4 +- src/config/ci-meta-backup-test.toml | 6 +- src/config/ci-recovery.toml | 6 +- src/config/ci.toml | 6 +- src/config/example.toml | 92 +- src/connector/Cargo.toml | 7 +- src/connector/src/common.rs | 21 + src/connector/src/lib.rs | 8 +- src/connector/src/macros.rs | 37 +- src/connector/src/parser/avro/parser.rs | 138 +- src/connector/src/parser/avro/util.rs | 57 +- .../src/parser/canal/simd_json_parser.rs | 22 +- src/connector/src/parser/common.rs | 72 +- src/connector/src/parser/csv_parser.rs | 553 +- .../src/parser/debezium/avro_parser.rs | 40 +- .../src/parser/debezium/simd_json_parser.rs | 419 +- src/connector/src/parser/json_parser.rs | 57 +- src/connector/src/parser/macros.rs | 18 +- .../src/parser/maxwell/simd_json_parser.rs | 24 +- src/connector/src/parser/mod.rs | 3 +- src/connector/src/parser/protobuf/parser.rs | 34 +- src/connector/src/sink/catalog/desc.rs | 29 +- src/connector/src/sink/catalog/mod.rs | 76 +- src/connector/src/sink/kafka.rs | 137 +- src/connector/src/sink/mod.rs | 144 +- src/connector/src/sink/remote.rs | 282 +- src/connector/src/source/base.rs | 68 +- .../src/source/cdc/enumerator/mod.rs | 2 +- .../src/source/cdc/source/message.rs | 3 +- src/connector/src/source/cdc/split.rs | 10 +- .../src/source/datagen/source/generator.rs | 106 +- .../src/source/datagen/source/reader.rs | 54 +- src/connector/src/source/datagen/split.rs | 10 +- .../src/source/filesystem/file_common.rs | 9 +- .../src/source/filesystem/nd_streaming.rs | 6 +- .../src/source/filesystem/s3/source/reader.rs | 7 +- .../source/google_pubsub/source/message.rs | 3 +- .../src/source/google_pubsub/split.rs | 10 +- .../src/source/kafka/enumerator/client.rs | 12 +- src/connector/src/source/kafka/mod.rs | 5 +- .../src/source/kafka/private_link.rs | 67 + .../src/source/kafka/source/message.rs | 5 +- .../src/source/kafka/source/reader.rs | 12 +- src/connector/src/source/kafka/split.rs | 10 +- .../src/source/kinesis/source/message.rs | 5 +- src/connector/src/source/kinesis/split.rs | 10 +- src/connector/src/source/manager.rs | 29 +- .../source/nexmark/source/combined_event.rs | 2 +- .../src/source/nexmark/source/message.rs | 12 +- src/connector/src/source/nexmark/split.rs | 10 +- .../src/source/pulsar/source/message.rs | 2 +- src/connector/src/source/pulsar/split.rs | 10 +- src/ctl/src/cmd_impl/bench.rs | 12 +- .../src/cmd_impl/hummock/compaction_group.rs | 4 + src/ctl/src/cmd_impl/hummock/list_kv.rs | 12 +- src/ctl/src/cmd_impl/hummock/sst_dump.rs | 280 +- src/ctl/src/cmd_impl/meta.rs | 2 + src/ctl/src/cmd_impl/meta/connection.rs | 72 + src/ctl/src/cmd_impl/table/scan.rs | 21 +- src/ctl/src/common.rs | 1 + src/ctl/src/common/context.rs | 9 +- src/ctl/src/common/hummock_service.rs | 51 +- src/ctl/src/lib.rs | 60 +- src/expr/Cargo.toml | 7 + src/expr/benches/expr.rs | 214 +- src/expr/macro/Cargo.toml | 15 + src/expr/macro/src/gen.rs | 286 ++ src/expr/macro/src/lib.rs | 114 + src/expr/macro/src/parse.rs | 160 + src/expr/macro/src/types.rs | 189 + src/expr/macro/src/utils.rs | 29 + src/expr/src/error.rs | 9 +- src/expr/src/expr/build_expr_from_prost.rs | 585 +-- src/expr/src/expr/data_types.rs | 15 + src/expr/src/expr/expr_array_concat.rs | 42 +- src/expr/src/expr/expr_array_distinct.rs | 212 +- 
src/expr/src/expr/expr_array_length.rs | 63 + src/expr/src/expr/expr_array_to_string.rs | 237 +- src/expr/src/expr/expr_binary_bytes.rs | 181 - src/expr/src/expr/expr_binary_nonnull.rs | 950 +--- src/expr/src/expr/expr_binary_nullable.rs | 321 +- src/expr/src/expr/expr_case.rs | 174 +- src/expr/src/expr/expr_coalesce.rs | 25 +- src/expr/src/expr/expr_concat_ws.rs | 43 +- src/expr/src/expr/expr_field.rs | 21 +- src/expr/src/expr/expr_in.rs | 40 +- src/expr/src/expr/expr_input_ref.rs | 11 +- src/expr/src/expr/expr_is_null.rs | 59 +- src/expr/src/expr/expr_jsonb_access.rs | 165 +- src/expr/src/expr/expr_literal.rs | 38 +- src/expr/src/expr/expr_nested_construct.rs | 39 +- src/expr/src/expr/expr_now.rs | 24 + src/expr/src/expr/expr_quaternary_bytes.rs | 102 - src/expr/src/expr/expr_regexp.rs | 9 +- src/expr/src/expr/expr_some_all.rs | 104 +- src/expr/src/expr/expr_ternary_bytes.rs | 261 - src/expr/src/expr/expr_to_char_const_tmpl.rs | 56 +- .../src/expr/expr_to_timestamp_const_tmpl.rs | 57 +- src/expr/src/expr/expr_udf.rs | 29 +- src/expr/src/expr/expr_unary.rs | 472 +- src/expr/src/expr/expr_vnode.rs | 42 +- src/expr/src/expr/mod.rs | 76 +- src/expr/src/expr/template.rs | 87 +- src/expr/src/expr/template_fast.rs | 119 +- src/expr/src/expr/test_utils.rs | 148 +- src/expr/src/sig/cast.rs | 7 - src/expr/src/sig/func.rs | 383 +- .../src/table_function/generate_series.rs | 77 +- src/expr/src/table_function/mod.rs | 45 +- src/expr/src/table_function/regexp_matches.rs | 7 +- src/expr/src/table_function/unnest.rs | 7 +- src/expr/src/table_function/user_defined.rs | 112 + src/expr/src/vector_op/agg/aggregator.rs | 20 +- .../vector_op/agg/approx_count_distinct.rs | 16 +- src/expr/src/vector_op/agg/array_agg.rs | 74 +- src/expr/src/vector_op/agg/count_star.rs | 5 +- src/expr/src/vector_op/agg/filter.rs | 118 +- src/expr/src/vector_op/agg/general_agg.rs | 70 +- .../src/vector_op/agg/general_distinct_agg.rs | 63 +- src/expr/src/vector_op/agg/string_agg.rs | 70 +- src/expr/src/vector_op/arithmetic_op.rs | 321 +- src/expr/src/vector_op/array_access.rs | 41 +- src/expr/src/vector_op/ascii.rs | 10 +- src/expr/src/vector_op/bitwise_op.rs | 62 +- src/expr/src/vector_op/cast.rs | 590 +-- src/expr/src/vector_op/cmp.rs | 412 +- src/expr/src/vector_op/concat_op.rs | 9 +- src/expr/src/vector_op/conjunction.rs | 74 +- src/expr/src/vector_op/date_trunc.rs | 18 +- src/expr/src/vector_op/exp.rs | 2 + src/expr/src/vector_op/extract.rs | 72 +- src/expr/src/vector_op/format_type.rs | 11 +- src/expr/src/vector_op/jsonb_info.rs | 11 +- src/expr/src/vector_op/length.rs | 27 +- src/expr/src/vector_op/like.rs | 12 +- src/expr/src/vector_op/lower.rs | 12 +- src/expr/src/vector_op/ltrim.rs | 47 - src/expr/src/vector_op/md5.rs | 12 +- src/expr/src/vector_op/mod.rs | 7 +- src/expr/src/vector_op/overlay.rs | 6 +- src/expr/src/vector_op/position.rs | 4 +- src/expr/src/vector_op/repeat.rs | 12 +- src/expr/src/vector_op/replace.rs | 14 +- src/expr/src/vector_op/round.rs | 22 +- src/expr/src/vector_op/rtrim.rs | 46 - src/expr/src/vector_op/split_part.rs | 4 +- src/expr/src/vector_op/string.rs | 335 ++ src/expr/src/vector_op/substr.rs | 26 +- src/expr/src/vector_op/tests.rs | 252 - src/expr/src/vector_op/timestamptz.rs | 9 +- src/expr/src/vector_op/to_char.rs | 19 +- src/expr/src/vector_op/to_timestamp.rs | 3 +- src/expr/src/vector_op/translate.rs | 17 +- src/expr/src/vector_op/trim.rs | 122 +- src/expr/src/vector_op/trim_characters.rs | 89 - src/expr/src/vector_op/tumble.rs | 177 +- src/expr/src/vector_op/upper.rs | 12 +- 
src/frontend/Cargo.toml | 2 + src/frontend/planner_test/src/lib.rs | 43 +- .../planner_test/tests/testdata/agg.yaml | 83 +- .../tests/testdata/basic_query.yaml | 22 + .../tests/testdata/batch_dist_agg.yaml | 12 +- .../tests/testdata/bushy_join.yaml | 1405 +++++ .../tests/testdata/delta_join.yaml | 6 +- .../tests/testdata/distribution_derive.yaml | 983 ++-- .../planner_test/tests/testdata/explain.yaml | 2 +- .../planner_test/tests/testdata/expr.yaml | 14 +- .../tests/testdata/generated_columns.yaml | 11 + .../planner_test/tests/testdata/join.yaml | 122 +- .../planner_test/tests/testdata/nexmark.yaml | 1473 ++++-- .../tests/testdata/nexmark_source.yaml | 1557 +++--- .../tests/testdata/over_window_function.yaml | 20 +- .../tests/testdata/predicate_pushdown.yaml | 6 +- .../tests/testdata/stream_dist_agg.yaml | 1566 ++++-- .../planner_test/tests/testdata/subquery.yaml | 3 + .../testdata/subquery_expr_correlated.yaml | 41 +- .../tests/testdata/temporal_filter.yaml | 38 + .../tests/testdata/temporal_join.yaml | 154 + .../tests/testdata/time_window.yaml | 6 +- .../planner_test/tests/testdata/tpch.yaml | 2911 ++++++----- .../planner_test/tests/testdata/union.yaml | 162 +- .../planner_test/tests/testdata/update.yaml | 4 + .../tests/testdata/watermark.yaml | 69 +- src/frontend/src/binder/bind_param.rs | 195 + src/frontend/src/binder/delete.rs | 23 +- src/frontend/src/binder/expr/function.rs | 29 +- src/frontend/src/binder/expr/mod.rs | 16 +- src/frontend/src/binder/expr/order_by.rs | 21 +- src/frontend/src/binder/expr/value.rs | 1 + src/frontend/src/binder/insert.rs | 55 +- src/frontend/src/binder/mod.rs | 122 +- src/frontend/src/binder/query.rs | 37 +- src/frontend/src/binder/relation/join.rs | 10 + src/frontend/src/binder/relation/mod.rs | 50 +- src/frontend/src/binder/relation/share.rs | 7 + src/frontend/src/binder/relation/subquery.rs | 7 + .../src/binder/relation/table_or_source.rs | 17 +- src/frontend/src/binder/relation/watermark.rs | 12 + .../binder/relation/window_table_function.rs | 12 + src/frontend/src/binder/select.rs | 41 + src/frontend/src/binder/set_expr.rs | 15 + src/frontend/src/binder/statement.rs | 16 + src/frontend/src/binder/update.rs | 41 +- src/frontend/src/binder/values.rs | 16 + src/frontend/src/catalog/catalog_service.rs | 89 +- src/frontend/src/catalog/database_catalog.rs | 12 +- src/frontend/src/catalog/function_catalog.rs | 27 +- src/frontend/src/catalog/index_catalog.rs | 32 +- src/frontend/src/catalog/root_catalog.rs | 80 +- src/frontend/src/catalog/schema_catalog.rs | 108 +- src/frontend/src/catalog/source_catalog.rs | 8 +- .../src/catalog/system_catalog/mod.rs | 1 + .../catalog/system_catalog/pg_catalog/mod.rs | 16 + src/frontend/src/catalog/table_catalog.rs | 142 +- src/frontend/src/catalog/view_catalog.rs | 6 +- src/frontend/src/expr/expr_mutator.rs | 4 +- src/frontend/src/expr/expr_rewriter.rs | 8 +- src/frontend/src/expr/expr_visitor.rs | 6 +- src/frontend/src/expr/function_call.rs | 25 +- src/frontend/src/expr/input_ref.rs | 6 +- src/frontend/src/expr/literal.rs | 5 +- src/frontend/src/expr/mod.rs | 24 +- src/frontend/src/expr/order_by_expr.rs | 16 +- src/frontend/src/expr/parameter.rs | 79 + src/frontend/src/expr/table_function.rs | 44 +- src/frontend/src/expr/type_inference/func.rs | 166 +- .../src/expr/user_defined_function.rs | 7 +- src/frontend/src/expr/utils.rs | 3 +- .../src/handler/alter_relation_rename.rs | 198 + .../src/handler/alter_table_column.rs | 9 +- src/frontend/src/handler/create_function.rs | 65 +- src/frontend/src/handler/create_index.rs | 53 
+- src/frontend/src/handler/create_mv.rs | 13 +- src/frontend/src/handler/create_sink.rs | 5 +- src/frontend/src/handler/create_source.rs | 60 +- src/frontend/src/handler/create_table.rs | 281 +- src/frontend/src/handler/create_table_as.rs | 9 +- src/frontend/src/handler/create_view.rs | 4 +- src/frontend/src/handler/describe.rs | 19 +- src/frontend/src/handler/explain.rs | 3 +- src/frontend/src/handler/extended_handle.rs | 94 + src/frontend/src/handler/handle_privilege.rs | 34 +- src/frontend/src/handler/mod.rs | 55 +- src/frontend/src/handler/privilege.rs | 34 +- src/frontend/src/handler/query.rs | 252 +- src/frontend/src/handler/show.rs | 2 +- src/frontend/src/handler/variable.rs | 23 +- src/frontend/src/lib.rs | 11 +- src/frontend/src/observer/observer_manager.rs | 130 +- .../src/optimizer/logical_optimization.rs | 33 +- src/frontend/src/optimizer/mod.rs | 131 +- .../plan_expr_rewriter/const_eval_rewriter.rs | 1 + .../src/optimizer/plan_node/batch_delete.rs | 4 +- .../src/optimizer/plan_node/batch_exchange.rs | 6 +- .../src/optimizer/plan_node/batch_expand.rs | 4 +- .../src/optimizer/plan_node/batch_filter.rs | 4 +- .../optimizer/plan_node/batch_group_topn.rs | 4 +- .../src/optimizer/plan_node/batch_hash_agg.rs | 4 +- .../optimizer/plan_node/batch_hash_join.rs | 10 +- .../optimizer/plan_node/batch_hop_window.rs | 4 +- .../src/optimizer/plan_node/batch_insert.rs | 4 +- .../src/optimizer/plan_node/batch_limit.rs | 6 +- .../optimizer/plan_node/batch_lookup_join.rs | 6 +- .../plan_node/batch_nested_loop_join.rs | 4 +- .../src/optimizer/plan_node/batch_project.rs | 4 +- .../optimizer/plan_node/batch_project_set.rs | 4 +- .../src/optimizer/plan_node/batch_seq_scan.rs | 8 +- .../optimizer/plan_node/batch_simple_agg.rs | 5 +- .../src/optimizer/plan_node/batch_sort.rs | 6 +- .../src/optimizer/plan_node/batch_sort_agg.rs | 18 +- .../src/optimizer/plan_node/batch_source.rs | 5 +- .../plan_node/batch_table_function.rs | 6 +- .../src/optimizer/plan_node/batch_topn.rs | 4 +- .../src/optimizer/plan_node/batch_union.rs | 4 +- .../src/optimizer/plan_node/batch_update.rs | 4 +- .../src/optimizer/plan_node/batch_values.rs | 4 +- .../src/optimizer/plan_node/derive.rs | 16 +- .../optimizer/plan_node/eq_join_predicate.rs | 9 + .../src/optimizer/plan_node/generic/agg.rs | 248 +- .../plan_node/generic/dynamic_filter.rs | 8 +- .../src/optimizer/plan_node/generic/expand.rs | 49 + .../src/optimizer/plan_node/generic/filter.rs | 17 + .../optimizer/plan_node/generic/hop_window.rs | 173 +- .../src/optimizer/plan_node/generic/join.rs | 188 +- .../src/optimizer/plan_node/generic/mod.rs | 11 + .../optimizer/plan_node/generic/project.rs | 8 +- .../plan_node/generic/project_set.rs | 8 +- .../src/optimizer/plan_node/generic/scan.rs | 20 +- .../src/optimizer/plan_node/generic/share.rs | 5 + .../src/optimizer/plan_node/generic/source.rs | 50 +- .../src/optimizer/plan_node/generic/top_n.rs | 28 +- .../src/optimizer/plan_node/generic/union.rs | 5 + .../src/optimizer/plan_node/logical_agg.rs | 89 +- .../src/optimizer/plan_node/logical_apply.rs | 26 +- .../src/optimizer/plan_node/logical_expand.rs | 26 +- .../src/optimizer/plan_node/logical_filter.rs | 53 +- .../optimizer/plan_node/logical_hop_window.rs | 234 +- .../src/optimizer/plan_node/logical_join.rs | 407 +- .../optimizer/plan_node/logical_multi_join.rs | 239 + .../optimizer/plan_node/logical_over_agg.rs | 27 +- .../optimizer/plan_node/logical_project.rs | 32 +- .../plan_node/logical_project_set.rs | 43 +- .../src/optimizer/plan_node/logical_scan.rs | 161 +- 
.../src/optimizer/plan_node/logical_share.rs | 18 +- .../src/optimizer/plan_node/logical_source.rs | 95 +- .../src/optimizer/plan_node/logical_topn.rs | 16 +- .../src/optimizer/plan_node/logical_union.rs | 15 +- src/frontend/src/optimizer/plan_node/mod.rs | 31 +- .../src/optimizer/plan_node/plan_base.rs | 14 + .../src/optimizer/plan_node/stream.rs | 63 +- .../optimizer/plan_node/stream_delta_join.rs | 8 +- .../src/optimizer/plan_node/stream_derive.rs | 79 +- .../src/optimizer/plan_node/stream_dml.rs | 6 +- .../plan_node/stream_dynamic_filter.rs | 2 +- .../optimizer/plan_node/stream_exchange.rs | 80 +- .../src/optimizer/plan_node/stream_expand.rs | 6 +- .../src/optimizer/plan_node/stream_filter.rs | 6 +- .../plan_node/stream_global_simple_agg.rs | 8 +- .../optimizer/plan_node/stream_group_topn.rs | 8 +- .../optimizer/plan_node/stream_hash_agg.rs | 7 +- .../optimizer/plan_node/stream_hash_join.rs | 8 +- .../optimizer/plan_node/stream_hop_window.rs | 10 +- .../optimizer/plan_node/stream_index_scan.rs | 225 - .../plan_node/stream_local_simple_agg.rs | 6 +- .../optimizer/plan_node/stream_materialize.rs | 13 +- .../src/optimizer/plan_node/stream_now.rs | 2 +- .../src/optimizer/plan_node/stream_project.rs | 6 +- .../optimizer/plan_node/stream_project_set.rs | 6 +- .../optimizer/plan_node/stream_row_id_gen.rs | 17 +- .../src/optimizer/plan_node/stream_share.rs | 14 +- .../src/optimizer/plan_node/stream_sink.rs | 117 +- .../src/optimizer/plan_node/stream_source.rs | 10 +- .../optimizer/plan_node/stream_table_scan.rs | 130 +- .../plan_node/stream_temporal_join.rs | 235 + .../src/optimizer/plan_node/stream_topn.rs | 8 +- .../src/optimizer/plan_node/stream_union.rs | 6 +- .../plan_node/stream_watermark_filter.rs | 10 +- .../src/optimizer/plan_node/to_prost.rs | 14 +- src/frontend/src/optimizer/plan_node/utils.rs | 28 +- .../plan_visitor/max_one_row_visitor.rs | 12 +- .../src/optimizer/plan_visitor/mod.rs | 2 + .../plan_visitor/plan_correlated_id_finder.rs | 20 +- .../plan_visitor/temporal_join_validator.rs | 51 + .../src/optimizer/property/distribution.rs | 13 +- src/frontend/src/optimizer/property/order.rs | 279 +- .../rule/always_false_filter_rule.rs | 58 + .../rule/apply_agg_transpose_rule.rs | 16 +- .../rule/apply_filter_transpose_rule.rs | 55 +- .../optimizer/rule/apply_offset_rewriter.rs | 71 + .../rule/apply_project_transpose_rule.rs | 46 +- .../optimizer/rule/index_delta_join_rule.rs | 4 +- .../optimizer/rule/index_selection_rule.rs | 48 +- ...s => left_deep_tree_join_ordering_rule.rs} | 8 +- .../optimizer/rule/min_max_on_index_rule.rs | 166 +- src/frontend/src/optimizer/rule/mod.rs | 16 +- .../optimizer/rule/over_agg_to_topn_rule.rs | 10 +- .../stream/bushy_tree_join_ordering_rule.rs | 39 + src/frontend/src/optimizer/rule/stream/mod.rs | 2 + .../rule/stream/stream_project_merge_rule.rs | 46 + .../src/optimizer/rule/top_n_on_index_rule.rs | 107 +- src/frontend/src/planner/query.rs | 6 +- src/frontend/src/planner/relation.rs | 53 +- src/frontend/src/planner/select.rs | 9 +- src/frontend/src/planner/set_expr.rs | 4 +- .../src/scheduler/distributed/query.rs | 34 +- .../src/scheduler/distributed/stage.rs | 44 +- src/frontend/src/scheduler/local.rs | 35 +- src/frontend/src/scheduler/plan_fragmenter.rs | 19 +- .../src/scheduler/worker_node_manager.rs | 10 + src/frontend/src/session.rs | 57 +- .../stream_fragmenter/graph/fragment_graph.rs | 9 +- src/frontend/src/stream_fragmenter/mod.rs | 21 +- .../stream_fragmenter/rewrite/delta_join.rs | 4 +- src/frontend/src/telemetry.rs | 91 + 
src/frontend/src/test_utils.rs | 69 +- src/frontend/src/user/user_privilege.rs | 38 +- .../src/utils/column_index_mapping.rs | 33 +- src/frontend/src/utils/rewrite_index.rs | 8 +- .../src/utils/stream_graph_formatter.rs | 397 +- src/java_binding/Cargo.toml | 6 + src/java_binding/make-java-binding.toml | 22 + src/java_binding/run_demo.sh | 9 +- .../src/bin/data-chunk-payload-generator.rs | 78 + .../src/{iterator.rs => hummock_iterator.rs} | 59 +- src/java_binding/src/lib.rs | 134 +- src/java_binding/src/stream_chunk_iterator.rs | 58 + src/meta/Cargo.toml | 3 + src/meta/src/backup_restore/backup_manager.rs | 153 +- .../backup_restore/meta_snapshot_builder.rs | 1 - src/meta/src/backup_restore/restore.rs | 1 + src/meta/src/barrier/command.rs | 28 +- src/meta/src/barrier/mod.rs | 8 +- src/meta/src/barrier/progress.rs | 2 +- src/meta/src/barrier/recovery.rs | 15 + src/meta/src/dashboard/mod.rs | 4 +- src/meta/src/error.rs | 15 +- .../hummock/compaction/compaction_config.rs | 29 +- .../src/hummock/compaction/level_selector.rs | 21 +- src/meta/src/hummock/compaction/mod.rs | 65 +- .../picker/base_level_compaction_picker.rs | 54 +- .../picker/manual_compaction_picker.rs | 42 +- .../picker/min_overlap_compaction_picker.rs | 18 +- .../picker/space_reclaim_compaction_picker.rs | 17 +- .../picker/tier_compaction_picker.rs | 21 +- .../picker/ttl_reclaim_compaction_picker.rs | 16 +- .../src/hummock/compaction_schedule_policy.rs | 108 +- src/meta/src/hummock/compaction_scheduler.rs | 10 +- src/meta/src/hummock/compactor_manager.rs | 48 +- src/meta/src/hummock/error.rs | 6 +- src/meta/src/hummock/level_handler.rs | 6 +- src/meta/src/hummock/manager/compaction.rs | 3 +- .../manager/compaction_group_manager.rs | 294 +- src/meta/src/hummock/manager/context.rs | 4 +- src/meta/src/hummock/manager/gc.rs | 49 +- src/meta/src/hummock/manager/mod.rs | 360 +- src/meta/src/hummock/manager/tests.rs | 372 +- src/meta/src/hummock/manager/versioning.rs | 289 +- .../src/hummock/mock_hummock_meta_client.rs | 20 +- .../hummock/model/compact_task_assignment.rs | 6 +- .../hummock/model/compaction_group_config.rs | 6 +- .../src/hummock/model/compaction_status.rs | 6 +- src/meta/src/hummock/model/pinned_snapshot.rs | 6 +- src/meta/src/hummock/model/pinned_version.rs | 6 +- src/meta/src/hummock/model/version.rs | 6 +- src/meta/src/hummock/model/version_delta.rs | 6 +- src/meta/src/hummock/model/version_stats.rs | 6 +- src/meta/src/hummock/test_utils.rs | 58 +- src/meta/src/hummock/vacuum.rs | 132 +- src/meta/src/lib.rs | 109 +- src/meta/src/manager/catalog/connection.rs | 69 + src/meta/src/manager/catalog/database.rs | 32 + src/meta/src/manager/catalog/fragment.rs | 74 +- src/meta/src/manager/catalog/mod.rs | 860 +++- src/meta/src/manager/catalog/utils.rs | 371 ++ src/meta/src/manager/env.rs | 15 + src/meta/src/manager/id.rs | 6 + src/meta/src/manager/notification.rs | 56 +- src/meta/src/manager/streaming_job.rs | 12 - src/meta/src/manager/system_param/mod.rs | 6 +- src/meta/src/model/catalog.rs | 6 +- src/meta/src/model/cluster.rs | 12 +- src/meta/src/model/connection.rs | 41 + src/meta/src/model/error.rs | 10 +- src/meta/src/model/mod.rs | 14 +- src/meta/src/model/stream.rs | 24 +- src/meta/src/model/user.rs | 6 +- src/meta/src/rpc/cloud_provider.rs | 158 + src/meta/src/rpc/ddl_controller.rs | 84 +- src/meta/src/rpc/election_client.rs | 2 +- src/meta/src/rpc/metrics.rs | 10 + src/meta/src/rpc/mod.rs | 1 + src/meta/src/rpc/server.rs | 73 +- src/meta/src/rpc/service/ddl_service.rs | 198 +- 
src/meta/src/rpc/service/hummock_service.rs | 56 +- .../src/rpc/service/meta_member_service.rs | 10 +- src/meta/src/rpc/service/mod.rs | 1 + .../src/rpc/service/notification_service.rs | 5 + src/meta/src/rpc/service/telemetry_service.rs | 63 + src/meta/src/storage/etcd_meta_store.rs | 5 + src/meta/src/storage/mem_meta_store.rs | 5 + src/meta/src/storage/meta_store.rs | 3 + src/meta/src/stream/mod.rs | 2 + src/meta/src/stream/scale.rs | 59 +- src/meta/src/stream/sink.rs | 36 + src/meta/src/stream/source_manager.rs | 10 +- src/meta/src/stream/stream_graph/actor.rs | 15 +- src/meta/src/stream/stream_graph/fragment.rs | 72 +- src/meta/src/stream/stream_graph/schedule.rs | 14 +- src/meta/src/stream/stream_manager.rs | 8 +- src/meta/src/stream/test_fragmenter.rs | 33 +- src/meta/src/stream/test_scale.rs | 2 +- src/meta/src/telemetry.rs | 160 + src/object_store/Cargo.toml | 2 +- src/object_store/src/object/mod.rs | 18 + .../src/object/opendal_engine/azblob.rs | 44 + .../src/object/opendal_engine/fs.rs | 34 + .../src/object/opendal_engine/gcs.rs | 2 +- .../src/object/opendal_engine/mod.rs | 4 + .../opendal_engine/opendal_object_store.rs | 39 +- .../src/object/opendal_engine/oss.rs | 2 +- .../src/object/opendal_engine/webhdfs.rs | 2 +- src/prost/Cargo.toml | 1 + src/prost/build.rs | 2 + src/prost/helpers/src/generate.rs | 10 +- src/prost/src/lib.rs | 6 +- src/risedevtool/connector.toml | 6 +- src/risedevtool/src/compose.rs | 18 +- src/risedevtool/src/service_config.rs | 14 +- src/risedevtool/src/task/compactor_service.rs | 35 +- .../src/task/compute_node_service.rs | 66 +- src/risedevtool/src/task/connector_service.rs | 4 +- src/risedevtool/src/task/etcd_service.rs | 2 - src/risedevtool/src/task/meta_node_service.rs | 61 +- src/risedevtool/src/task/utils.rs | 14 +- src/rpc_client/src/compute_client.rs | 6 +- src/rpc_client/src/connector_client.rs | 99 +- src/rpc_client/src/hummock_meta_client.rs | 13 +- src/rpc_client/src/meta_client.rs | 123 +- src/source/benches/json_parser.rs | 104 +- src/source/src/connector_source.rs | 6 +- src/source/src/lib.rs | 1 - src/source/src/source_desc.rs | 39 +- src/source/src/table.rs | 4 +- src/sqlparser/examples/parse.rs | 2 +- src/sqlparser/src/ast/ddl.rs | 54 + src/sqlparser/src/ast/mod.rs | 107 +- src/sqlparser/src/ast/query.rs | 10 +- src/sqlparser/src/ast/statement.rs | 2 +- src/sqlparser/src/parser.rs | 418 +- src/sqlparser/src/test_utils.rs | 3 +- src/sqlparser/src/tokenizer.rs | 77 +- src/sqlparser/test_runner/Cargo.toml | 16 + src/sqlparser/test_runner/sqlparser_test.toml | 55 + src/sqlparser/test_runner/src/bin/apply.rs | 121 + src/sqlparser/test_runner/src/lib.rs | 33 +- src/sqlparser/tests/sqlparser_common.rs | 254 +- src/sqlparser/tests/sqlparser_postgres.rs | 117 +- src/sqlparser/tests/testdata/.gitignore | 1 + src/sqlparser/tests/testdata/alter.yaml | 4 +- src/sqlparser/tests/testdata/array.yaml | 25 +- src/sqlparser/tests/testdata/create.yaml | 76 +- src/sqlparser/tests/testdata/drop.yaml | 11 +- src/sqlparser/tests/testdata/drop_index.yaml | 4 +- src/sqlparser/tests/testdata/insert.yaml | 7 +- src/sqlparser/tests/testdata/precedence.yaml | 20 +- src/sqlparser/tests/testdata/privilege.yaml | 32 +- src/sqlparser/tests/testdata/select.yaml | 83 +- src/sqlparser/tests/testdata/set.yaml | 17 + src/sqlparser/tests/testdata/show.yaml | 60 +- src/sqlparser/tests/testdata/struct.yaml | 2 +- src/storage/Cargo.toml | 11 +- .../backup/integration_tests/common.sh | 4 - .../backup/integration_tests/run_all.sh | 1 + .../backup/integration_tests/test_basic.sh | 1 
- .../integration_tests/test_set_config.sh | 79 + src/storage/backup/src/lib.rs | 18 +- src/storage/backup/src/storage.rs | 1 + src/storage/benches/bench_block_iter.rs | 53 +- src/storage/benches/bench_compactor.rs | 26 +- src/storage/benches/bench_lru_cache.rs | 38 +- src/storage/benches/bench_multi_builder.rs | 2 +- src/storage/compactor/Cargo.toml | 3 + .../compactor_observer/observer_manager.rs | 26 +- src/storage/compactor/src/lib.rs | 23 +- src/storage/compactor/src/server.rs | 42 +- src/storage/compactor/src/telemetry.rs | 75 + src/storage/hummock_sdk/src/compact.rs | 12 +- .../compaction_group/hummock_version_ext.rs | 393 +- .../hummock_sdk/src/filter_key_extractor.rs | 85 +- src/storage/hummock_sdk/src/key.rs | 48 +- src/storage/hummock_sdk/src/lib.rs | 27 +- src/storage/hummock_sdk/src/table_stats.rs | 22 +- .../hummock_test/src/compactor_tests.rs | 20 +- .../src/hummock_read_version_tests.rs | 18 +- .../src/local_version_manager_tests.rs | 40 +- .../src/mock_notification_client.rs | 5 + .../hummock_test/src/state_store_tests.rs | 54 +- .../hummock_test/src/sync_point_tests.rs | 90 +- src/storage/hummock_test/src/test_utils.rs | 9 +- src/storage/hummock_test/src/vacuum_tests.rs | 8 +- src/storage/src/hummock/backup_reader.rs | 98 +- src/storage/src/hummock/block_cache.rs | 34 +- .../src/hummock/compactor/compaction_utils.rs | 17 +- .../src/hummock/compactor/compactor_runner.rs | 15 +- src/storage/src/hummock/compactor/context.rs | 8 +- src/storage/src/hummock/compactor/iterator.rs | 292 +- src/storage/src/hummock/compactor/mod.rs | 90 +- src/storage/src/hummock/error.rs | 8 +- .../event_handler/hummock_event_handler.rs | 22 +- .../src/hummock/event_handler/uploader.rs | 6 +- .../src/hummock/hummock_meta_client.rs | 19 +- .../src/hummock/iterator/concat_inner.rs | 14 +- .../src/hummock/iterator/test_utils.rs | 20 +- src/storage/src/hummock/mod.rs | 33 +- src/storage/src/hummock/observer_manager.rs | 49 +- .../sstable/backward_sstable_iterator.rs | 18 +- src/storage/src/hummock/sstable/block.rs | 488 +- .../src/hummock/sstable/block_iterator.rs | 212 +- src/storage/src/hummock/sstable/builder.rs | 103 +- .../sstable/forward_sstable_iterator.rs | 22 +- src/storage/src/hummock/sstable/mod.rs | 26 +- .../src/hummock/sstable/multi_builder.rs | 16 +- .../src/hummock/sstable/sstable_id_manager.rs | 333 -- .../sstable/sstable_object_id_manager.rs | 359 ++ src/storage/src/hummock/sstable_store.rs | 156 +- src/storage/src/hummock/store/state_store.rs | 7 + src/storage/src/hummock/store/version.rs | 4 +- src/storage/src/hummock/test_utils.rs | 68 +- src/storage/src/hummock/utils.rs | 2 +- src/storage/src/hummock/vacuum.rs | 18 +- src/storage/src/hummock/validator.rs | 31 +- src/storage/src/hummock/value.rs | 15 +- src/storage/src/hummock/write_limiter.rs | 104 + src/storage/src/lib.rs | 1 + src/storage/src/monitor/compactor_metrics.rs | 13 +- src/storage/src/monitor/monitored_store.rs | 6 +- src/storage/src/opts.rs | 14 +- src/storage/src/store_impl.rs | 23 +- .../src/table/batch_table/storage_table.rs | 42 +- src/storage/src/table/mod.rs | 42 +- src/stream/Cargo.toml | 2 + src/stream/src/common/builder.rs | 9 +- src/stream/src/common/infallible_expr.rs | 56 - src/stream/src/common/mod.rs | 2 - src/stream/src/common/table/state_table.rs | 110 +- .../src/common/table/test_state_table.rs | 20 +- .../src/common/table/test_storage_table.rs | 8 +- src/stream/src/common/table/test_utils.rs | 14 +- src/stream/src/error.rs | 8 +- src/stream/src/executor/actor.rs | 56 +- 
.../src/executor/aggregation/agg_call.rs | 4 +- .../src/executor/aggregation/agg_group.rs | 233 +- .../src/executor/aggregation/distinct.rs | 6 +- src/stream/src/executor/aggregation/minput.rs | 68 +- src/stream/src/executor/aggregation/mod.rs | 4 +- src/stream/src/executor/aggregation/value.rs | 4 +- src/stream/src/executor/backfill.rs | 48 +- src/stream/src/executor/barrier_recv.rs | 107 + src/stream/src/executor/chain.rs | 55 +- src/stream/src/executor/dispatch.rs | 78 +- src/stream/src/executor/dynamic_filter.rs | 39 +- src/stream/src/executor/error.rs | 6 +- src/stream/src/executor/filter.rs | 102 +- src/stream/src/executor/global_simple_agg.rs | 64 +- src/stream/src/executor/hash_agg.rs | 133 +- src/stream/src/executor/hash_join.rs | 165 +- src/stream/src/executor/hop_window.rs | 365 +- src/stream/src/executor/integration_tests.rs | 15 +- src/stream/src/executor/local_simple_agg.rs | 39 +- src/stream/src/executor/lookup.rs | 4 + src/stream/src/executor/lookup/cache.rs | 5 + src/stream/src/executor/lookup/impl_.rs | 43 +- src/stream/src/executor/lookup/sides.rs | 13 +- src/stream/src/executor/lookup/tests.rs | 52 +- .../src/executor/managed_state/join/mod.rs | 61 +- .../managed_state/top_n/top_n_state.rs | 17 +- src/stream/src/executor/merge.rs | 15 + src/stream/src/executor/mod.rs | 118 +- .../src/executor/monitor/streaming_stats.rs | 16 +- src/stream/src/executor/mview/materialize.rs | 32 +- src/stream/src/executor/mview/test_utils.rs | 4 +- src/stream/src/executor/project.rs | 213 +- src/stream/src/executor/project_set.rs | 30 +- src/stream/src/executor/rearranged_chain.rs | 55 +- src/stream/src/executor/row_id_gen.rs | 31 +- src/stream/src/executor/simple.rs | 95 - src/stream/src/executor/sink.rs | 2 +- src/stream/src/executor/sort.rs | 6 +- src/stream/src/executor/sort_buffer.rs | 2 +- .../src/executor/source/source_executor.rs | 69 +- .../executor/source/state_table_handler.rs | 46 +- src/stream/src/executor/subtask.rs | 23 +- src/stream/src/executor/temporal_join.rs | 306 ++ src/stream/src/executor/test_utils.rs | 10 +- src/stream/src/executor/top_n/group_top_n.rs | 67 +- .../executor/top_n/group_top_n_appendonly.rs | 33 +- .../src/executor/top_n/top_n_appendonly.rs | 33 +- src/stream/src/executor/top_n/top_n_cache.rs | 140 +- src/stream/src/executor/top_n/top_n_plain.rs | 141 +- src/stream/src/executor/top_n/utils.rs | 12 +- src/stream/src/executor/watermark_filter.rs | 44 +- src/stream/src/executor/wrapper.rs | 2 + .../src/executor/wrapper/schema_check.rs | 20 +- src/stream/src/from_proto/agg_common.rs | 13 +- src/stream/src/from_proto/barrier_recv.rs | 49 + src/stream/src/from_proto/batch_query.rs | 8 +- src/stream/src/from_proto/chain.rs | 84 +- src/stream/src/from_proto/group_top_n.rs | 14 +- .../src/from_proto/group_top_n_appendonly.rs | 14 +- src/stream/src/from_proto/lookup.rs | 13 +- src/stream/src/from_proto/mod.rs | 10 +- src/stream/src/from_proto/mview.rs | 6 +- src/stream/src/from_proto/project.rs | 8 +- src/stream/src/from_proto/sink.rs | 6 +- src/stream/src/from_proto/source.rs | 12 +- src/stream/src/from_proto/temporal_join.rs | 217 + src/stream/src/from_proto/top_n.rs | 14 +- src/stream/src/from_proto/top_n_appendonly.rs | 10 +- src/stream/src/lib.rs | 10 - src/stream/src/task/barrier_manager.rs | 4 +- src/stream/src/task/env.rs | 17 +- src/stream/src/task/stream_manager.rs | 27 +- .../src/compaction_test_runner.rs | 30 +- .../src/delete_range_runner.rs | 16 +- src/tests/e2e_extended_mode/Cargo.toml | 29 + src/tests/e2e_extended_mode/README.md | 21 + 
src/tests/e2e_extended_mode/src/main.rs | 48 + src/tests/e2e_extended_mode/src/opts.rs | 33 + src/tests/e2e_extended_mode/src/test.rs | 334 ++ src/tests/regress/data/sql/interval.sql | 70 +- src/tests/simulation/Cargo.toml | 3 + src/tests/simulation/src/client.rs | 123 +- src/tests/simulation/src/cluster.rs | 112 +- src/tests/simulation/src/ctl_ext.rs | 24 +- src/tests/simulation/src/kafka.rs | 6 +- .../simulation/src/nexmark/create_source.sql | 24 +- src/tests/simulation/src/risingwave.toml | 5 +- src/tests/simulation/src/slt.rs | 25 +- .../src/{utils.rs => utils/assert_result.rs} | 0 src/tests/simulation/src/utils/mod.rs | 19 + .../simulation/src/utils/timed_future.rs | 76 + .../tests/it/cascade_materialized_view.rs | 46 +- src/tests/simulation/tests/it/delta_join.rs | 123 + .../simulation/tests/it/dynamic_filter.rs | 51 +- src/tests/simulation/tests/it/main.rs | 4 + .../simulation/tests/it/nexmark_chaos.rs | 18 +- .../simulation/tests/it/nexmark_recovery.rs | 84 + .../tests/it/singleton_migration.rs | 19 +- src/tests/simulation/tests/it/sink.rs | 4 +- .../tests/it/streaming_parallelism.rs | 94 +- src/tests/sqlsmith/Cargo.toml | 1 + src/tests/sqlsmith/scripts/gen_queries.sh | 63 +- src/tests/sqlsmith/src/lib.rs | 6 +- src/tests/sqlsmith/src/reducer.rs | 407 ++ src/tests/sqlsmith/src/runner.rs | 18 +- src/tests/sqlsmith/src/sql_gen/relation.rs | 1 + src/tests/sqlsmith/src/sql_gen/scalar.rs | 14 +- src/tests/sqlsmith/src/sql_gen/types.rs | 1 + src/tests/sqlsmith/src/sql_gen/utils.rs | 1 + src/tests/sqlsmith/src/utils.rs | 43 + src/tests/state_cleaning_test/Cargo.toml | 37 + src/tests/state_cleaning_test/README.md | 18 + src/tests/state_cleaning_test/data/agg.toml | 42 + src/tests/state_cleaning_test/data/join.toml | 54 + .../data/temporal_filter.toml | 45 + src/tests/state_cleaning_test/src/bin/main.rs | 232 + src/udf/README.md | 20 - src/udf/python/README.md | 75 + src/udf/python/example.py | 19 +- src/udf/python/risingwave/udf.py | 123 +- src/udf/python/setup.py | 21 + src/udf/src/lib.rs | 5 +- src/utils/pgwire/Cargo.toml | 2 +- src/utils/pgwire/src/pg_extended.rs | 4 +- src/utils/pgwire/src/pg_protocol.rs | 30 +- src/utils/pgwire/src/pg_response.rs | 5 + src/utils/pgwire/tests/js/test/pgwire.test.ts | 27 +- src/utils/runtime/src/lib.rs | 1 + src/workspace-hack/Cargo.toml | 22 +- 1245 files changed, 52589 insertions(+), 47338 deletions(-) create mode 100644 .github/workflows/intergration_tests.yml create mode 100755 ci/scripts/e2e-test-parallel-for-opendal.sh create mode 100755 ci/scripts/s3-source-test-for-opendal-fs-engine.sh delete mode 100644 dashboard/proto/gen/backup_service.ts delete mode 100644 dashboard/proto/gen/batch_plan.ts delete mode 100644 dashboard/proto/gen/catalog.ts delete mode 100644 dashboard/proto/gen/common.ts delete mode 100644 dashboard/proto/gen/compactor.ts delete mode 100644 dashboard/proto/gen/compute.ts delete mode 100644 dashboard/proto/gen/connector_service.ts delete mode 100644 dashboard/proto/gen/data.ts delete mode 100644 dashboard/proto/gen/ddl_service.ts delete mode 100644 dashboard/proto/gen/expr.ts delete mode 100644 dashboard/proto/gen/health.ts delete mode 100644 dashboard/proto/gen/hummock.ts delete mode 100644 dashboard/proto/gen/java_binding.ts delete mode 100644 dashboard/proto/gen/meta.ts delete mode 100644 dashboard/proto/gen/monitor_service.ts delete mode 100644 dashboard/proto/gen/order.ts delete mode 100644 dashboard/proto/gen/plan_common.ts delete mode 100644 dashboard/proto/gen/source.ts delete mode 100644 
dashboard/proto/gen/stream_plan.ts delete mode 100644 dashboard/proto/gen/stream_service.ts delete mode 100644 dashboard/proto/gen/task_service.ts delete mode 100644 dashboard/proto/gen/user.ts create mode 100644 e2e_test/batch/functions/pi.slt.part create mode 100644 e2e_test/ddl/alter_rename_relation.slt create mode 100644 e2e_test/ddl/table/generated_columns.slt.part create mode 100644 e2e_test/ddl/table/table.slt.part rename e2e_test/{extended_query => extended_mode}/basic.slt (54%) create mode 100644 e2e_test/extended_mode/type.slt create mode 100644 e2e_test/s3/run_csv.py create mode 100644 e2e_test/sink/remote/mysql_create_table.sql create mode 100644 e2e_test/sink/remote/mysql_expected_result.tsv create mode 100644 e2e_test/sink/remote/pg_create_table.sql create mode 100644 e2e_test/streaming/bug_fixes/issue_8084.slt create mode 100644 e2e_test/streaming/bug_fixes/issue_8570.slt create mode 100644 e2e_test/streaming/temporal_join.slt create mode 100644 integration_tests/README.md create mode 100644 integration_tests/ad-click/create_mv.sql create mode 100644 integration_tests/ad-click/create_source.sql create mode 100644 integration_tests/ad-click/data_check create mode 100644 integration_tests/ad-click/docker-compose.yml create mode 100644 integration_tests/ad-click/query.sql create mode 100644 integration_tests/ad-ctr/create_mv.sql create mode 100644 integration_tests/ad-ctr/create_source.sql create mode 100644 integration_tests/ad-ctr/data_check create mode 100644 integration_tests/ad-ctr/docker-compose.yml create mode 100644 integration_tests/ad-ctr/query.sql create mode 100644 integration_tests/cdn-metrics/create_mv.sql create mode 100644 integration_tests/cdn-metrics/create_source.sql create mode 100644 integration_tests/cdn-metrics/data_check create mode 100644 integration_tests/cdn-metrics/docker-compose.yml create mode 100644 integration_tests/cdn-metrics/query.sql create mode 100644 integration_tests/clickstream/create_mv.sql create mode 100644 integration_tests/clickstream/create_source.sql create mode 100644 integration_tests/clickstream/data_check create mode 100644 integration_tests/clickstream/docker-compose.yml create mode 100644 integration_tests/clickstream/query.sql create mode 100644 integration_tests/datagen/.gitignore create mode 100644 integration_tests/datagen/.goreleaser.yaml create mode 100644 integration_tests/datagen/Dockerfile create mode 100644 integration_tests/datagen/ad_click/ad_click.go create mode 100644 integration_tests/datagen/ad_ctr/ad_ctr.go create mode 100644 integration_tests/datagen/cdn_metrics/cdn_metrics.go create mode 100644 integration_tests/datagen/cdn_metrics/nics.go create mode 100644 integration_tests/datagen/cdn_metrics/tcp.go create mode 100644 integration_tests/datagen/clickstream/clickstream.go create mode 100644 integration_tests/datagen/delivery/delivery.go create mode 100644 integration_tests/datagen/ecommerce/ecommerce.go create mode 100644 integration_tests/datagen/gen/generator.go create mode 100644 integration_tests/datagen/go.mod create mode 100644 integration_tests/datagen/go.sum create mode 100644 integration_tests/datagen/livestream/livestream.go create mode 100644 integration_tests/datagen/livestream/proto/livestream.pb.go create mode 100644 integration_tests/datagen/load_gen.go create mode 100644 integration_tests/datagen/main.go create mode 100644 integration_tests/datagen/nexmark/auction.go create mode 100644 integration_tests/datagen/sink/kafka/kafka.go create mode 100644 
integration_tests/datagen/sink/kinesis/kinesis.go create mode 100644 integration_tests/datagen/sink/mysql/mysql.go create mode 100644 integration_tests/datagen/sink/postgres/postgres.go create mode 100644 integration_tests/datagen/sink/pulsar/pulsar.go create mode 100644 integration_tests/datagen/sink/sink.go create mode 100644 integration_tests/datagen/twitter/avro.go create mode 100644 integration_tests/datagen/twitter/proto/twitter.pb.go create mode 100644 integration_tests/datagen/twitter/twitter.go create mode 100644 integration_tests/datagen/twitter/twitter_example.json create mode 100644 integration_tests/delivery/delivery.sql create mode 100644 integration_tests/delivery/docker-compose.yml create mode 100644 integration_tests/ecommerce/ecommerce.sql create mode 100644 integration_tests/iceberg-sink/README.md create mode 100644 integration_tests/iceberg-sink/create_mv.sql create mode 100644 integration_tests/iceberg-sink/create_sink.sql create mode 100644 integration_tests/iceberg-sink/create_source.sql create mode 100644 integration_tests/iceberg-sink/docker-compose.yml create mode 100644 integration_tests/iceberg-sink/iceberg-query.sql create mode 100644 integration_tests/iceberg-sink/mysql_prepare.sql create mode 100644 integration_tests/iceberg-sink/presto-with-iceberg/Dockerfile create mode 100644 integration_tests/iceberg-sink/presto-with-iceberg/hadoop-catalog.xml create mode 100644 integration_tests/iceberg-sink/presto-with-iceberg/iceberg.properties create mode 100644 integration_tests/iceberg-sink/presto-with-iceberg/log.properties create mode 100644 integration_tests/iceberg-sink/spark-script/.gitignore create mode 100644 integration_tests/iceberg-sink/spark-script/create-table.sql create mode 100644 integration_tests/iceberg-sink/spark-script/query-table.sql create mode 100644 integration_tests/iceberg-sink/spark-script/run-sql-file.sh create mode 100644 integration_tests/livestream/create_mv.sql create mode 100644 integration_tests/livestream/create_source.sql create mode 100644 integration_tests/livestream/data_check create mode 100644 integration_tests/livestream/docker-compose.yml create mode 100644 integration_tests/livestream/livestream.proto create mode 100644 integration_tests/livestream/pb/create_mv.sql create mode 100644 integration_tests/livestream/pb/create_source.sql create mode 100644 integration_tests/livestream/query.sql create mode 100644 integration_tests/livestream/schema create mode 100644 integration_tests/mysql-cdc/create_mv.sql create mode 100644 integration_tests/mysql-cdc/create_source.sql create mode 100644 integration_tests/mysql-cdc/data_check create mode 100644 integration_tests/mysql-cdc/docker-compose.yml create mode 100644 integration_tests/mysql-cdc/mysql_prepare.sql create mode 100644 integration_tests/mysql-cdc/query.sql create mode 100644 integration_tests/mysql-sink/create_mv.sql create mode 100644 integration_tests/mysql-sink/create_source.sql create mode 100644 integration_tests/mysql-sink/data_check create mode 100644 integration_tests/mysql-sink/docker-compose.yml create mode 100644 integration_tests/mysql-sink/mysql_prepare.sql create mode 100644 integration_tests/mysql-sink/query.sql create mode 100644 integration_tests/postgres-cdc/create_mv.sql create mode 100644 integration_tests/postgres-cdc/create_source.sql create mode 100644 integration_tests/postgres-cdc/data_check create mode 100644 integration_tests/postgres-cdc/docker-compose.yml create mode 100644 integration_tests/postgres-cdc/postgres_prepare.sql create mode 100644 
integration_tests/postgres-cdc/query.sql create mode 100644 integration_tests/postgres-sink/README.md create mode 100644 integration_tests/postgres-sink/create_mv.sql create mode 100644 integration_tests/postgres-sink/create_source.sql create mode 100644 integration_tests/postgres-sink/data_check create mode 100644 integration_tests/postgres-sink/docker-compose.yml create mode 100644 integration_tests/postgres-sink/postgres_prepare.sql create mode 100644 integration_tests/postgres-sink/query.sql create mode 100644 integration_tests/prometheus/create_mv.sql create mode 100644 integration_tests/prometheus/create_source.sql create mode 100644 integration_tests/prometheus/create_user.sql create mode 100644 integration_tests/prometheus/data_check create mode 100644 integration_tests/prometheus/docker-compose.yml create mode 100644 integration_tests/prometheus/prometheus.yaml create mode 100644 integration_tests/prometheus/query.sql create mode 100644 integration_tests/schema-registry/create_mv.sql create mode 100644 integration_tests/schema-registry/create_source.sql create mode 100644 integration_tests/schema-registry/data_check create mode 100644 integration_tests/schema-registry/datagen.py create mode 100644 integration_tests/schema-registry/docker-compose.yml create mode 100644 integration_tests/schema-registry/query.sql create mode 100644 integration_tests/schema-registry/readme.md create mode 100644 integration_tests/scripts/.gitignore create mode 100644 integration_tests/scripts/check_data.py create mode 100644 integration_tests/scripts/gen_pb_compose.py create mode 100644 integration_tests/scripts/run_demos.py create mode 100644 integration_tests/superset/create_mv.sql create mode 100644 integration_tests/superset/create_source.sql create mode 100644 integration_tests/superset/docker-compose.yml create mode 100644 integration_tests/superset/docker/.env-non-dev create mode 100755 integration_tests/superset/docker/docker-bootstrap.sh create mode 100755 integration_tests/superset/docker/docker-init.sh create mode 100644 integration_tests/superset/docker/pythonpath_dev/.gitignore create mode 100644 integration_tests/superset/docker/pythonpath_dev/superset_config.py create mode 100644 integration_tests/superset/docker/requirements-local.txt create mode 100644 integration_tests/superset/docker/run-server.sh create mode 100644 integration_tests/superset/query.sql create mode 100644 integration_tests/tidb-cdc-sink/config/changefeed.toml create mode 100644 integration_tests/tidb-cdc-sink/config/pd.toml create mode 100644 integration_tests/tidb-cdc-sink/config/tidb.toml create mode 100644 integration_tests/tidb-cdc-sink/config/tikv.toml create mode 100644 integration_tests/tidb-cdc-sink/create_mv.sql create mode 100644 integration_tests/tidb-cdc-sink/create_source.sql create mode 100644 integration_tests/tidb-cdc-sink/data_check create mode 100644 integration_tests/tidb-cdc-sink/docker-compose.yml create mode 100644 integration_tests/tidb-cdc-sink/query.sql create mode 100644 integration_tests/tidb-cdc-sink/tidb_create_tables.sql create mode 100644 integration_tests/twitter-pulsar/create_mv.sql create mode 100644 integration_tests/twitter-pulsar/create_source.sql create mode 100644 integration_tests/twitter-pulsar/docker-compose.yml create mode 100644 integration_tests/twitter-pulsar/query.sql create mode 100644 integration_tests/twitter/avro.json create mode 100644 integration_tests/twitter/avro/create_mv.sql create mode 100644 integration_tests/twitter/avro/create_source.sql create mode 100644 
integration_tests/twitter/create_mv.sql create mode 100644 integration_tests/twitter/create_source.sql create mode 100644 integration_tests/twitter/data_check create mode 100644 integration_tests/twitter/docker-compose.yml create mode 100644 integration_tests/twitter/pb/create_mv.sql create mode 100644 integration_tests/twitter/pb/create_source.sql create mode 100644 integration_tests/twitter/query.sql create mode 100644 integration_tests/twitter/schema create mode 100644 integration_tests/twitter/twitter.proto create mode 100644 java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkRow.java delete mode 100644 java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkrow.java create mode 100644 java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/CloseableIterator.java create mode 100644 java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/Deserializer.java create mode 100644 java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/TrivialCloseIterator.java delete mode 100644 java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/ConnectorConfig.java delete mode 100644 java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/Deserializer.java create mode 100644 java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/deserializer/StreamChunkDeserializer.java create mode 100644 java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/common/DbzConnectorConfig.java delete mode 100644 java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/common/DebeziumCdcUtils.java rename java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/{DefaultCdcEngine.java => DbzCdcEngine.java} (50%) rename java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/{DefaultCdcEngineRunner.java => DbzCdcEngineRunner.java} (52%) rename java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/{CdcEventConsumer.java => DbzCdcEventConsumer.java} (85%) rename java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/{DefaultSourceHandler.java => DbzSourceHandler.java} (59%) delete mode 100644 java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/mysql/MySqlSourceConfig.java delete mode 100644 java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/postgres/PostgresSourceConfig.java create mode 100644 java/connector-node/risingwave-connector-service/src/main/resources/debezium.properties delete mode 100644 java/connector-node/risingwave-connector-service/src/main/resources/log4j.properties create mode 100644 java/connector-node/risingwave-connector-service/src/main/resources/log4j2.properties create mode 100644 java/connector-node/risingwave-connector-service/src/main/resources/mysql.properties create mode 100644 java/connector-node/risingwave-connector-service/src/main/resources/postgres.properties create mode 100644 java/connector-node/risingwave-source-test/pom.xml create mode 100644 java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/MySQLSourceTest.java create mode 100644 
java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/PostgresSourceTest.java create mode 100644 java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/SourceTestClient.java create mode 100644 java/connector-node/risingwave-source-test/src/test/resources/my.cnf create mode 100644 java/connector-node/risingwave-source-test/src/test/resources/orders.tbl create mode 100644 java/connector-node/risingwave-source-test/src/test/resources/stored_queries.properties rename java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/{Demo.java => HummockReadDemo.java} (61%) create mode 100644 java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/StreamChunkDemo.java create mode 100644 java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/Utils.java create mode 100644 java/java-binding/src/main/java/com/risingwave/java/binding/BaseRow.java create mode 100644 java/java-binding/src/main/java/com/risingwave/java/binding/HummockIterator.java delete mode 100644 java/java-binding/src/main/java/com/risingwave/java/binding/Iterator.java create mode 100644 java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkIterator.java create mode 100644 java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkRow.java create mode 100644 src/common/benches/bench_hash_key_encoding.rs create mode 100644 src/common/src/bin/default_config.rs create mode 100644 src/common/src/telemetry/manager.rs create mode 100644 src/common/src/telemetry/mod.rs create mode 100644 src/common/src/telemetry/report.rs delete mode 100644 src/common/src/util/encoding_for_comparison.rs create mode 100644 src/common/src/util/memcmp_encoding.rs rename src/{source/src => common/src/util}/row_id.rs (94%) create mode 100644 src/compute/src/telemetry.rs create mode 100644 src/connector/src/source/kafka/private_link.rs create mode 100644 src/ctl/src/cmd_impl/meta/connection.rs create mode 100644 src/expr/macro/Cargo.toml create mode 100644 src/expr/macro/src/gen.rs create mode 100644 src/expr/macro/src/lib.rs create mode 100644 src/expr/macro/src/parse.rs create mode 100644 src/expr/macro/src/types.rs create mode 100644 src/expr/macro/src/utils.rs create mode 100644 src/expr/src/expr/expr_array_length.rs delete mode 100644 src/expr/src/expr/expr_binary_bytes.rs create mode 100644 src/expr/src/expr/expr_now.rs delete mode 100644 src/expr/src/expr/expr_quaternary_bytes.rs delete mode 100644 src/expr/src/expr/expr_ternary_bytes.rs create mode 100644 src/expr/src/table_function/user_defined.rs delete mode 100644 src/expr/src/vector_op/ltrim.rs delete mode 100644 src/expr/src/vector_op/rtrim.rs create mode 100644 src/expr/src/vector_op/string.rs delete mode 100644 src/expr/src/vector_op/tests.rs delete mode 100644 src/expr/src/vector_op/trim_characters.rs create mode 100644 src/frontend/planner_test/tests/testdata/bushy_join.yaml create mode 100644 src/frontend/planner_test/tests/testdata/generated_columns.yaml create mode 100644 src/frontend/planner_test/tests/testdata/temporal_join.yaml create mode 100644 src/frontend/src/binder/bind_param.rs create mode 100644 src/frontend/src/expr/parameter.rs create mode 100644 src/frontend/src/handler/alter_relation_rename.rs create mode 100644 src/frontend/src/handler/extended_handle.rs delete mode 100644 src/frontend/src/optimizer/plan_node/stream_index_scan.rs create mode 100644 src/frontend/src/optimizer/plan_node/stream_temporal_join.rs create mode 100644 
src/frontend/src/optimizer/plan_visitor/temporal_join_validator.rs
 create mode 100644 src/frontend/src/optimizer/rule/always_false_filter_rule.rs
 create mode 100644 src/frontend/src/optimizer/rule/apply_offset_rewriter.rs
 rename src/frontend/src/optimizer/rule/{reorder_multijoin_rule.rs => left_deep_tree_join_ordering_rule.rs} (95%)
 create mode 100644 src/frontend/src/optimizer/rule/stream/bushy_tree_join_ordering_rule.rs
 create mode 100644 src/frontend/src/optimizer/rule/stream/stream_project_merge_rule.rs
 create mode 100644 src/frontend/src/telemetry.rs
 create mode 100644 src/java_binding/src/bin/data-chunk-payload-generator.rs
 rename src/java_binding/src/{iterator.rs => hummock_iterator.rs} (77%)
 create mode 100644 src/java_binding/src/stream_chunk_iterator.rs
 create mode 100644 src/meta/src/manager/catalog/connection.rs
 create mode 100644 src/meta/src/manager/catalog/utils.rs
 create mode 100644 src/meta/src/model/connection.rs
 create mode 100644 src/meta/src/rpc/cloud_provider.rs
 create mode 100644 src/meta/src/rpc/service/telemetry_service.rs
 create mode 100644 src/meta/src/stream/sink.rs
 create mode 100644 src/meta/src/telemetry.rs
 create mode 100644 src/object_store/src/object/opendal_engine/azblob.rs
 create mode 100644 src/object_store/src/object/opendal_engine/fs.rs
 create mode 100644 src/sqlparser/test_runner/sqlparser_test.toml
 create mode 100644 src/sqlparser/test_runner/src/bin/apply.rs
 create mode 100644 src/sqlparser/tests/testdata/.gitignore
 create mode 100644 src/sqlparser/tests/testdata/set.yaml
 create mode 100644 src/storage/backup/integration_tests/test_set_config.sh
 create mode 100644 src/storage/compactor/src/telemetry.rs
 delete mode 100644 src/storage/src/hummock/sstable/sstable_id_manager.rs
 create mode 100644 src/storage/src/hummock/sstable/sstable_object_id_manager.rs
 create mode 100644 src/storage/src/hummock/write_limiter.rs
 delete mode 100644 src/stream/src/common/infallible_expr.rs
 create mode 100644 src/stream/src/executor/barrier_recv.rs
 delete mode 100644 src/stream/src/executor/simple.rs
 create mode 100644 src/stream/src/executor/temporal_join.rs
 create mode 100644 src/stream/src/from_proto/barrier_recv.rs
 create mode 100644 src/stream/src/from_proto/temporal_join.rs
 create mode 100644 src/tests/e2e_extended_mode/Cargo.toml
 create mode 100644 src/tests/e2e_extended_mode/README.md
 create mode 100644 src/tests/e2e_extended_mode/src/main.rs
 create mode 100644 src/tests/e2e_extended_mode/src/opts.rs
 create mode 100644 src/tests/e2e_extended_mode/src/test.rs
 rename src/tests/simulation/src/{utils.rs => utils/assert_result.rs} (100%)
 create mode 100644 src/tests/simulation/src/utils/mod.rs
 create mode 100644 src/tests/simulation/src/utils/timed_future.rs
 create mode 100644 src/tests/simulation/tests/it/delta_join.rs
 create mode 100644 src/tests/simulation/tests/it/nexmark_recovery.rs
 create mode 100644 src/tests/sqlsmith/src/reducer.rs
 create mode 100644 src/tests/sqlsmith/src/utils.rs
 create mode 100644 src/tests/state_cleaning_test/Cargo.toml
 create mode 100644 src/tests/state_cleaning_test/README.md
 create mode 100644 src/tests/state_cleaning_test/data/agg.toml
 create mode 100644 src/tests/state_cleaning_test/data/join.toml
 create mode 100644 src/tests/state_cleaning_test/data/temporal_filter.toml
 create mode 100644 src/tests/state_cleaning_test/src/bin/main.rs
 delete mode 100644 src/udf/README.md
 create mode 100644 src/udf/python/README.md
 create mode 100644 src/udf/python/setup.py

diff --git a/.dockerignore b/.dockerignore
index c5f51dc81c789..984013bc97e2a 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,5 +1,5 @@
 # macOS
-.DS_Store
+**/*.DS_Store
 
 # Visual Studio Code
 .vscode/
diff --git a/.gitattributes b/.gitattributes
index 6c73bee6f834e..becfd0ec71edf 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -2,7 +2,5 @@
 src/tests/regress/data/** linguist-vendored
 # source test data
 scripts/source/test_data/** linguist-vendored
-# generated proto for dashboard
-dashboard/proto/gen/** linguist-generated
 # generated grafana dashboard
 grafana/risingwave-dashboard.json linguist-generated
diff --git a/.github/labeler.yml b/.github/labeler.yml
index 632df45cd766d..ad26ac5632b4b 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -22,6 +22,8 @@ labels:
   title: "^test.*"
 - label: "component/doc"
   title: "^doc.*"
+- label: "type/deprecate"
+  title: "^deprecate.*"
 - label: "user-facing-changes"
   negate: true
diff --git a/.github/pr-title-checker-config.json b/.github/pr-title-checker-config.json
index d372e023742be..2f50d46c2bf9a 100644
--- a/.github/pr-title-checker-config.json
+++ b/.github/pr-title-checker-config.json
@@ -4,7 +4,7 @@
     "color": "B60205"
   },
   "CHECKS": {
-    "regexp": "^(feat|fix|test|refactor|chore|style|doc|perf|build|ci|revert)(\\(.*\\))?:.*",
+    "regexp": "^(feat|fix|test|refactor|chore|style|doc|perf|build|ci|revert|deprecate)(\\(.*\\))?:.*",
     "ignoreLabels" : ["ignore-title"]
   }
 }
diff --git a/.github/workflows/connector-node-integration.yml b/.github/workflows/connector-node-integration.yml
index 1b0a429006e22..dce7651cadbbf 100644
--- a/.github/workflows/connector-node-integration.yml
+++ b/.github/workflows/connector-node-integration.yml
@@ -3,7 +3,7 @@ name: Connector Node Integration tests
 on:
   push:
     branches: [main]
-    path: [java/**, proto/**]
+    paths: [java/**, proto/**]
   pull_request:
     branches: [main]
     paths: [java/**, proto/**]
diff --git a/.github/workflows/dashboard_main.yml b/.github/workflows/dashboard_main.yml
index b20fb0956d4a8..ee7e670bf4ec8 100644
--- a/.github/workflows/dashboard_main.yml
+++ b/.github/workflows/dashboard_main.yml
@@ -1,8 +1,8 @@
 name: Dashboard (main)
 on:
   push:
-    branches: [main]
-    paths: [dashboard/**]
+    branches: [ main ]
+    paths: [ dashboard/** ]
 jobs:
   dashboard-ui-deploy:
     runs-on: ubuntu-latest
@@ -11,6 +11,10 @@ jobs:
     - uses: actions/setup-node@v2
       with:
         node-version: '18'
+    - uses: arduino/setup-protoc@v1
+      with:
+        version: "3.x"
+        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: build
       working-directory: ./dashboard
       run: |
diff --git a/.github/workflows/intergration_tests.yml b/.github/workflows/intergration_tests.yml
new file mode 100644
index 0000000000000..033425bbb9d57
--- /dev/null
+++ b/.github/workflows/intergration_tests.yml
@@ -0,0 +1,113 @@
+name: Integration Tests CI
+
+on:
+  schedule:
+    # Currently we build docker images at 12:00 (UTC), so run this at 13:00
+    - cron: '0 13 * * *'
+  push:
+    branches: [main]
+    paths: [integration_tests/**]
+  pull_request:
+    branches: [main]
+    paths: [integration_tests/**]
+
+jobs:
+  golangci:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: 1.18
+      - uses: actions/checkout@v3
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v3
+        with:
+          working-directory: integration_tests/datagen
+          args: --timeout=120s
+      - name: Go build
+        run: |
+          go mod tidy
+          git diff --exit-code go.mod go.sum
+          go build .
+        working-directory: integration_tests/datagen
+  run-demos:
+    strategy:
+      matrix:
+        testcase:
+          - ad-click
+          - ad-ctr
+          - cdn-metrics
+          - clickstream
+          - livestream
+          - twitter
+          - prometheus
+          - schema-registry
+          - mysql-cdc
+          - postgres-cdc
+          - mysql-sink
+          - postgres-sink
+          - iceberg-sink
+        format: ["json", "protobuf"]
+        exclude:
+          - testcase: ad-click
+            format: protobuf
+          - testcase: ad-ctr
+            format: protobuf
+          - testcase: cdn-metrics
+            format: protobuf
+          - testcase: clickstream
+            format: protobuf
+          - testcase: prometheus
+            format: protobuf
+          # This demo is showcasing avro + schema registry. So there's no file server for the schema file.
+          - testcase: schema-registry
+            format: protobuf
+          - testcase: mysql-cdc
+            format: protobuf
+          - testcase: postgres-cdc
+            format: protobuf
+          - testcase: mysql-sink
+            format: protobuf
+          - testcase: postgres-sink
+            format: protobuf
+          - testcase: iceberg-sink
+            format: protobuf
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      # In this step, this action saves a list of existing images,
+      # the cache is created without them in the post run.
+      # It also restores the cache if it exists.
+      - uses: satackey/action-docker-layer-caching@v0.0.11
+        # Ignore the failure of a step and avoid terminating the job.
+        continue-on-error: true
+
+      - name: Rewrite docker compose for protobuf
+        working-directory: integration_tests/scripts
+        if: ${{ matrix.format == 'protobuf' }}
+        run: |
+          python3 gen_pb_compose.py ${{ matrix.testcase }} ${{ matrix.format }}
+
+      - name: Run Demos
+        working-directory: integration_tests/scripts
+        run: |
+          python3 run_demos.py --case ${{ matrix.testcase }} --format ${{ matrix.format }}
+
+      - name: Check if the ingestion is successful
+        working-directory: integration_tests/scripts
+        run: |
+          python3 check_data.py ${{ matrix.testcase }}
+
+      - name: Dump logs on failure
+        if: ${{ failure() }}
+        working-directory: integration_tests/${{ matrix.testcase }}
+        run: |
+          docker compose logs
+
+      - uses: satackey/action-docker-layer-caching@v0.0.11
+        continue-on-error: true
diff --git a/.github/workflows/typo.yml b/.github/workflows/typo.yml
index 51f1f221b4fba..67684745cf2a4 100644
--- a/.github/workflows/typo.yml
+++ b/.github/workflows/typo.yml
@@ -10,4 +10,4 @@ jobs:
         uses: actions/checkout@v3
 
       - name: Check spelling of the entire repository
-        uses: crate-ci/typos@v1.11.1
+        uses: crate-ci/typos@v1.13.20
diff --git a/.gitignore b/.gitignore
index 3ebf11628f38c..7766e090fb83c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,5 @@
 # macOS
-.DS_Store
+**/*.DS_Store
 
 # Visual Studio Code
 .vscode/*
@@ -49,6 +49,8 @@
 src/log/
 log/
 
+*.log
+
 .risingwave/
 .bin/
diff --git a/.licenserc.yaml b/.licenserc.yaml
index 3589dbe6fd971..43e9315437ef8 100644
--- a/.licenserc.yaml
+++ b/.licenserc.yaml
@@ -8,10 +8,13 @@ header:
     - "dashboard/**/*.js"
     - "dashboard/**/*.ts"
     - "src/**/*.html"
+    - "java/**/*.java"
+    - "java/**/*.py"
   paths-ignore:
     - "**/gen/**"
     - "**/*.d.ts"
     - "src/sqlparser/**/*.rs"
+    - "java/connector-node/risingwave-source-cdc/src/main/java/com/risingwave/connector/cdc/debezium/internal/*.java"
 comment: on-failure
diff --git a/.typos.toml b/.typos.toml
index e90c2bcf83eff..e6867ebfd3bf4 100644
--- a/.typos.toml
+++ b/.typos.toml
@@ -17,4 +17,6 @@ extend-exclude = [
   "scripts",
   "src/frontend/planner_test/tests/testdata",
   "src/tests/sqlsmith/tests/freeze",
+  "**/go.mod",
+  "**/go.sum",
 ]
diff --git a/Cargo.lock b/Cargo.lock
index 1dea5f4876eb8..17247bffd21ec 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@
-85,9 +85,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" dependencies = [ "backtrace", ] @@ -333,12 +333,11 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] @@ -408,9 +407,9 @@ checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ "proc-macro2", "quote", @@ -445,32 +444,10 @@ dependencies = [ [[package]] name = "auto_enums" -version = "0.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0dfe45d75158751e195799f47ea02e81f570aa24bc5ef999cdd9e888c4b5c3" -dependencies = [ - "auto_enums_core", - "auto_enums_derive", -] - -[[package]] -name = "auto_enums_core" -version = "0.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da47c46001293a2c4b744d731958be22cff408a2ab76e2279328f9713b1267b4" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "auto_enums_derive" -version = "0.7.12" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41aed1da83ecdc799503b7cb94da1b45a34d72b49caf40a61d9cf5b88ec07cfd" +checksum = "10143e1d6fc660ac7bfc268c6ec2f9699129a3cfbb241eed50393d1562e0a4ce" dependencies = [ - "autocfg", "derive_utils", "proc-macro2", "quote", @@ -569,6 +546,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "aws-sdk-ec2" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d77e41e0567b874c884661a1eb777f006679464110e6f95c7bafafe0fb607e10" +dependencies = [ + "aws-endpoint", + "aws-http", + "aws-sig-auth", + "aws-smithy-async", + "aws-smithy-client", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-query", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand", + "http", + "tokio-stream", + "tower", +] + [[package]] name = "aws-sdk-kinesis" version = "0.21.0" @@ -861,13 +862,13 @@ dependencies = [ [[package]] name = "axum" -version = "0.6.8" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd379e511536bad07447f899300aa526e9bae8e6f66dc5e5ca45d7587b7c1ec" +checksum = "13d8068b6ccb8b34db9de397c7043f91db8b4c66414952c6db944f238c4d3db3" dependencies = [ "async-trait", "axum-core", - "bitflags", + "bitflags 1.3.2", "bytes", "futures-util", "http", @@ -887,16 +888,15 @@ dependencies = [ "sync_wrapper", "tokio", "tower", - "tower-http", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1cae3e661676ffbacb30f1a824089a8c9150e71017f7e1e38f2aa32009188d34" +checksum = "b2f958c80c248b34b9a877a643811be8dbca03ca5ba827f2b63baf3a81e5fc4e" dependencies = [ "async-trait", "bytes", @@ -974,7 +974,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce860f38082f1544a557dfa447838143e1b0bfa061c0369e407ebadf640001d1" dependencies = [ "bcc-sys", - "bitflags", + "bitflags 1.3.2", "byteorder", "libc", "socket2", @@ -1008,6 +1008,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487f1e0fcbe47deb8b0574e646def1c903389d95241dd1bbcc6ce4a715dfc0c1" + [[package]] name = "bk-tree" version = "0.4.0" @@ -1020,9 +1026,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -1088,9 +1094,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffdb39cb703212f3c11973452c2861b972f757b021158f3516ba10f2fa8b2c1" +checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" dependencies = [ "memchr", "serde", @@ -1104,19 +1110,20 @@ checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "bytecheck" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" +checksum = "13fe11640a23eb24562225322cd3e452b93a3d4091d62fab69c70542fcd17d1f" dependencies = [ "bytecheck_derive", "ptr_meta", + "simdutf8", ] [[package]] name = "bytecheck_derive" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" +checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5" dependencies = [ "proc-macro2", "quote", @@ -1131,9 +1138,9 @@ checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" [[package]] name = "bytemuck" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c041d3eab048880cb0b86b256447da3f18859a163c3b8d8893f4e6368abe6393" +checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" [[package]] name = "byteorder" @@ -1192,9 +1199,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6031a462f977dd38968b6f23378356512feeace69cef817e1a4475108093cec3" +checksum = "c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2" dependencies = [ "serde", ] @@ -1256,9 +1263,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = 
"4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -1327,7 +1334,7 @@ version = "3.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" dependencies = [ - "bitflags", + "bitflags 1.3.2", "clap_lex 0.2.4", "indexmap", "textwrap", @@ -1335,13 +1342,13 @@ dependencies = [ [[package]] name = "clap" -version = "4.1.8" +version = "4.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d7ae14b20b94cb02149ed21a86c423859cbe18dc7ed69845cace50e52b40a5" +checksum = "42dfd32784433290c51d92c438bb72ea5063797fc3cc9a21a8c4346bebbb2098" dependencies = [ - "bitflags", + "bitflags 2.0.2", "clap_derive", - "clap_lex 0.3.2", + "clap_lex 0.3.3", "is-terminal", "once_cell", "strsim", @@ -1350,9 +1357,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.1.8" +version = "4.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bec8e5c9d09e439c4335b1af0abaab56dcf3b94999a936e1bb47b9134288f0" +checksum = "fddf67631444a3a3e3e5ac51c36a5e01335302de677bd78759eaa90ab1f46644" dependencies = [ "heck 0.4.1", "proc-macro-error", @@ -1372,9 +1379,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350b9cf31731f9957399229e9b2adc51eeabdfbe9d71d9a0552275fd12710d09" +checksum = "033f6b7a4acb1f358c742aaca805c939ee73b4c6209ae4318ec7aca81c42e646" dependencies = [ "os_str_bytes", ] @@ -1667,9 +1674,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1677,9 +1684,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -1688,14 +1695,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset 0.7.1", + "memoffset 0.8.0", "scopeguard", ] @@ -1711,9 +1718,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] @@ -1724,7 +1731,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67" dependencies = [ - "bitflags", + "bitflags 1.3.2", "crossterm_winapi", "libc", "mio", @@ -1759,6 +1766,18 @@ dependencies = [ "typenum", ] +[[package]] +name = "csv" 
+version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + [[package]] name = "csv-core" version = "0.1.10" @@ -1795,9 +1814,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.60+curl-7.88.1" +version = "0.4.61+curl-8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "717abe2cb465a5da6ce06617388a3980c9a2844196734bec8ccb8e575250f13f" +checksum = "14d05c10f541ae6f3bc5b3d923c20001f47db7d5f0b2bc6ad16490133842db79" dependencies = [ "cc", "libc", @@ -1810,9 +1829,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d3488e7665a7a483b57e25bdd90d0aeb2bc7608c8d0346acf2ad3f1caf1d62" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -1822,9 +1841,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fcaf066a053a41a81dfb14d57d99738b767febb8b735c3016e469fac5da690" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", @@ -1837,15 +1856,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ef98b8b717a829ca5603af80e1f9e2e48013ab227b68ef37872ef84ee479bf" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ "proc-macro2", "quote", @@ -1864,12 +1883,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ - "darling_core 0.14.3", - "darling_macro 0.14.3", + "darling_core 0.14.4", + "darling_macro 0.14.4", ] [[package]] @@ -1888,9 +1907,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", @@ -1913,11 +1932,11 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ - "darling_core 0.14.3", + "darling_core 0.14.4", "quote", "syn", ] @@ -2021,7 +2040,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" 
dependencies = [ - "darling 0.14.3", + "darling 0.14.4", "proc-macro2", "quote", "syn", @@ -2039,9 +2058,9 @@ dependencies = [ [[package]] name = "derive_utils" -version = "0.11.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532b4c15dccee12c7044f1fcad956e98410860b22231e44a3b827464797ca7bf" +checksum = "7590f99468735a318c254ca9158d0c065aa9b5312896b5a043b5e39bc96f5fa2" dependencies = [ "proc-macro2", "quote", @@ -2138,9 +2157,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9b0705efd4599c15a38151f4721f7bc388306f61084d3bfd50bd07fbca5cb60" +checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" [[package]] name = "easy-ext" @@ -2195,18 +2214,18 @@ dependencies = [ [[package]] name = "enum-iterator" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea166b3f7dc1032f7866d13f8d8e02c8d87507b61750176b86554964dc6a7bf" +checksum = "706d9e7cf1c7664859d79cd524e4e53ea2b67ea03c98cc2870c5e539695d597e" dependencies = [ "enum-iterator-derive", ] [[package]] name = "enum-iterator-derive" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "828de45d0ca18782232dfb8f3ea9cc428e8ced380eb26a520baaacfc70de39ce" +checksum = "355f93763ef7b0ae1c43c4d8eccc9d5848d84ad1a1d8ce61c421d1ac85a19d05" dependencies = [ "proc-macro2", "quote", @@ -2311,7 +2330,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "084632422a59165633e28f8436545a6694b3d659405ceb3be0a441d4bfbf25d1" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -2344,7 +2363,7 @@ version = "23.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77f5399c2c9c50ae9418e522842ad362f61ee48b346ac106807bd355a8a7c619" dependencies = [ - "bitflags", + "bitflags 1.3.2", "rustc_version", ] @@ -2439,9 +2458,9 @@ checksum = "673464e1e314dd67a0fd9544abc99e8eb28d0c7e3b69b033bcff9b2d00b87333" [[package]] name = "futures" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" dependencies = [ "futures-channel", "futures-core", @@ -2476,9 +2495,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" dependencies = [ "futures-core", "futures-sink", @@ -2486,15 +2505,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" [[package]] name = "futures-executor" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" dependencies = [ 
"futures-core", "futures-task", @@ -2503,9 +2522,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" [[package]] name = "futures-lite" @@ -2524,9 +2543,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" dependencies = [ "proc-macro2", "quote", @@ -2535,15 +2554,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" [[package]] name = "futures-timer" @@ -2553,9 +2572,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" dependencies = [ "futures-channel", "futures-core", @@ -2751,9 +2770,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes", "fnv", @@ -2968,9 +2987,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes", "futures-channel", @@ -3096,9 +3115,9 @@ dependencies = [ [[package]] name = "indextree" -version = "4.5.0" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497f036ac2fae75c34224648a77802e5dd4e9cfb56f4713ab6b12b7160a0523b" +checksum = "c40411d0e5c63ef1323c3d09ce5ec6d84d71531e18daed0743fccea279d7deb6" [[package]] name = "indicatif" @@ -3153,10 +3172,11 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "io-lifetimes" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" +checksum = "09270fd4fa1111bc614ed2246c7ef56239a3063d5be0d1ec3b589c505d400aeb" dependencies = [ + "hermit-abi 0.3.1", "libc", "windows-sys 0.45.0", ] @@ -3169,9 +3189,9 @@ checksum 
= "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" [[package]] name = "is-terminal" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857" +checksum = "8687c819457e979cc940d09cb16e42a1bf70aa6b60a549de6d3a62a0ee90c69e" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", @@ -3223,9 +3243,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jni" @@ -3249,9 +3269,9 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] @@ -3267,11 +3287,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.2.0" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.13.1", + "base64 0.21.0", "pem", "ring", "serde", @@ -3387,9 +3407,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.139" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libflate" @@ -3423,7 +3443,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7b603516767d1ab23d0de09d023e62966c3322f7148297c35cf3d97aa8b37fa" dependencies = [ - "clap 4.1.8", + "clap 4.1.11", "termcolor", "threadpool", ] @@ -3554,9 +3574,9 @@ dependencies = [ [[package]] name = "madsim" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e3c98b41d46214f4ae435a95e246710ad7fb1100754f809dd7c18606a7607c4" +checksum = "c846a15d407458f1ac5da7da965810277229be9c96ed8082a3eaf2787ef81c23" dependencies = [ "ahash 0.7.6", "async-channel", @@ -3570,12 +3590,13 @@ dependencies = [ "madsim-macros", "naive-timer", "rand 0.8.5", + "rand_xoshiro", "rustversion", "serde", - "spin 0.9.5", + "spin 0.9.6", "tokio", "tokio-util", - "toml 0.7.2", + "toml 0.7.3", "tracing", "tracing-subscriber", ] @@ -3593,26 +3614,26 @@ dependencies = [ "bytes", "http", "madsim", - "spin 0.9.5", + "spin 0.9.6", "tracing", ] [[package]] name = "madsim-etcd-client" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb918383c4f5966f29760ec48820e1c2846739e4ae411c2a8aaa4466ce1421b7" +checksum = "72b3894525ac4b7d5732b2123f9d29d018005c96a218e5a7c38d1f42601b927d" dependencies = [ "etcd-client", "futures-util", "http", "madsim", "serde", - "serde_with 2.2.0", - "spin 0.9.5", + "serde_with 2.3.1", + "spin 0.9.6", "thiserror", "tokio", - "toml 0.7.2", + "toml 0.7.3", "tonic", "tracing", ] @@ -3623,7 +3644,7 @@ version = "0.2.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f3d248e97b1a48826a12c3828d921e8548e714394bf17274dd0a93910dc946e1" dependencies = [ - "darling 0.14.3", + "darling 0.14.4", "proc-macro2", "quote", "syn", @@ -3632,8 +3653,7 @@ dependencies = [ [[package]] name = "madsim-rdkafka" version = "0.2.14-alpha" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945034b3d7c612f5bed8a34dcefd9278801bab180470e92d4b2297ddb3023cc8" +source = "git+https://github.com/madsim-rs/madsim.git?rev=43e025d#43e025db997df923cf6b891cfb874fe6dabba994" dependencies = [ "async-channel", "async-trait", @@ -3647,7 +3667,7 @@ dependencies = [ "serde_derive", "serde_json", "slab", - "spin 0.9.5", + "spin 0.9.6", "thiserror", "tokio", "tracing", @@ -3665,9 +3685,9 @@ dependencies = [ [[package]] name = "madsim-tonic" -version = "0.2.14" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420ca55ac297f5a3555cb03fdb085e7e91b1287dd872751a6b30dd3c3573277c" +checksum = "0a0d4e7468777e5885b6c3b88a97e3dd81547e0f3304324126c1a07ae89be470" dependencies = [ "async-stream", "chrono", @@ -3771,18 +3791,18 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -3802,25 +3822,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "minitrace" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a07fdf302cc0591c97eb45939550ddaddd9962e400c20b319aa16c244cb1f16" +checksum = "317e28b8c337ada2fd437611c241ce053d5b7f5480b79e945597996b87b1de96" dependencies = [ - "crossbeam", "futures", "minitrace-macro", "minstant", "once_cell", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "pin-project", - "retain_mut", ] [[package]] name = "minitrace-jaeger" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e130c7db911742ff545bb42d4d32c360cb272204ce021c50fd1c9f89fa65cf5b" +checksum = "7a5abe3273cd524b57b11925721595df5f8e957344c70e285a3a31c7e21523ac" dependencies = [ "async-std", "minitrace", @@ -3829,9 +3847,9 @@ dependencies = [ [[package]] name = "minitrace-macro" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4132dfe6097f4a90c0bbb34be0687c38d14303dd2e74f8442ae80e9bc5a34c47" +checksum = "77814d165883613a1846517efdc50b88fabd9c210b7ff4d3745b38b99d539652" dependencies = [ "proc-macro-error", "proc-macro2", @@ -3954,7 +3972,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4" dependencies = [ "autocfg", - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", "memoffset 0.6.5", @@ -3967,7 +3985,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", "static_assertions", @@ -3983,15 +4001,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "nom8" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8" -dependencies = [ - "memchr", -] - [[package]] name = "ntapi" version = "0.4.0" @@ -4151,7 +4160,7 @@ version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ - "proc-macro-crate 1.3.0", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn", @@ -4215,9 +4224,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "opendal" -version = "0.27.2" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef6f7b936f2f8483e19643357cb50d9ec9a49c506971ef69ca676913cf5afd91" +checksum = "ecd1bedb1311f05ef3a0cf6dbeb58ee0e01e75735d74003343c802d623b4c6d2" dependencies = [ "anyhow", "async-compat", @@ -4248,9 +4257,9 @@ dependencies = [ [[package]] name = "openidconnect" -version = "2.5.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a0f47b0f1499d08c4a8480c963d49c5ec77f4249c2b6869780979415f45809" +checksum = "98dd5b7049bac4fdd2233b8c9767d42c05da8006fdb79cc903258556d2b18009" dependencies = [ "base64 0.13.1", "chrono", @@ -4275,11 +4284,11 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "518915b97df115dd36109bfa429a48b8f737bd05508cf9588977b599648926d2" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "foreign-types", "libc", @@ -4307,18 +4316,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.0+1.1.1t" +version = "111.25.2+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3173cd3626c43e3854b1b727422a276e568d9ec5fe8cec197822cf52cfb743d6" +checksum = "320708a054ad9b3bf314688b5db87cf4d6683d64cfc835e2337924ae62bf4431" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "666416d899cf077260dac8698d60a60b435a46d57e82acb1be3d0dad87284e5b" dependencies = [ "autocfg", "cc", @@ -4429,9 +4438,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.4.1" +version = "6.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" +checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" [[package]] name = "ouroboros" @@ -4571,9 +4580,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "path-absolutize" @@ -4815,16 +4824,18 @@ dependencies = 
[ [[package]] name = "polling" -version = "2.5.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg", + "bitflags 1.3.2", "cfg-if", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite", + "windows-sys 0.45.0", ] [[package]] @@ -4935,6 +4946,12 @@ dependencies = [ "getopts", ] +[[package]] +name = "pretty-xmlish" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a14b10003fc996819269d0f74cc1c241707eec6d53f2bba6980758977ce386" + [[package]] name = "pretty_assertions" version = "1.3.0" @@ -4949,9 +4966,9 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.1.23" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ "proc-macro2", "syn", @@ -4968,12 +4985,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit 0.18.1", + "toml_edit", ] [[package]] @@ -5021,7 +5038,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0941606b9934e2d98a3677759a971756eb821f75764d0e0d26946d08e74d9104" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "hex", "lazy_static", @@ -5034,7 +5051,7 @@ version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de8dacb0873f77e6aefc6d71e044761fcc68060290f5b1089fcdf84626bb69" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "hex", "lazy_static", @@ -5224,7 +5241,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d9cc634bc78768157b5cbfe988ffcd1dcba95cd2b2f03a88316c08c6d00ed63" dependencies = [ - "bitflags", + "bitflags 1.3.2", "getopts", "memchr", "unicase", @@ -5300,6 +5317,16 @@ dependencies = [ "serde", ] +[[package]] +name = "quick-xml" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5c1a97b1bc42b1d550bfb48d4262153fe400a12bab1511821736f7eac76d7e2" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "quote" version = "1.0.23" @@ -5380,6 +5407,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_xoshiro" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "random-string" version = "1.0.0" @@ -5391,18 +5427,18 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.6.1" +version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c307f7aacdbab3f0adee67d52739a1d71112cc068d6fab169ddeb18e48877fad" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -5410,9 +5446,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -5423,8 +5459,7 @@ dependencies = [ [[package]] name = "rdkafka-sys" version = "4.3.0+1.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d222a401698c7f2010e3967353eae566d9934dcda49c29910da922414ab4e3f4" +source = "git+https://github.com/MaterializeInc/rust-rdkafka?rev=8ea07c4#8ea07c4d2b96636ff093e670bc921892aee0d56a" dependencies = [ "cmake", "libc", @@ -5455,7 +5490,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -5512,9 +5547,9 @@ dependencies = [ [[package]] name = "reqsign" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef4d5fefeaaa1e64f4aabb79da4ea68bf6d0e7935ad927728280d2a8e95735fc" +checksum = "a7db6d8d2cd7fa61403d14de670f98d7cedac38143681c124943d7bb69258b3a" dependencies = [ "anyhow", "backon", @@ -5529,7 +5564,7 @@ dependencies = [ "log", "once_cell", "percent-encoding", - "quick-xml 0.27.1", + "quick-xml 0.28.1", "rand 0.8.5", "rsa", "rust-ini", @@ -5543,9 +5578,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "0ba30cc2c0cd02af1222ed216ba659cdb2f879dfe3181852fe7c50b1d0005949" dependencies = [ "base64 0.21.0", "bytes", @@ -5623,7 +5658,7 @@ version = "0.2.0-alpha" dependencies = [ "anyhow", "chrono", - "clap 4.1.8", + "clap 4.1.11", "console", "dialoguer", "enum-iterator", @@ -5638,7 +5673,7 @@ dependencies = [ "regex", "serde", "serde_json", - "serde_with 2.2.0", + "serde_with 2.3.1", "serde_yaml", "tempfile", "workspace-hack", @@ -5669,7 +5704,7 @@ dependencies = [ name = "risingwave_backup_cmd" version = "0.2.0-alpha" dependencies = [ - "clap 4.1.8", + "clap 4.1.11", "madsim-tokio", "risingwave_backup", "risingwave_meta", @@ -5729,7 +5764,7 @@ dependencies = [ "bcc", "bytes", "bytesize", - "clap 4.1.8", + "clap 4.1.11", "futures", "hdrhistogram", "isahc", @@ -5746,7 +5781,7 @@ dependencies = [ "risingwave_storage", "serde", "tokio-stream", - "toml 0.5.11", + "toml 0.7.3", "tracing", "tracing-opentelemetry", "tracing-subscriber", @@ -5758,7 +5793,7 @@ name = "risingwave_cmd" version = "0.2.0-alpha" dependencies = [ "anyhow", - "clap 4.1.8", + "clap 4.1.11", "madsim-tokio", "risingwave_common", "risingwave_compactor", @@ -5778,7 +5813,7 @@ name = "risingwave_cmd_all" version = "0.2.0-alpha" dependencies = [ "anyhow", - "clap 4.1.8", + "clap 4.1.11", "console", "madsim-tokio", "risingwave_common", @@ -5806,12 +5841,12 @@ dependencies = [ "arrow-schema", "async-trait", "auto_enums", - "bitflags", + "bitflags 2.0.2", "byteorder", "bytes", "chrono", 
"chrono-tz", - "clap 4.1.8", + "clap 4.1.11", "comfy-table", "crc32fast", "criterion", @@ -5843,6 +5878,7 @@ dependencies = [ "prost 0.11.8", "rand 0.8.5", "regex", + "reqwest", "risingwave_pb", "rust_decimal", "ryu", @@ -5854,10 +5890,11 @@ dependencies = [ "sysinfo", "tempfile", "thiserror", - "toml 0.5.11", + "toml 0.7.3", "tracing", "twox-hash", "url", + "uuid", "workspace-hack", ] @@ -5898,7 +5935,7 @@ dependencies = [ "anyhow", "async-trait", "bytes", - "clap 4.1.8", + "clap 4.1.11", "futures", "itertools", "madsim-tokio", @@ -5922,8 +5959,9 @@ dependencies = [ name = "risingwave_compactor" version = "0.2.0-alpha" dependencies = [ + "anyhow", "async-trait", - "clap 4.1.8", + "clap 4.1.11", "madsim-tokio", "madsim-tonic", "prometheus", @@ -5935,6 +5973,8 @@ dependencies = [ "risingwave_pb", "risingwave_rpc_client", "risingwave_storage", + "serde", + "serde_json", "tracing", "workspace-hack", ] @@ -5946,7 +5986,7 @@ dependencies = [ "anyhow", "async-trait", "await-tree", - "clap 4.1.8", + "clap 4.1.11", "either", "futures", "futures-async-stream", @@ -5971,12 +6011,14 @@ dependencies = [ "risingwave_storage", "risingwave_stream", "risingwave_tracing", + "serde", "serde_json", "tempfile", "tikv-jemalloc-ctl", "tokio-stream", "tower", "tracing", + "uuid", "workspace-hack", ] @@ -5988,6 +6030,7 @@ dependencies = [ "apache-avro", "async-trait", "aws-config", + "aws-sdk-ec2", "aws-sdk-kinesis", "aws-sdk-s3", "aws-smithy-http", @@ -5996,7 +6039,7 @@ dependencies = [ "byteorder", "bytes", "chrono", - "csv-core", + "csv", "duration-str", "enum-as-inner", "futures", @@ -6031,7 +6074,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "serde_with 2.2.0", + "serde_with 2.3.1", "simd-json", "tempfile", "thiserror", @@ -6052,7 +6095,7 @@ dependencies = [ "anyhow", "bytes", "chrono", - "clap 4.1.8", + "clap 4.1.11", "comfy-table", "futures", "itertools", @@ -6074,6 +6117,21 @@ dependencies = [ "workspace-hack", ] +[[package]] +name = "risingwave_e2e_extended_mode_test" +version = "0.2.0-alpha" +dependencies = [ + "anyhow", + "chrono", + "clap 4.1.11", + "pg_interval", + "rust_decimal", + "tokio", + "tokio-postgres", + "tracing", + "tracing-subscriber", +] + [[package]] name = "risingwave_expr" version = "0.2.0-alpha" @@ -6082,11 +6140,14 @@ dependencies = [ "anyhow", "arrow-array", "arrow-schema", + "async-trait", "chrono", "chrono-tz", "criterion", + "ctor", "dyn-clone", "either", + "futures-util", "itertools", "madsim-tokio", "md5", @@ -6096,13 +6157,28 @@ dependencies = [ "paste", "regex", "risingwave_common", + "risingwave_expr_macro", "risingwave_pb", "risingwave_udf", + "serde_json", "speedate", + "static_assertions", "thiserror", + "tracing", "workspace-hack", ] +[[package]] +name = "risingwave_expr_macro" +version = "0.1.0" +dependencies = [ + "itertools", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "risingwave_frontend" version = "0.2.0-alpha" @@ -6115,7 +6191,7 @@ dependencies = [ "async-trait", "bk-tree", "bytes", - "clap 4.1.8", + "clap 4.1.11", "derivative", "downcast-rs", "dyn-clone", @@ -6124,6 +6200,7 @@ dependencies = [ "fixedbitset", "futures", "futures-async-stream", + "iana-time-zone", "itertools", "lazy_static", "madsim-tokio", @@ -6138,6 +6215,7 @@ dependencies = [ "pgwire", "pin-project-lite", "postgres-types", + "pretty-xmlish", "prometheus", "rand 0.8.5", "risingwave_batch", @@ -6217,6 +6295,7 @@ version = "0.1.0" dependencies = [ "bytes", "futures", + "itertools", "jni", "madsim-tokio", "prost 0.11.8", @@ -6237,9 +6316,11 @@ 
dependencies = [ "arc-swap", "assert_matches", "async-trait", + "aws-config", + "aws-sdk-ec2", "axum", "bytes", - "clap 4.1.8", + "clap 4.1.11", "crepe", "easy-ext", "either", @@ -6273,6 +6354,7 @@ dependencies = [ "risingwave_object_store", "risingwave_pb", "risingwave_rpc_client", + "risingwave_sqlparser", "risingwave_test_runner", "scopeguard", "serde", @@ -6312,7 +6394,7 @@ dependencies = [ "prometheus", "random-string", "risingwave_common", - "spin 0.9.5", + "spin 0.9.6", "tempfile", "thiserror", "tracing", @@ -6322,6 +6404,7 @@ dependencies = [ name = "risingwave_pb" version = "0.2.0-alpha" dependencies = [ + "enum-as-inner", "madsim-tonic", "madsim-tonic-build", "pbjson", @@ -6346,7 +6429,7 @@ dependencies = [ "risingwave_frontend", "risingwave_sqlparser", "serde", - "serde_with 2.2.0", + "serde_with 2.3.1", "serde_yaml", "tempfile", "walkdir", @@ -6358,7 +6441,7 @@ name = "risingwave_regress_test" version = "0.2.0-alpha" dependencies = [ "anyhow", - "clap 4.1.8", + "clap 4.1.11", "madsim-tokio", "path-absolutize", "similar", @@ -6414,17 +6497,19 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.1.8", + "clap 4.1.11", "console", "futures", "glob", "itertools", + "lru", "madsim", "madsim-aws-sdk-s3", "madsim-etcd-client", "madsim-rdkafka", "madsim-tokio", "paste", + "pin-project", "pretty_assertions", "rand 0.8.5", "risingwave_common", @@ -6434,6 +6519,7 @@ dependencies = [ "risingwave_frontend", "risingwave_meta", "risingwave_pb", + "risingwave_sqlparser", "risingwave_sqlsmith", "serde", "serde_derive", @@ -6483,8 +6569,12 @@ name = "risingwave_sqlparser_test_runner" version = "0.1.0" dependencies = [ "anyhow", + "console", + "futures", + "madsim-tokio", "risingwave_sqlparser", "serde", + "serde_with 2.3.1", "serde_yaml", "walkdir", "workspace-hack", @@ -6496,12 +6586,13 @@ version = "0.2.0-alpha" dependencies = [ "anyhow", "chrono", - "clap 4.1.8", + "clap 4.1.11", "itertools", "libtest-mimic", "madsim-tokio", "rand 0.8.5", "rand_chacha 0.3.1", + "regex", "risingwave_common", "risingwave_expr", "risingwave_frontend", @@ -6512,6 +6603,27 @@ dependencies = [ "workspace-hack", ] +[[package]] +name = "risingwave_state_cleaning_test" +version = "0.2.0-alpha" +dependencies = [ + "anyhow", + "chrono", + "clap 4.1.11", + "futures", + "itertools", + "madsim-tokio", + "regex", + "risingwave_rt", + "serde", + "serde_with 2.3.1", + "tokio-postgres", + "tokio-stream", + "toml 0.7.3", + "tracing", + "workspace-hack", +] + [[package]] name = "risingwave_storage" version = "0.2.0-alpha" @@ -6523,6 +6635,7 @@ dependencies = [ "bytes", "criterion", "crossbeam", + "darwin-libproc", "dashmap", "dyn-clone", "enum-as-inner", @@ -6530,9 +6643,11 @@ dependencies = [ "fiemap", "futures", "futures-async-stream", + "hex", "itertools", "libc", "lz4", + "mach", "madsim-tokio", "memcomparable", "minitrace", @@ -6540,6 +6655,7 @@ dependencies = [ "moka", "nix 0.25.1", "parking_lot 0.12.1", + "procfs 0.12.0", "prometheus", "prost 0.11.8", "rand 0.8.5", @@ -6554,8 +6670,9 @@ dependencies = [ "risingwave_tracing", "scopeguard", "sled", - "spin 0.9.5", + "spin 0.9.6", "sync-point", + "sysinfo", "tempfile", "thiserror", "tokio-retry", @@ -6612,6 +6729,7 @@ dependencies = [ "risingwave_rpc_client", "risingwave_source", "risingwave_storage", + "serde_json", "smallvec", "static_assertions", "task_stats_alloc", @@ -6619,6 +6737,7 @@ dependencies = [ "tokio-metrics", "tokio-stream", "tracing", + "tracing-test", "workspace-hack", ] @@ -6691,9 +6810,9 @@ checksum = 
"3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" [[package]] name = "rsa" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b3896c9b7790b70a9aa314a30e4ae114200992a19c96cbe0ca6070edd32ab8" +checksum = "55a77d189da1fee555ad95b7e50e7457d91c0e089ec68ca69ad2989413bbdab4" dependencies = [ "byteorder", "digest", @@ -6722,9 +6841,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.28.1" +version = "1.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13cf35f7140155d02ba4ec3294373d513a3c7baa8364c162b030e33c61520a8" +checksum = "2b1b21b8760b0ef8ae5b43d40913ff711a2053cb7ff892a34facff7a6365375a" dependencies = [ "arrayvec", "borsh", @@ -6757,11 +6876,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.8" +version = "0.36.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43abb88211988493c1abb44a70efa56ff0ce98f233b7b276146f1f3f7ba9644" +checksum = "db4165c9963ab29e422d6c26fbc1d37f15bace6b2810221f9d925023480fcf0e" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno", "io-lifetimes", "libc", @@ -6804,15 +6923,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "same-file" @@ -6847,9 +6966,9 @@ dependencies = [ [[package]] name = "scheduled-thread-pool" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ "parking_lot 0.12.1", ] @@ -6868,9 +6987,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "sct" @@ -6894,7 +7013,7 @@ version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -6913,18 +7032,18 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = 
"314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" dependencies = [ "serde_derive", ] @@ -6941,9 +7060,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" dependencies = [ "proc-macro2", "quote", @@ -6952,9 +7071,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "itoa", "ryu", @@ -6963,9 +7082,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b04f22b563c91331a10074bda3dd5492e3cc39d56bd557e91c0af42b6c7341" +checksum = "db0969fff533976baadd92e08b1d102c5a3d8a8049eadfd69d4d1e3c5b2ed189" dependencies = [ "serde", ] @@ -7023,9 +7142,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d904179146de381af4c93d3af6ca4984b3152db687dacb9c3c35e86f39809c" +checksum = "85456ffac572dc8826334164f2fb6fb40a7c766aebe195a2a21ee69ee2885ecf" dependencies = [ "base64 0.13.1", "chrono", @@ -7033,7 +7152,7 @@ dependencies = [ "indexmap", "serde", "serde_json", - "serde_with_macros 2.2.0", + "serde_with_macros 2.3.1", "time 0.3.17", ] @@ -7051,11 +7170,11 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1966009f3c05f095697c537312f5415d1e3ed31ce0a56942bac4c771c5c335e" +checksum = "7cbcd6104f8a4ab6af7f6be2a0da6be86b9de3c401f6e86bb856ab2af739232f" dependencies = [ - "darling 0.14.3", + "darling 0.14.4", "proc-macro2", "quote", "syn", @@ -7063,9 +7182,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.17" +version = "0.9.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb06d4b6cdaef0e0c51fa881acb721bed3c924cfaa71d9c94a3b771dfdf6567" +checksum = "f82e6c8c047aa50a7328632d067bcae6ef38772a79e28daf32f735e0e4f3dd10" dependencies = [ "indexmap", "itoa", @@ -7310,9 +7429,9 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -7336,9 +7455,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spin" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dccf47db1b41fa1573ed27ccf5e08e3ca771cb994f776668c5ebda893b248fc" +checksum = "b5d6e0250b93c8427a177b849d144a96d5acc57006149479403d7861ab721e34" dependencies = [ "lock_api", ] @@ -7501,7 +7620,7 @@ version = "0.1.0" dependencies = [ "futures-util", "madsim-tokio", - "spin 0.9.5", + "spin 0.9.6", "thiserror", ] @@ -7522,7 +7641,6 @@ 
dependencies = [ "libc", "ntapi", "once_cell", - "rayon", "winapi", ] @@ -7570,18 +7688,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", @@ -7633,12 +7751,12 @@ dependencies = [ [[package]] name = "thrift_codec" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb61fb3d0a0af14949f3a6949b2639112e13226647112824f4d081533f9b1a8" +checksum = "fce3200b189fd4733eb2bb22235755c8aa0361ba1c66b67db54893144d147279" dependencies = [ "byteorder", - "trackable 0.2.24", + "trackable", ] [[package]] @@ -7748,9 +7866,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.25.0" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg", "bytes", @@ -7764,7 +7882,7 @@ dependencies = [ "socket2", "tokio-macros", "tracing", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -7900,22 +8018,16 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6" +checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21" dependencies = [ "serde", "serde_spanned", - "toml_datetime 0.6.1", - "toml_edit 0.19.4", + "toml_datetime", + "toml_edit", ] -[[package]] -name = "toml_datetime" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" - [[package]] name = "toml_datetime" version = "0.6.1" @@ -7927,25 +8039,14 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b" -dependencies = [ - "indexmap", - "nom8", - "toml_datetime 0.5.1", -] - -[[package]] -name = "toml_edit" -version = "0.19.4" +version = "0.19.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825" +checksum = "dc18466501acd8ac6a3f615dd29a3438f8ca6bb3b19537138b3106e575621274" dependencies = [ "indexmap", "serde", "serde_spanned", - "toml_datetime 0.6.1", + "toml_datetime", "winnow", ] @@ -8024,7 +8125,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" dependencies = [ - "bitflags", + "bitflags 1.3.2", "bytes", "futures-core", "futures-util", @@ -8038,7 +8139,6 @@ 
dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower", "tower-layer", "tower-service", ] @@ -8145,13 +8245,26 @@ dependencies = [ ] [[package]] -name = "trackable" -version = "0.2.24" +name = "tracing-test" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98abb9e7300b9ac902cc04920945a874c1973e08c310627cc4458c04b70dd32" +checksum = "3a2c0ff408fe918a94c428a3f2ad04e4afd5c95bbc08fcf868eff750c15728a4" dependencies = [ - "trackable 1.2.0", - "trackable_derive", + "lazy_static", + "tracing-core", + "tracing-subscriber", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "258bc1c4f8e2e73a977812ab339d503e6feeb92700f6d07a6de4d321522d5c08" +dependencies = [ + "lazy_static", + "quote", + "syn", ] [[package]] @@ -8239,15 +8352,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -8272,9 +8385,9 @@ checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unsafe-libyaml" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc7ed8ba44ca06be78ea1ad2c3682a43349126c8818054231ee6f4748012aed2" +checksum = "ad2024452afd3874bf539695e04af6732ba06517424dbf958fdb16a01f3bef6c" [[package]] name = "untrusted" @@ -8375,12 +8488,11 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] @@ -8526,15 +8638,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" version = "4.4.0" @@ -8612,9 +8715,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -8627,51 +8730,51 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = 
"windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "winnow" -version = "0.3.3" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf09497b8f8b5ac5d3bb4d05c0a99be20f26fd3d5f2db7b0716e946d5103658" +checksum = "23d020b441f92996c80d94ae9166e8501e59c7bb56121189dc9eab3bd8216966" dependencies = [ "memchr", ] @@ -8726,7 +8829,6 @@ dependencies = [ "anyhow", "arrayvec", "auto_enums", - "auto_enums_derive", "aws-sdk-s3", "aws-smithy-client", "aws-types", @@ -8734,14 +8836,13 @@ dependencies = [ "bytes", "cc", "chrono", - "clap 4.1.8", + "clap 4.1.11", "combine", "criterion", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", "crossbeam-utils", - "digest", "either", "fail", "fixedbitset", @@ -8780,6 +8881,7 @@ dependencies = [ "petgraph", "phf", "phf_shared", + "postgres-types", "proc-macro2", "prometheus", "prost 0.11.8", @@ -8791,8 +8893,11 @@ dependencies = [ "regex-syntax", "reqwest", "ring", + "rust_decimal", "scopeguard", "serde", + "serde_json", + "serde_with 2.3.1", "smallvec", "socket2", "strum", @@ -8800,12 +8905,12 @@ dependencies = [ "syn", "time 0.3.17", "tokio", + "tokio-postgres", "tokio-stream", "tokio-util", "tonic", "tonic-build", "tower", - "tower-http", "tracing", "tracing-core", "tracing-futures", diff --git a/Cargo.toml b/Cargo.toml index 69cecc51c7237..640214a213d91 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ members = [ "src/connector", "src/ctl", "src/expr", + "src/expr/macro", "src/frontend", "src/frontend/planner_test", "src/java_binding", @@ -32,9 +33,11 @@ members = [ "src/stream", "src/test_runner", "src/tests/compaction_test", + "src/tests/e2e_extended_mode", "src/tests/regress", "src/tests/simulation", "src/tests/sqlsmith", + "src/tests/state_cleaning_test", 
"src/tracing", "src/udf", "src/utils/local_stats_alloc", @@ -57,6 +60,7 @@ repository = "https://github.com/risingwavelabs/risingwave" aws-config = { version = "0.51", default-features = false, features = ["rt-tokio", "native-tls"] } aws-sdk-kinesis = { version = "0.21", default-features = false, features = ["rt-tokio", "native-tls"] } aws-sdk-s3 = { version = "0.21", default-features = false, features = ["rt-tokio","native-tls"] } +aws-sdk-ec2 = { version = "0.21", default-features = false, features = ["rt-tokio","native-tls"] } aws-sdk-sqs = { version = "0.21", default-features = false, features = ["rt-tokio", "native-tls"] } aws-smithy-http = "0.51" aws-smithy-types = "0.51" @@ -121,3 +125,4 @@ tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "0c25710" tokio-retry = { git = "https://github.com/madsim-rs/rust-tokio-retry.git", rev = "95e2fd3" } tokio-postgres = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "87ca1dc" } postgres-types = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "87ca1dc" } +madsim-rdkafka = { git = "https://github.com/madsim-rs/madsim.git", rev = "43e025d" } diff --git a/Makefile.toml b/Makefile.toml index 94b199d94439d..403213cc0b0b2 100644 --- a/Makefile.toml +++ b/Makefile.toml @@ -9,6 +9,7 @@ extend = [ { path = "src/risedevtool/redis.toml" }, { path = "src/risedevtool/connector.toml" }, { path = "src/risedevtool/risedev-components.toml" }, + { path = "src/sqlparser/test_runner/sqlparser_test.toml"}, { path = "src/frontend/planner_test/planner_test.toml" }, { path = "src/tests/compaction_test/Makefile.toml" }, { path = "src/storage/backup/integration_tests/Makefile.toml" }, @@ -21,6 +22,8 @@ env_scripts = [ ''' #!@duckscript +set_env ENABLE_TELEMETRY "false" + is_sanitizer_enabled = get_env ENABLE_SANITIZER is_all_in_one_enabled = get_env ENABLE_ALL_IN_ONE is_hdfs_backend = get_env ENABLE_HDFS @@ -163,7 +166,7 @@ script = ''' #!/usr/bin/env bash set -e -if [[ -z "$1" ]]; then +if [[ -z "$1" ]]; then echo "Please pass a parameter to this script, defining which logs you want to follow" echo "Available logs are..." ls ${PREFIX_LOG} @@ -175,7 +178,7 @@ if [[ ! -f ${PREFIX_LOG}/$1 ]]; then echo "Available logs are..." ls ${PREFIX_LOG} exit 1 -fi +fi tail -f -n 5 ${PREFIX_LOG}/$1 ''' @@ -370,7 +373,6 @@ description = "Download all available components at once" dependencies = [ "download-connector", "download-maven", - "build-connector-node", "download-etcd", "download-grafana", "download-jaeger", @@ -414,6 +416,7 @@ dependencies = [ "create-user-profiles-file", "download-all", "build-risingwave", + "build-connector-node", "post-build-risingwave", "extract-dashboard-artifact", "export-dashboard-v2", @@ -440,6 +443,7 @@ alias = "playground" [tasks.playground] category = "RiseDev - Start" description = "Start a lite RisingWave playground using risingwave all-in-one binary" +dependencies = ["download-connector"] script = ''' #!/usr/bin/env bash @@ -599,11 +603,18 @@ else fi ARTIFACT="risingwave-connector-1.0.0.tar.gz" +TARGET_PATH="${JAVA_DIR}/connector-node/assembly/target/${ARTIFACT}" -cd "${JAVA_DIR}" -"${MAVEN_PATH}" --batch-mode --update-snapshots clean package -Dmaven.test.skip +if [[ ! -f ${TARGET_PATH} ]] || [[ ! -z ${REBUILD_CONNECTOR_NODE} ]]; then + echo "Rebuild connector node" + cd "${JAVA_DIR}" + "${MAVEN_PATH}" --batch-mode --update-snapshots clean package -Dmaven.test.skip +else + echo "Connector node was built already. Skipped. 
Set REBUILD_CONNECTOR_NODE=1 to enable rebuild" +fi +rm -rf ${PREFIX_BIN}/connector-node mkdir -p "${PREFIX_BIN}/connector-node" -tar xf "${JAVA_DIR}/connector-node/assembly/target/${ARTIFACT}" -C "${PREFIX_BIN}/connector-node" +tar xf ${TARGET_PATH} -C "${PREFIX_BIN}/connector-node" ''' @@ -611,12 +622,13 @@ tar xf "${JAVA_DIR}/connector-node/assembly/target/${ARTIFACT}" -C "${PREFIX_BIN category = "RiseDev - Build in simulation mode" description = "Build in simulation mode" dependencies = ["warn-on-missing-tools"] -env = { RUSTFLAGS = "-Ctarget-cpu=native --cfg tokio_unstable --cfg madsim", RUSTDOCFLAGS = "--cfg madsim", CARGO_TARGET_DIR = "target/sim" } +env = { CARGO_TARGET_DIR = "target/sim" } script = """ #!/usr/bin/env bash set -e cargo build \ + --config "target.'cfg(all())'.rustflags = ['--cfg=madsim']" \ -p risingwave_batch \ -p risingwave_common \ -p risingwave_compute \ @@ -635,12 +647,13 @@ cargo build \ category = "RiseDev - Deterministic Simulation Test" description = "Run unit tests in deterministic simulation mode" dependencies = ["warn-on-missing-tools"] -env = { RUSTFLAGS = "-Ctarget-cpu=native --cfg tokio_unstable --cfg madsim", RUSTDOCFLAGS = "--cfg madsim", CARGO_TARGET_DIR = "target/sim" } +env = { CARGO_TARGET_DIR = "target/sim" } script = """ #!/usr/bin/env bash set -e cargo nextest run \ + --config "target.'cfg(all())'.rustflags = ['--cfg=madsim']" \ -p risingwave_batch \ -p risingwave_common \ -p risingwave_compute \ @@ -659,12 +672,13 @@ cargo nextest run \ category = "RiseDev - Simulation scaling tests" description = "Run integration scaling tests in deterministic simulation mode" dependencies = ["warn-on-missing-tools"] -env = { RUSTFLAGS = "-Ctarget-cpu=native --cfg tokio_unstable --cfg madsim", RUSTDOCFLAGS = "--cfg madsim", CARGO_TARGET_DIR = "target/sim" } +env = { CARGO_TARGET_DIR = "target/sim" } script = """ #!/usr/bin/env bash set -e cargo nextest run \ + --config "target.'cfg(all())'.rustflags = ['--cfg=madsim']" \ -p risingwave_simulation \ "$@" """ @@ -673,12 +687,13 @@ cargo nextest run \ category = "RiseDev - Simulation scaling tests" description = "Archive integration scaling tests in deterministic simulation mode" dependencies = ["warn-on-missing-tools"] -env = { RUSTFLAGS = "-Ctarget-cpu=native --cfg tokio_unstable --cfg madsim", RUSTDOCFLAGS = "--cfg madsim", CARGO_TARGET_DIR = "target/sim" } +env = { CARGO_TARGET_DIR = "target/sim" } script = """ #!/usr/bin/env bash set -e cargo nextest archive \ + --config "target.'cfg(all())'.rustflags = ['--cfg=madsim']" \ -p risingwave_simulation \ --archive-file scale-test.tar.zst \ "$@" @@ -688,48 +703,58 @@ cargo nextest archive \ category = "RiseDev - Deterministic Simulation End-to-end Test" description = "Run cargo check in deterministic simulation mode" dependencies = ["warn-on-missing-tools"] -env = { RUSTFLAGS = "-Ctarget-cpu=native --cfg tokio_unstable --cfg madsim", RUSTDOCFLAGS = "--cfg madsim", CARGO_TARGET_DIR = "target/sim" } +env = { CARGO_TARGET_DIR = "target/sim" } script = """ #!/usr/bin/env bash set -e -cargo check -p risingwave_simulation "$@" +cargo check \ + --config "target.'cfg(all())'.rustflags = ['--cfg=madsim']" \ + -p risingwave_simulation --all-targets "$@" """ [tasks.sslt] category = "RiseDev - Deterministic Simulation End-to-end Test" description = "Run e2e tests in deterministic simulation mode" dependencies = ["warn-on-missing-tools"] -env = { RUSTFLAGS = "-Ctarget-cpu=native --cfg tokio_unstable --cfg madsim", RUSTDOCFLAGS = "--cfg madsim", CARGO_TARGET_DIR = 
"target/sim" } +env = { CARGO_TARGET_DIR = "target/sim" } script = """ #!/usr/bin/env bash set -e -cargo run -p risingwave_simulation "$@" +cargo run \ + --config "target.'cfg(all())'.rustflags = ['--cfg=madsim']" \ + -p risingwave_simulation "$@" """ [tasks.sslt-build-all] category = "RiseDev - Deterministic Simulation End-to-end Test" description = "Build deterministic simulation runner and tests" dependencies = ["warn-on-missing-tools"] -env = { RUSTFLAGS = "-Ctarget-cpu=native --cfg tokio_unstable --cfg madsim", RUSTDOCFLAGS = "--cfg madsim", CARGO_TARGET_DIR = "target/sim" } +env = { CARGO_TARGET_DIR = "target/sim" } script = """ #!/usr/bin/env bash set -e -cargo build -p risingwave_simulation --tests "$@" +cargo build \ + --config "target.'cfg(all())'.rustflags = ['--cfg=madsim']" \ + -p risingwave_simulation \ + --tests "$@" """ [tasks.sslt-cov] category = "RiseDev - Deterministic Simulation End-to-end Test" description = "Run e2e tests in deterministic simulation mode and report code coverage" dependencies = ["warn-on-missing-tools"] -env = { RUSTFLAGS = "-Ctarget-cpu=native --cfg tokio_unstable --cfg madsim", RUSTDOCFLAGS = "--cfg madsim", CARGO_TARGET_DIR = "target/sim-cov" } +env = { CARGO_TARGET_DIR = "target/sim-cov" } script = """ #!/usr/bin/env bash set -e -cargo llvm-cov run -p risingwave_simulation --html "$@" +cargo llvm-cov run \ + --config "target.'cfg(all())'.rustflags = ['--cfg=madsim']" \ + -p risingwave_simulation \ + --html "$@" """ [tasks.check-java] @@ -748,7 +773,7 @@ else fi cd "${JAVA_DIR}" -"${MAVEN_PATH}" spotless:check +"${MAVEN_PATH}" spotless:check -q """ [tasks.check-java-fix] @@ -1045,3 +1070,14 @@ set -e cp -a .risingwave/rw-docker/* docker ''' + +[tasks.generate-example-config] +category = "misc" +description = "Generate default config and write to src/config/example.toml" +script = ''' +cat << EOF > src/config/example.toml +# This file is generated by ./risedev generate-example-config +# Check detailed comments in src/common/src/config.rs +EOF +cargo run --bin example-config >> src/config/example.toml +''' diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index d9c8a9fee0b9d..ca8e826cbf62b 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -43,10 +43,11 @@ cargo build \ -p risingwave_compaction_test \ -p risingwave_backup_cmd \ -p risingwave_java_binding \ + -p risingwave_e2e_extended_mode_test \ --features "static-link static-log-level" --profile "$profile" # the file name suffix of artifact for risingwave_java_binding is so only for linux. 
It is .dylib on macOS -artifacts=(risingwave sqlsmith compaction-test backup-restore risingwave_regress_test risedev-dev delete-range-test librisingwave_java_binding.so) +artifacts=(risingwave sqlsmith compaction-test backup-restore risingwave_regress_test risingwave_e2e_extended_mode_test risedev-dev delete-range-test librisingwave_java_binding.so) echo "--- Show link info" ldd target/"$target"/risingwave diff --git a/ci/scripts/common.env.sh b/ci/scripts/common.env.sh index 31e95d47737c9..dbaded683b351 100644 --- a/ci/scripts/common.env.sh +++ b/ci/scripts/common.env.sh @@ -3,6 +3,8 @@ export PROTOC_NO_VENDOR=true export CARGO_HOME=/risingwave/.cargo export RISINGWAVE_CI=true export RUST_BACKTRACE=1 +export ENABLE_TELEMETRY=false + if [ -n "${BUILDKITE_COMMIT:-}" ]; then export GIT_SHA=$BUILDKITE_COMMIT fi diff --git a/ci/scripts/deterministic-e2e-test.sh b/ci/scripts/deterministic-e2e-test.sh index 97aba505f6693..bc2c9936138e6 100755 --- a/ci/scripts/deterministic-e2e-test.sh +++ b/ci/scripts/deterministic-e2e-test.sh @@ -45,4 +45,4 @@ echo "--- deterministic simulation e2e, ci-3cn-2fe, parallel, batch" seq $TEST_NUM | parallel MADSIM_TEST_SEED={} './risingwave_simulation -j 16 ./e2e_test/batch/\*\*/\*.slt 2> $LOGDIR/parallel-batch-{}.log && rm $LOGDIR/parallel-batch-{}.log' echo "--- deterministic simulation e2e, ci-3cn-2fe, fuzzing (pre-generated-queries)" -seq $TEST_NUM | parallel MADSIM_TEST_SEED={} './risingwave_simulation --run-sqlsmith-queries ./src/tests/sqlsmith/tests/sqlsmith-query-snapshots/{} 2> $LOGDIR/fuzzing-{}.log && rm $LOGDIR/fuzzing-{}.log' +seq 64 | parallel MADSIM_TEST_SEED={} './risingwave_simulation --run-sqlsmith-queries ./src/tests/sqlsmith/tests/sqlsmith-query-snapshots/{} 2> $LOGDIR/fuzzing-{}.log && rm $LOGDIR/fuzzing-{}.log' diff --git a/ci/scripts/e2e-iceberg-sink-test.sh b/ci/scripts/e2e-iceberg-sink-test.sh index 4fe338dbffe1e..99b417c0c4f85 100755 --- a/ci/scripts/e2e-iceberg-sink-test.sh +++ b/ci/scripts/e2e-iceberg-sink-test.sh @@ -25,8 +25,13 @@ echo "--- Download artifacts" mkdir -p target/debug buildkite-agent artifact download risingwave-"$profile" target/debug/ buildkite-agent artifact download risedev-dev-"$profile" target/debug/ +buildkite-agent artifact download librisingwave_java_binding.so-"$profile" target/debug mv target/debug/risingwave-"$profile" target/debug/risingwave mv target/debug/risedev-dev-"$profile" target/debug/risedev-dev +mv target/debug/librisingwave_java_binding.so-"$profile" target/debug/librisingwave_java_binding.so + +export RW_JAVA_BINDING_LIB_PATH=${PWD}/target/debug +export RW_CONNECTOR_RPC_SINK_PAYLOAD_FORMAT=stream_chunk echo "--- Download connector node package" buildkite-agent artifact download risingwave-connector.tar.gz ./ @@ -45,6 +50,7 @@ cargo make pre-start-dev cargo make link-all-in-one-binaries echo "--- starting risingwave cluster with connector node" +mkdir -p .risingwave/log ./connector-node/start-service.sh -p 50051 > .risingwave/log/connector-sink.log 2>&1 & cargo make ci-start ci-iceberg-test sleep 1 @@ -62,7 +68,7 @@ spark-3.3.1-bin-hadoop3/bin/spark-sql --packages $DEPENDENCIES \ --conf spark.sql.catalog.demo.hadoop.fs.s3a.endpoint=http://127.0.0.1:9301 \ --conf spark.sql.catalog.demo.hadoop.fs.s3a.access.key=hummockadmin \ --conf spark.sql.catalog.demo.hadoop.fs.s3a.secret.key=hummockadmin \ - --S --e "CREATE TABLE demo.demo_db.demo_table(v1 int, v2 int) TBLPROPERTIES ('format-version'='2');" + --S --e "CREATE TABLE demo.demo_db.demo_table(v1 int, v2 bigint, v3 string) TBLPROPERTIES
('format-version'='2');" echo "--- testing sinks" sqllogictest -p 4566 -d dev './e2e_test/sink/iceberg_sink.slt' @@ -80,13 +86,13 @@ spark-3.3.1-bin-hadoop3/bin/spark-sql --packages $DEPENDENCIES \ # check sink destination using shell if cat ./spark-output/*.csv | sort | awk -F "," '{ -if ($1 == 1 && $2 == 2) c1++; - if ($1 == 13 && $2 == 2) c2++; - if ($1 == 21 && $2 == 2) c3++; - if ($1 == 2 && $2 == 2) c4++; - if ($1 == 3 && $2 == 2) c5++; - if ($1 == 5 && $2 == 2) c6++; - if ($1 == 8 && $2 == 2) c7++; } +if ($1 == 1 && $2 == 50 && $3 == "1-50") c1++; + if ($1 == 13 && $2 == 2 && $3 == "13-2") c2++; + if ($1 == 21 && $2 == 2 && $3 == "21-2") c3++; + if ($1 == 2 && $2 == 2 && $3 == "2-2") c4++; + if ($1 == 3 && $2 == 2 && $3 == "3-2") c5++; + if ($1 == 5 && $2 == 2 && $3 == "5-2") c6++; + if ($1 == 8 && $2 == 2 && $3 == "8-2") c7++; } END { exit !(c1 == 1 && c2 == 1 && c3 == 1 && c4 == 1 && c5 == 1 && c6 == 1 && c7 == 1); }'; then echo "Iceberg sink check passed" else diff --git a/ci/scripts/e2e-sink-test.sh b/ci/scripts/e2e-sink-test.sh index 9eb8d72107b1f..2b481feb38de0 100755 --- a/ci/scripts/e2e-sink-test.sh +++ b/ci/scripts/e2e-sink-test.sh @@ -25,8 +25,14 @@ echo "--- Download artifacts" mkdir -p target/debug buildkite-agent artifact download risingwave-"$profile" target/debug/ buildkite-agent artifact download risedev-dev-"$profile" target/debug/ +buildkite-agent artifact download librisingwave_java_binding.so-"$profile" target/debug mv target/debug/risingwave-"$profile" target/debug/risingwave mv target/debug/risedev-dev-"$profile" target/debug/risedev-dev +mv target/debug/librisingwave_java_binding.so-"$profile" target/debug/librisingwave_java_binding.so + +export RW_JAVA_BINDING_LIB_PATH=${PWD}/target/debug +# TODO: Switch to stream_chunk encoding once it's completed, and then remove json encoding as well as this env var. 
+export RW_CONNECTOR_RPC_SINK_PAYLOAD_FORMAT=json echo "--- Download connector node package" buildkite-agent artifact download risingwave-connector.tar.gz ./ @@ -50,7 +56,7 @@ mysql --host=mysql --port=3306 -u root -p123456 -e "CREATE DATABASE IF NOT EXIST # grant access to `test` for ci test user mysql --host=mysql --port=3306 -u root -p123456 -e "GRANT ALL PRIVILEGES ON test.* TO 'mysqluser'@'%';" # create a table named t_remote -mysql --host=mysql --port=3306 -u root -p123456 -e "CREATE TABLE IF NOT EXISTS test.t_remote (id INT, name VARCHAR(255), PRIMARY KEY (id));" +mysql --host=mysql --port=3306 -u root -p123456 test < ./e2e_test/sink/remote/mysql_create_table.sql echo "--- preparing postgresql" @@ -59,12 +65,16 @@ apt-get -y install postgresql-client export PGPASSWORD=postgres psql -h db -U postgres -c "CREATE ROLE test LOGIN SUPERUSER PASSWORD 'connector';" createdb -h db -U postgres test -psql -h db -U postgres -d test -c "CREATE TABLE t4 (v1 int, v2 int);" -psql -h db -U postgres -d test -c "CREATE TABLE t_remote (id serial PRIMARY KEY, name VARCHAR (50) NOT NULL);" +psql -h db -U postgres -d test -c "CREATE TABLE t4 (v1 int PRIMARY KEY, v2 int);" +psql -h db -U postgres -d test < ./e2e_test/sink/remote/pg_create_table.sql node_port=50051 node_timeout=10 -./connector-node/start-service.sh -p $node_port > .risingwave/log/connector-source.log 2>&1 & + +echo "--- starting risingwave cluster with connector node" +cargo make ci-start ci-1cn-1fe +./connector-node/start-service.sh -p $node_port > .risingwave/log/connector-node.log 2>&1 & + echo "waiting for connector node to start" start_time=$(date +%s) while : @@ -83,8 +93,6 @@ do sleep 0.1 done -echo "--- starting risingwave cluster with connector node" -cargo make ci-start ci-1cn-1fe echo "--- testing sinks" sqllogictest -p 4566 -d dev './e2e_test/sink/append_only_sink.slt' @@ -99,13 +107,9 @@ sqllogictest -h db -p 5432 -d test './e2e_test/sink/remote/jdbc.check.pg.slt' sleep 1 # check sink destination mysql using shell -if mysql --host=mysql --port=3306 -u root -p123456 -sN -e "SELECT * FROM test.t_remote ORDER BY id;" | awk '{ -if ($1 == 1 && $2 == "Alex") c1++; - if ($1 == 3 && $2 == "Carl") c2++; - if ($1 == 4 && $2 == "Doris") c3++; - if ($1 == 5 && $2 == "Eve") c4++; - if ($1 == 6 && $2 == "Frank") c5++; } - END { exit !(c1 == 1 && c2 == 1 && c3 == 1 && c4 == 1 && c5 == 1); }'; then +diff -u ./e2e_test/sink/remote/mysql_expected_result.tsv \ +<(mysql --host=mysql --port=3306 -u root -p123456 -s -N -r test -e "SELECT * FROM test.t_remote ORDER BY id") +if [ $? -eq 0 ]; then echo "mysql sink check passed" else echo "The output is not as expected." 
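Note on the hunk above: the rewritten check pipes a live `SELECT` into `diff -u` against a checked-in expected-results file instead of counting rows in awk. The following is a minimal standalone sketch of that verification pattern, not part of the patch; the exact wiring (paths, credentials) is an assumption for illustration. Folding `diff` into the `if` condition also avoids the pitfall where `set -e` would abort the script on a mismatch before `$?` can be inspected:

```bash
#!/usr/bin/env bash
# Sketch: verify a sink's destination table against an expected TSV snapshot.
# The snapshot path, credentials, and query below are illustrative assumptions.
set -euo pipefail

EXPECTED=./e2e_test/sink/remote/mysql_expected_result.tsv
QUERY="SELECT * FROM test.t_remote ORDER BY id"

# mysql -s -N -r: silent mode, no column headers, raw (unescaped) output,
# so rows come out tab-separated and can be diffed directly against the snapshot.
# Running diff inside the `if` condition keeps `set -e` from aborting on mismatch.
if diff -u "$EXPECTED" <(mysql --host=mysql --port=3306 -u root -p123456 -s -N -r -e "$QUERY"); then
  echo "mysql sink check passed"
else
  echo "The output is not as expected." >&2
  exit 1
fi
```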
diff --git a/ci/scripts/e2e-source-test.sh b/ci/scripts/e2e-source-test.sh index 010ee0f4ce94a..f3040da60968a 100755 --- a/ci/scripts/e2e-source-test.sh +++ b/ci/scripts/e2e-source-test.sh @@ -6,7 +6,7 @@ set -euo pipefail source ci/scripts/common.env.sh # prepare environment -export CONNECTOR_RPC_ENDPOINT="localhost:60061" +export CONNECTOR_RPC_ENDPOINT="localhost:50051" while getopts 'p:' opt; do case ${opt} in @@ -28,8 +28,12 @@ echo "--- Download artifacts" mkdir -p target/debug buildkite-agent artifact download risingwave-"$profile" target/debug/ buildkite-agent artifact download risedev-dev-"$profile" target/debug/ +buildkite-agent artifact download librisingwave_java_binding.so-"$profile" target/debug mv target/debug/risingwave-"$profile" target/debug/risingwave mv target/debug/risedev-dev-"$profile" target/debug/risedev-dev +mv target/debug/librisingwave_java_binding.so-"$profile" target/debug/librisingwave_java_binding.so + +export RW_JAVA_BINDING_LIB_PATH=${PWD}/target/debug echo "--- Download connector node package" @@ -65,7 +69,10 @@ psql -h db -U postgres -d cdc_test < ./e2e_test/source/cdc/postgres_cdc.sql node_port=50051 node_timeout=10 -./connector-node/start-service.sh -p $node_port > .risingwave/log/connector-source.log 2>&1 & + +echo "--- starting risingwave cluster with connector node" +cargo make ci-start ci-1cn-1fe-with-recovery +./connector-node/start-service.sh -p $node_port > .risingwave/log/connector-node.log 2>&1 & echo "waiting for connector node to start" start_time=$(date +%s) @@ -84,9 +91,6 @@ do fi sleep 0.1 done - -# start risingwave cluster -cargo make ci-start ci-1cn-1fe-with-recovery sleep 2 echo "--- mysql & postgres cdc validate test" diff --git a/ci/scripts/e2e-test-parallel-for-opendal.sh b/ci/scripts/e2e-test-parallel-for-opendal.sh new file mode 100755 index 0000000000000..3a1a80445a454 --- /dev/null +++ b/ci/scripts/e2e-test-parallel-for-opendal.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +# Exits as soon as any line fails. +set -euo pipefail + +source ci/scripts/common.env.sh + +while getopts 'p:' opt; do + case ${opt} in + p ) + profile=$OPTARG + ;; + \?
) + echo "Invalid Option: -$OPTARG" 1>&2 + exit 1 + ;; + : ) + echo "Invalid option: $OPTARG requires an argument" 1>&2 + ;; + esac +done +shift $((OPTIND -1)) + +echo "--- Download artifacts" +mkdir -p target/debug +buildkite-agent artifact download risingwave-"$profile" target/debug/ +buildkite-agent artifact download risedev-dev-"$profile" target/debug/ +buildkite-agent artifact download "e2e_test/generated/*" ./ +mv target/debug/risingwave-"$profile" target/debug/risingwave +mv target/debug/risedev-dev-"$profile" target/debug/risedev-dev + +echo "--- Adjust permission" +chmod +x ./target/debug/risingwave +chmod +x ./target/debug/risedev-dev + +echo "--- Generate RiseDev CI config" +cp ci/risedev-components.ci.env risedev-components.user.env + +echo "--- Prepare RiseDev dev cluster" +cargo make pre-start-dev +cargo make link-all-in-one-binaries + +host_args="-h localhost -p 4565 -h localhost -p 4566 -h localhost -p 4567" + +echo "--- e2e, ci-3cn-3fe-opendal-fs-backend, streaming" +RUST_LOG="info,risingwave_stream=info,risingwave_batch=info,risingwave_storage=info" \ +cargo make ci-start ci-3cn-3fe-opendal-fs-backend +sqllogictest ${host_args} -d dev './e2e_test/streaming/**/*.slt' -j 16 --junit "parallel-opendal-fs-backend-${profile}" + +echo "--- Kill cluster" +rm -rf /tmp/rw_ci +cargo make ci-kill + + +echo "--- e2e, ci-3cn-3fe-opendal-fs-backend, batch" +RUST_LOG="info,risingwave_stream=info,risingwave_batch=info,risingwave_storage=info" \ +cargo make ci-start ci-3cn-3fe-opendal-fs-backend +sqllogictest ${host_args} -d dev './e2e_test/ddl/**/*.slt' --junit "parallel-opendal-fs-backend-ddl-${profile}" +sqllogictest ${host_args} -d dev './e2e_test/batch/**/*.slt' -j 16 --junit "parallel-opendal-fs-backend-batch-${profile}" + +echo "--- Kill cluster" +rm -rf /tmp/rw_ci +cargo make ci-kill \ No newline at end of file diff --git a/ci/scripts/e2e-test-parallel-in-memory.sh b/ci/scripts/e2e-test-parallel-in-memory.sh index 1225d754c5698..d161bc6f6befd 100755 --- a/ci/scripts/e2e-test-parallel-in-memory.sh +++ b/ci/scripts/e2e-test-parallel-in-memory.sh @@ -54,4 +54,4 @@ sqllogictest ${host_args} -d dev './e2e_test/ddl/**/*.slt' --junit "parallel-in sqllogictest ${host_args} -d dev './e2e_test/batch/**/*.slt' -j 16 --junit "parallel-in-memory-batch-${profile}" echo "--- Kill cluster" -cargo make ci-kill +cargo make ci-kill \ No newline at end of file diff --git a/ci/scripts/java-binding-test.sh b/ci/scripts/java-binding-test.sh index 9dca6068a6915..8818bd7d8a412 100755 --- a/ci/scripts/java-binding-test.sh +++ b/ci/scripts/java-binding-test.sh @@ -53,3 +53,6 @@ cargo make ingest-data-and-run-java-binding echo "--- Kill cluster" cargo make ci-kill + +echo "--- run stream chunk java binding" +cargo make run-java-binding-stream-chunk-demo diff --git a/ci/scripts/run-e2e-test.sh b/ci/scripts/run-e2e-test.sh index cc29af13c2fdf..2140503ba1915 100755 --- a/ci/scripts/run-e2e-test.sh +++ b/ci/scripts/run-e2e-test.sh @@ -24,12 +24,15 @@ mkdir -p target/debug buildkite-agent artifact download risingwave-"$profile" target/debug/ buildkite-agent artifact download risedev-dev-"$profile" target/debug/ buildkite-agent artifact download "e2e_test/generated/*" ./ +buildkite-agent artifact download risingwave_e2e_extended_mode_test-"$profile" target/debug/ mv target/debug/risingwave-"$profile" target/debug/risingwave mv target/debug/risedev-dev-"$profile" target/debug/risedev-dev +mv target/debug/risingwave_e2e_extended_mode_test-"$profile" target/debug/risingwave_e2e_extended_mode_test echo "--- Adjust 
permission" chmod +x ./target/debug/risingwave chmod +x ./target/debug/risedev-dev +chmod +x ./target/debug/risingwave_e2e_extended_mode_test echo "--- Generate RiseDev CI config" cp ci/risedev-components.ci.env risedev-components.user.env @@ -75,7 +78,10 @@ cargo make ci-kill echo "--- e2e, ci-3cn-1fe, extended query" RUST_LOG="info,risingwave_stream=info,risingwave_batch=info,risingwave_storage=info" \ cargo make ci-start ci-3cn-1fe -sqllogictest -p 4566 -d dev -e postgres-extended './e2e_test/extended_query/**/*.slt' +sqllogictest -p 4566 -d dev -e postgres-extended './e2e_test/extended_mode/**/*.slt' +RUST_BACKTRACE=1 target/debug/risingwave_e2e_extended_mode_test --host 127.0.0.1 \ + -p 4566 \ + -u root echo "--- Kill cluster" cargo make ci-kill diff --git a/ci/scripts/s3-source-test-for-opendal-fs-engine.sh b/ci/scripts/s3-source-test-for-opendal-fs-engine.sh new file mode 100755 index 0000000000000..20fd77483a7a9 --- /dev/null +++ b/ci/scripts/s3-source-test-for-opendal-fs-engine.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source ci/scripts/common.env.sh + +while getopts 'p:s:' opt; do + case ${opt} in + p ) + profile=$OPTARG + ;; + s ) + script=$OPTARG + ;; + \? ) + echo "Invalid Option: -$OPTARG" 1>&2 + exit 1 + ;; + : ) + echo "Invalid option: $OPTARG requires an argument" 1>&2 + ;; + esac +done +shift $((OPTIND -1)) + + + +echo "--- Download artifacts" +mkdir -p target/debug +buildkite-agent artifact download risingwave-"$profile" target/debug/ +buildkite-agent artifact download risedev-dev-"$profile" target/debug/ + +mv target/debug/risingwave-"$profile" target/debug/risingwave +mv target/debug/risedev-dev-"$profile" target/debug/risedev-dev + +echo "--- Adjust permission" +chmod +x ./target/debug/risingwave +chmod +x ./target/debug/risedev-dev + +echo "--- Generate RiseDev CI config" +cp ci/risedev-components.ci.env risedev-components.user.env + +echo "--- Prepare RiseDev dev cluster" +cargo make pre-start-dev +cargo make link-all-in-one-binaries + +echo "--- starting risingwave cluster with connector node" +cargo make ci-start ci-3cn-3fe-opendal-fs-backend + +echo "--- Run test" +python3 -m pip install minio psycopg2-binary +python3 e2e_test/s3/$script.py + +echo "--- Kill cluster" +rm -rf /tmp/rw_ci +cargo make ci-kill diff --git a/ci/scripts/s3-source-test.sh b/ci/scripts/s3-source-test.sh index 58c2cbd93863f..4d482568cad26 100755 --- a/ci/scripts/s3-source-test.sh +++ b/ci/scripts/s3-source-test.sh @@ -4,11 +4,14 @@ set -euo pipefail source ci/scripts/common.env.sh -while getopts 'p:' opt; do +while getopts 'p:s:' opt; do case ${opt} in p ) profile=$OPTARG ;; + s ) + script=$OPTARG + ;; \? 
) echo "Invalid Option: -$OPTARG" 1>&2 exit 1 @@ -20,6 +23,8 @@ while getopts 'p:' opt; do done shift $((OPTIND -1)) + + echo "--- Download artifacts" mkdir -p target/debug buildkite-agent artifact download risingwave-"$profile" target/debug/ @@ -44,7 +49,7 @@ cargo make ci-start ci-1cn-1fe echo "--- Run test" python3 -m pip install minio psycopg2-binary -python3 e2e_test/s3/run.py +python3 e2e_test/s3/$script.py echo "--- Kill cluster" cargo make ci-kill diff --git a/ci/workflows/main-cron.yml b/ci/workflows/main-cron.yml index 766320c4fd34f..88854253efeda 100644 --- a/ci/workflows/main-cron.yml +++ b/ci/workflows/main-cron.yml @@ -130,7 +130,7 @@ steps: retry: *auto-retry - label: "scaling test (deterministic simulation)" - command: "TEST_NUM=60 timeout 110m ci/scripts/deterministic-scale-test.sh" + command: "TEST_NUM=60 timeout 70m ci/scripts/deterministic-scale-test.sh" depends_on: "build-simulation" plugins: - gencer/cache#v2.4.10: *cargo-cache @@ -138,7 +138,7 @@ steps: run: rw-build-env config: ci/docker-compose.yml mount-buildkite-agent: true - timeout_in_minutes: 120 + timeout_in_minutes: 70 retry: *auto-retry - label: "end-to-end test (deterministic simulation)" @@ -184,8 +184,8 @@ steps: timeout_in_minutes: 5 retry: *auto-retry - - label: "S3 source check on AWS" - command: "ci/scripts/s3-source-test.sh -p ci-release" + - label: "S3 source check on AWS (json parser)" + command: "ci/scripts/s3-source-test.sh -p ci-release -s run" depends_on: build plugins: - seek-oss/aws-sm#v2.3.1: @@ -200,8 +200,8 @@ steps: timeout_in_minutes: 20 retry: *auto-retry - - label: "S3 source check on lyvecloud.seagate.com" - command: "ci/scripts/s3-source-test.sh -p ci-release" + - label: "S3 source check on lyvecloud.seagate.com (json parser)" + command: "ci/scripts/s3-source-test.sh -p ci-release -s run" depends_on: build plugins: - seek-oss/aws-sm#v2.3.1: @@ -215,3 +215,35 @@ steps: - S3_SOURCE_TEST_CONF timeout_in_minutes: 20 retry: *auto-retry + + - label: "S3 source check on AWS (csv parser)" + command: "ci/scripts/s3-source-test.sh -p ci-release -s run_csv" + depends_on: build + plugins: + - seek-oss/aws-sm#v2.3.1: + env: + S3_SOURCE_TEST_CONF: ci_s3_source_test_aws + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + environment: + - S3_SOURCE_TEST_CONF + timeout_in_minutes: 20 + retry: *auto-retry + + - label: "S3 source on OpenDAL fs engine" + command: "ci/scripts/s3-source-test-for-opendal-fs-engine.sh -p ci-release -s run" + depends_on: build + plugins: + - seek-oss/aws-sm#v2.3.1: + env: + S3_SOURCE_TEST_CONF: ci_s3_source_test_aws + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + environment: + - S3_SOURCE_TEST_CONF + timeout_in_minutes: 20 + retry: *auto-retry \ No newline at end of file diff --git a/ci/workflows/main.yml b/ci/workflows/main.yml index 767faecb3886b..92906e25df9c7 100644 --- a/ci/workflows/main.yml +++ b/ci/workflows/main.yml @@ -228,7 +228,7 @@ steps: retry: *auto-retry - label: "scaling test (deterministic simulation)" - command: "TEST_NUM=30 timeout 50m ci/scripts/deterministic-scale-test.sh" + command: "TEST_NUM=30 timeout 40m ci/scripts/deterministic-scale-test.sh" depends_on: "build-simulation" plugins: - gencer/cache#v2.4.10: *cargo-cache @@ -236,7 +236,7 @@ steps: run: rw-build-env config: ci/docker-compose.yml mount-buildkite-agent: true - timeout_in_minutes: 55 + timeout_in_minutes: 40 # TODO: split into multiple jobs retry: *auto-retry - 
label: "end-to-end test (deterministic simulation)" diff --git a/ci/workflows/pull-request.yml b/ci/workflows/pull-request.yml index ccb6d7d6999f1..0097900d5561c 100644 --- a/ci/workflows/pull-request.yml +++ b/ci/workflows/pull-request.yml @@ -113,6 +113,21 @@ steps: timeout_in_minutes: 12 retry: *auto-retry + - label: "end-to-end test for opendal (parallel)" + command: "ci/scripts/e2e-test-parallel-for-opendal.sh -p ci-dev" + depends_on: + - "build" + - "docslt" + plugins: + - gencer/cache#v2.4.10: *cargo-cache + - docker-compose#v4.9.0: + run: rw-build-env + config: ci/docker-compose.yml + mount-buildkite-agent: true + - ./ci/plugins/upload-failure-logs + timeout_in_minutes: 12 + retry: *auto-retry + - label: "end-to-end test (parallel, in-memory)" command: "ci/scripts/e2e-test-parallel-in-memory.sh -p ci-dev" depends_on: "build" @@ -228,6 +243,7 @@ steps: - ./ci/plugins/upload-failure-logs timeout_in_minutes: 15 retry: *auto-retry + soft_fail: true - label: "check" command: "ci/scripts/check.sh" @@ -296,7 +312,7 @@ steps: # files: "*-junit.xml" # format: "junit" - ./ci/plugins/upload-failure-logs - timeout_in_minutes: 15 + timeout_in_minutes: 18 retry: *auto-retry - label: "misc check" diff --git a/dashboard/.gitignore b/dashboard/.gitignore index 922d92a5745e8..7d340654380ed 100644 --- a/dashboard/.gitignore +++ b/dashboard/.gitignore @@ -16,10 +16,13 @@ /build # misc -.DS_Store +**/*.DS_Store .env* # debug npm-debug.log* yarn-debug.log* yarn-error.log* + +# generated proto +proto/gen diff --git a/dashboard/README.md b/dashboard/README.md index 5a0b9facf75a7..28a9c29f4c6e8 100644 --- a/dashboard/README.md +++ b/dashboard/README.md @@ -1,11 +1,15 @@ +# Dashboard + The framework: [Next.js](https://nextjs.org). Next.js supports SPA, SSG and SSR. With this feature, the dashboard ui can be deployed in + 1. Standalone machine serving only dashboard UI with a backend. (e.g. Dashboard for cloud product) 2. Meta service node. (e.g. Static HTML files integrated in meta service without any other dependencies like node.js) ## Files -``` + +```plain dashboard/ --.next/ (generated by nextjs) --node_modules/ (development dependencies) @@ -19,47 +23,67 @@ dashboard/ ``` ## Testing + TODO: Find a suitable testing framework ## Development + Start the RisingWave database, remove drop tables from `tpch_snapshot.slt` + ```bash ./risedev d sqllogictest -p 4566 -d dev './e2e_test/streaming/tpch_snapshot.slt' ``` + Install Dependencies. + ```bash npm i ``` + The website will be served at port 3000. + ```bash npm run dev ``` + You should also run: -``` + +```bash node mock-server.js ``` + To start a mock API server when developing. You can use `fetch.sh` to update the mock APIs. ## Test with RisingWave meta node -To replace the built static files in RisingWave with your newest code, + +To replace the built static files in RisingWave with your newest code, run the following scripts in the root directory. -``` + +```bash ./risedev export-dashboard-v2 ``` - ## Deployment -#### Static HTML files + +### Generate the protos + +Running `npm i` will generate the proto files under `proto/gen` automatically. In case there are modifications to the protos, you can regenerate them using the command npm run gen-proto. + +### Static HTML files + Build static files for standalone deployment without node.js. The built files are generated at `./out`. Check more details at [Static HTML Export](https://nextjs.org/docs/advanced-features/static-html-export). 
+ ```bash npm run build-static ``` #### Next.js app + The built files are generated at `./.next`. + ```bash npm run build npm run start -``` \ No newline at end of file +``` diff --git a/dashboard/package-lock.json b/dashboard/package-lock.json index 2d741cd099d19..5c5842cdca159 100644 --- a/dashboard/package-lock.json +++ b/dashboard/package-lock.json @@ -4,6 +4,7 @@ "requires": true, "packages": { "": { + "hasInstallScript": true, "dependencies": { "@chakra-ui/react": "^2.3.1", "@emotion/react": "^11.10.4", diff --git a/dashboard/package.json b/dashboard/package.json index dd50d36471ce6..f809ce3117097 100644 --- a/dashboard/package.json +++ b/dashboard/package.json @@ -8,7 +8,8 @@ "gen-proto": "./scripts/generate_proto.sh", "lint": "prettier --check . && next lint", "format": "prettier --write . && next lint --fix", - "mock-server": "node ./mock-server.js" + "mock-server": "node ./mock-server.js", + "postinstall": "npm run gen-proto" }, "dependencies": { "@chakra-ui/react": "^2.3.1", diff --git a/dashboard/proto/gen/backup_service.ts b/dashboard/proto/gen/backup_service.ts deleted file mode 100644 index a30c09c2ae231..0000000000000 --- a/dashboard/proto/gen/backup_service.ts +++ /dev/null @@ -1,390 +0,0 @@ -/* eslint-disable */ - -export const protobufPackage = "backup_service"; - -export const BackupJobStatus = { - UNSPECIFIED: "UNSPECIFIED", - RUNNING: "RUNNING", - SUCCEEDED: "SUCCEEDED", - /** - * NOT_FOUND - NOT_FOUND indicates one of these cases: - * - Invalid job id. - * - Job has failed. - * - Job has succeeded, but its resulted backup has been deleted later. - */ - NOT_FOUND: "NOT_FOUND", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type BackupJobStatus = typeof BackupJobStatus[keyof typeof BackupJobStatus]; - -export function backupJobStatusFromJSON(object: any): BackupJobStatus { - switch (object) { - case 0: - case "UNSPECIFIED": - return BackupJobStatus.UNSPECIFIED; - case 1: - case "RUNNING": - return BackupJobStatus.RUNNING; - case 2: - case "SUCCEEDED": - return BackupJobStatus.SUCCEEDED; - case 3: - case "NOT_FOUND": - return BackupJobStatus.NOT_FOUND; - case -1: - case "UNRECOGNIZED": - default: - return BackupJobStatus.UNRECOGNIZED; - } -} - -export function backupJobStatusToJSON(object: BackupJobStatus): string { - switch (object) { - case BackupJobStatus.UNSPECIFIED: - return "UNSPECIFIED"; - case BackupJobStatus.RUNNING: - return "RUNNING"; - case BackupJobStatus.SUCCEEDED: - return "SUCCEEDED"; - case BackupJobStatus.NOT_FOUND: - return "NOT_FOUND"; - case BackupJobStatus.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface MetaBackupManifestId { - id: number; -} - -export interface BackupMetaRequest { -} - -export interface BackupMetaResponse { - jobId: number; -} - -export interface GetBackupJobStatusRequest { - jobId: number; -} - -export interface GetBackupJobStatusResponse { - jobId: number; - jobStatus: BackupJobStatus; -} - -export interface DeleteMetaSnapshotRequest { - snapshotIds: number[]; -} - -export interface DeleteMetaSnapshotResponse { -} - -export interface GetMetaSnapshotManifestRequest { -} - -export interface GetMetaSnapshotManifestResponse { - manifest: MetaSnapshotManifest | undefined; -} - -export interface MetaSnapshotManifest { - manifestId: number; - snapshotMetadata: MetaSnapshotMetadata[]; -} - -export interface MetaSnapshotMetadata { - id: number; - hummockVersionId: number; - maxCommittedEpoch: number; - safeEpoch: number; -} - -function createBaseMetaBackupManifestId(): MetaBackupManifestId { - return 
{ id: 0 }; -} - -export const MetaBackupManifestId = { - fromJSON(object: any): MetaBackupManifestId { - return { id: isSet(object.id) ? Number(object.id) : 0 }; - }, - - toJSON(message: MetaBackupManifestId): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<MetaBackupManifestId>, I>>(object: I): MetaBackupManifestId { - const message = createBaseMetaBackupManifestId(); - message.id = object.id ?? 0; - return message; - }, -}; - -function createBaseBackupMetaRequest(): BackupMetaRequest { - return {}; -} - -export const BackupMetaRequest = { - fromJSON(_: any): BackupMetaRequest { - return {}; - }, - - toJSON(_: BackupMetaRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<BackupMetaRequest>, I>>(_: I): BackupMetaRequest { - const message = createBaseBackupMetaRequest(); - return message; - }, -}; - -function createBaseBackupMetaResponse(): BackupMetaResponse { - return { jobId: 0 }; -} - -export const BackupMetaResponse = { - fromJSON(object: any): BackupMetaResponse { - return { jobId: isSet(object.jobId) ? Number(object.jobId) : 0 }; - }, - - toJSON(message: BackupMetaResponse): unknown { - const obj: any = {}; - message.jobId !== undefined && (obj.jobId = Math.round(message.jobId)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<BackupMetaResponse>, I>>(object: I): BackupMetaResponse { - const message = createBaseBackupMetaResponse(); - message.jobId = object.jobId ?? 0; - return message; - }, -}; - -function createBaseGetBackupJobStatusRequest(): GetBackupJobStatusRequest { - return { jobId: 0 }; -} - -export const GetBackupJobStatusRequest = { - fromJSON(object: any): GetBackupJobStatusRequest { - return { jobId: isSet(object.jobId) ? Number(object.jobId) : 0 }; - }, - - toJSON(message: GetBackupJobStatusRequest): unknown { - const obj: any = {}; - message.jobId !== undefined && (obj.jobId = Math.round(message.jobId)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<GetBackupJobStatusRequest>, I>>(object: I): GetBackupJobStatusRequest { - const message = createBaseGetBackupJobStatusRequest(); - message.jobId = object.jobId ?? 0; - return message; - }, -}; - -function createBaseGetBackupJobStatusResponse(): GetBackupJobStatusResponse { - return { jobId: 0, jobStatus: BackupJobStatus.UNSPECIFIED }; -} - -export const GetBackupJobStatusResponse = { - fromJSON(object: any): GetBackupJobStatusResponse { - return { - jobId: isSet(object.jobId) ? Number(object.jobId) : 0, - jobStatus: isSet(object.jobStatus) ? backupJobStatusFromJSON(object.jobStatus) : BackupJobStatus.UNSPECIFIED, - }; - }, - - toJSON(message: GetBackupJobStatusResponse): unknown { - const obj: any = {}; - message.jobId !== undefined && (obj.jobId = Math.round(message.jobId)); - message.jobStatus !== undefined && (obj.jobStatus = backupJobStatusToJSON(message.jobStatus)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<GetBackupJobStatusResponse>, I>>(object: I): GetBackupJobStatusResponse { - const message = createBaseGetBackupJobStatusResponse(); - message.jobId = object.jobId ?? 0; - message.jobStatus = object.jobStatus ?? BackupJobStatus.UNSPECIFIED; - return message; - }, -}; - -function createBaseDeleteMetaSnapshotRequest(): DeleteMetaSnapshotRequest { - return { snapshotIds: [] }; -} - -export const DeleteMetaSnapshotRequest = { - fromJSON(object: any): DeleteMetaSnapshotRequest { - return { snapshotIds: Array.isArray(object?.snapshotIds) ?
object.snapshotIds.map((e: any) => Number(e)) : [] }; - }, - - toJSON(message: DeleteMetaSnapshotRequest): unknown { - const obj: any = {}; - if (message.snapshotIds) { - obj.snapshotIds = message.snapshotIds.map((e) => Math.round(e)); - } else { - obj.snapshotIds = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<DeleteMetaSnapshotRequest>, I>>(object: I): DeleteMetaSnapshotRequest { - const message = createBaseDeleteMetaSnapshotRequest(); - message.snapshotIds = object.snapshotIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseDeleteMetaSnapshotResponse(): DeleteMetaSnapshotResponse { - return {}; -} - -export const DeleteMetaSnapshotResponse = { - fromJSON(_: any): DeleteMetaSnapshotResponse { - return {}; - }, - - toJSON(_: DeleteMetaSnapshotResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<DeleteMetaSnapshotResponse>, I>>(_: I): DeleteMetaSnapshotResponse { - const message = createBaseDeleteMetaSnapshotResponse(); - return message; - }, -}; - -function createBaseGetMetaSnapshotManifestRequest(): GetMetaSnapshotManifestRequest { - return {}; -} - -export const GetMetaSnapshotManifestRequest = { - fromJSON(_: any): GetMetaSnapshotManifestRequest { - return {}; - }, - - toJSON(_: GetMetaSnapshotManifestRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<GetMetaSnapshotManifestRequest>, I>>(_: I): GetMetaSnapshotManifestRequest { - const message = createBaseGetMetaSnapshotManifestRequest(); - return message; - }, -}; - -function createBaseGetMetaSnapshotManifestResponse(): GetMetaSnapshotManifestResponse { - return { manifest: undefined }; -} - -export const GetMetaSnapshotManifestResponse = { - fromJSON(object: any): GetMetaSnapshotManifestResponse { - return { manifest: isSet(object.manifest) ? MetaSnapshotManifest.fromJSON(object.manifest) : undefined }; - }, - - toJSON(message: GetMetaSnapshotManifestResponse): unknown { - const obj: any = {}; - message.manifest !== undefined && - (obj.manifest = message.manifest ? MetaSnapshotManifest.toJSON(message.manifest) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<GetMetaSnapshotManifestResponse>, I>>( - object: I, - ): GetMetaSnapshotManifestResponse { - const message = createBaseGetMetaSnapshotManifestResponse(); - message.manifest = (object.manifest !== undefined && object.manifest !== null) - ? MetaSnapshotManifest.fromPartial(object.manifest) - : undefined; - return message; - }, -}; - -function createBaseMetaSnapshotManifest(): MetaSnapshotManifest { - return { manifestId: 0, snapshotMetadata: [] }; -} - -export const MetaSnapshotManifest = { - fromJSON(object: any): MetaSnapshotManifest { - return { - manifestId: isSet(object.manifestId) ? Number(object.manifestId) : 0, - snapshotMetadata: Array.isArray(object?.snapshotMetadata) - ? object.snapshotMetadata.map((e: any) => MetaSnapshotMetadata.fromJSON(e)) - : [], - }; - }, - - toJSON(message: MetaSnapshotManifest): unknown { - const obj: any = {}; - message.manifestId !== undefined && (obj.manifestId = Math.round(message.manifestId)); - if (message.snapshotMetadata) { - obj.snapshotMetadata = message.snapshotMetadata.map((e) => e ? MetaSnapshotMetadata.toJSON(e) : undefined); - } else { - obj.snapshotMetadata = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<MetaSnapshotManifest>, I>>(object: I): MetaSnapshotManifest { - const message = createBaseMetaSnapshotManifest(); - message.manifestId = object.manifestId ??
0; - message.snapshotMetadata = object.snapshotMetadata?.map((e) => MetaSnapshotMetadata.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseMetaSnapshotMetadata(): MetaSnapshotMetadata { - return { id: 0, hummockVersionId: 0, maxCommittedEpoch: 0, safeEpoch: 0 }; -} - -export const MetaSnapshotMetadata = { - fromJSON(object: any): MetaSnapshotMetadata { - return { - id: isSet(object.id) ? Number(object.id) : 0, - hummockVersionId: isSet(object.hummockVersionId) ? Number(object.hummockVersionId) : 0, - maxCommittedEpoch: isSet(object.maxCommittedEpoch) ? Number(object.maxCommittedEpoch) : 0, - safeEpoch: isSet(object.safeEpoch) ? Number(object.safeEpoch) : 0, - }; - }, - - toJSON(message: MetaSnapshotMetadata): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.hummockVersionId !== undefined && (obj.hummockVersionId = Math.round(message.hummockVersionId)); - message.maxCommittedEpoch !== undefined && (obj.maxCommittedEpoch = Math.round(message.maxCommittedEpoch)); - message.safeEpoch !== undefined && (obj.safeEpoch = Math.round(message.safeEpoch)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<MetaSnapshotMetadata>, I>>(object: I): MetaSnapshotMetadata { - const message = createBaseMetaSnapshotMetadata(); - message.id = object.id ?? 0; - message.hummockVersionId = object.hummockVersionId ?? 0; - message.maxCommittedEpoch = object.maxCommittedEpoch ?? 0; - message.safeEpoch = object.safeEpoch ?? 0; - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial<T> = T extends Builtin ? T - : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> - : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> } - : Partial<T>; - -type KeysOfUnion<T> = T extends T ? keyof T : never; -export type Exact<P, I extends P> = P extends Builtin ? P - : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/batch_plan.ts b/dashboard/proto/gen/batch_plan.ts deleted file mode 100644 index 73420d1086fd6..0000000000000 --- a/dashboard/proto/gen/batch_plan.ts +++ /dev/null @@ -1,2472 +0,0 @@ -/* eslint-disable */ -import { StreamSourceInfo } from "./catalog"; -import { - BatchQueryEpoch, - Buffer, - ColumnOrder, - Direction, - directionFromJSON, - directionToJSON, - HostAddress, - WorkerNode, -} from "./common"; -import { IntervalUnit } from "./data"; -import { AggCall, ExprNode, ProjectSetSelectItem, TableFunction } from "./expr"; -import { - ColumnCatalog, - ColumnDesc, - Field, - JoinType, - joinTypeFromJSON, - joinTypeToJSON, - StorageTableDesc, -} from "./plan_common"; - -export const protobufPackage = "batch_plan"; - -export interface RowSeqScanNode { - tableDesc: StorageTableDesc | undefined; - columnIds: number[]; - /** - * All the ranges need to be read. i.e., they are OR'ed. - * - * Empty `scan_ranges` means full table scan. - */ - scanRanges: ScanRange[]; - /** - * The partition to read for scan tasks. - * - * Will be filled by the scheduler. - */ - vnodeBitmap: - | Buffer - | undefined; - /** Whether the order on output columns should be preserved. */ - ordered: boolean; - /** If along with `batch_limit`, `chunk_size` will be set.
-  chunkSize: RowSeqScanNode_ChunkSize | undefined;
-}
-
-export interface RowSeqScanNode_ChunkSize {
-  chunkSize: number;
-}
-
-export interface SysRowSeqScanNode {
-  tableId: number;
-  columnDescs: ColumnDesc[];
-}
-
-/**
- * The range to scan, which specifies a consecutive range of the PK
- * and can represent: (Suppose there are N columns in the PK)
- * - full table scan: Should not occur. Use an empty `Vec` instead.
- * - index range scan: `eq_conds` includes i (between 0 and N-1, inclusive) values,
- *   and `lower_bound` & `upper_bound` is the range for the (i+1)th column
- * - index point get: `eq_conds` includes N values, and `lower_bound` & `upper_bound` are `None`
- */
-export interface ScanRange {
-  /** The i-th element represents the value of the i-th PK column. */
-  eqConds: Uint8Array[];
-  /** The lower bound of the next PK column subsequent to those in `eq_conds`. */
-  lowerBound:
-    | ScanRange_Bound
-    | undefined;
-  /** The upper bound of the next PK column subsequent to those in `eq_conds`. */
-  upperBound: ScanRange_Bound | undefined;
-}
-
-/** `None` represent unbounded. */
-export interface ScanRange_Bound {
-  value: Uint8Array;
-  inclusive: boolean;
-}
-
-export interface SourceNode {
-  sourceId: number;
-  columns: ColumnCatalog[];
-  properties: { [key: string]: string };
-  split: Uint8Array;
-  info: StreamSourceInfo | undefined;
-}
-
-export interface SourceNode_PropertiesEntry {
-  key: string;
-  value: string;
-}
-
-export interface ProjectNode {
-  selectList: ExprNode[];
-}
-
-export interface FilterNode {
-  searchCondition: ExprNode | undefined;
-}
-
-export interface InsertNode {
-  /** Id of the table to perform inserting. */
-  tableId: number;
-  /** Version of the table. */
-  tableVersionId: number;
-  columnIndices: number[];
-  /**
-   * An optional field and will be `None` for tables without user-defined pk.
-   * The `BatchInsertExecutor` should add a column with NULL value which will
-   * be filled in streaming.
-   */
-  rowIdIndex?: number | undefined;
-  returning: boolean;
-}
-
-export interface DeleteNode {
-  /** Id of the table to perform deleting. */
-  tableId: number;
-  /** Version of the table. */
-  tableVersionId: number;
-  returning: boolean;
-}
-
-export interface UpdateNode {
-  /** Id of the table to perform updating. */
-  tableId: number;
-  /** Version of the table.
*/ - tableVersionId: number; - exprs: ExprNode[]; - returning: boolean; -} - -export interface ValuesNode { - tuples: ValuesNode_ExprTuple[]; - fields: Field[]; -} - -export interface ValuesNode_ExprTuple { - cells: ExprNode[]; -} - -export interface SortNode { - columnOrders: ColumnOrder[]; -} - -export interface TopNNode { - columnOrders: ColumnOrder[]; - limit: number; - offset: number; - withTies: boolean; -} - -export interface GroupTopNNode { - columnOrders: ColumnOrder[]; - limit: number; - offset: number; - groupKey: number[]; - withTies: boolean; -} - -export interface LimitNode { - limit: number; - offset: number; -} - -export interface NestedLoopJoinNode { - joinType: JoinType; - joinCond: ExprNode | undefined; - outputIndices: number[]; -} - -export interface HashAggNode { - groupKey: number[]; - aggCalls: AggCall[]; -} - -export interface ExpandNode { - columnSubsets: ExpandNode_Subset[]; -} - -export interface ExpandNode_Subset { - columnIndices: number[]; -} - -export interface ProjectSetNode { - selectList: ProjectSetSelectItem[]; -} - -export interface SortAggNode { - groupKey: ExprNode[]; - aggCalls: AggCall[]; -} - -export interface HashJoinNode { - joinType: JoinType; - leftKey: number[]; - rightKey: number[]; - condition: ExprNode | undefined; - outputIndices: number[]; - /** - * Null safe means it treats `null = null` as true. - * Each key pair can be null safe independently. (left_key, right_key, null_safe) - */ - nullSafe: boolean[]; -} - -export interface SortMergeJoinNode { - joinType: JoinType; - leftKey: number[]; - rightKey: number[]; - direction: Direction; - outputIndices: number[]; -} - -export interface HopWindowNode { - timeCol: number; - windowSlide: IntervalUnit | undefined; - windowSize: IntervalUnit | undefined; - outputIndices: number[]; - windowStartExprs: ExprNode[]; - windowEndExprs: ExprNode[]; -} - -export interface TableFunctionNode { - tableFunction: TableFunction | undefined; -} - -/** Task is a running instance of Stage. */ -export interface TaskId { - queryId: string; - stageId: number; - taskId: number; -} - -/** - * Every task will create N buffers (channels) for parent operators to fetch results from, - * where N is the parallelism of parent stage. - */ -export interface TaskOutputId { - taskId: - | TaskId - | undefined; - /** The id of output channel to fetch from */ - outputId: number; -} - -export interface LocalExecutePlan { - plan: PlanFragment | undefined; - epoch: BatchQueryEpoch | undefined; -} - -/** ExchangeSource describes where to read results from children operators */ -export interface ExchangeSource { - taskOutputId: TaskOutputId | undefined; - host: HostAddress | undefined; - localExecutePlan?: { $case: "plan"; plan: LocalExecutePlan }; -} - -export interface ExchangeNode { - sources: ExchangeSource[]; - inputSchema: Field[]; -} - -export interface MergeSortExchangeNode { - exchange: ExchangeNode | undefined; - columnOrders: ColumnOrder[]; -} - -export interface LocalLookupJoinNode { - joinType: JoinType; - condition: ExprNode | undefined; - outerSideKey: number[]; - innerSideKey: number[]; - lookupPrefixLen: number; - innerSideTableDesc: StorageTableDesc | undefined; - innerSideVnodeMapping: number[]; - innerSideColumnIds: number[]; - outputIndices: number[]; - workerNodes: WorkerNode[]; - /** - * Null safe means it treats `null = null` as true. - * Each key pair can be null safe independently. 
(left_key, right_key, null_safe) - */ - nullSafe: boolean[]; -} - -/** - * RFC: A new schedule way for distributed lookup join - * https://github.com/risingwavelabs/rfcs/pull/6 - */ -export interface DistributedLookupJoinNode { - joinType: JoinType; - condition: ExprNode | undefined; - outerSideKey: number[]; - innerSideKey: number[]; - lookupPrefixLen: number; - innerSideTableDesc: StorageTableDesc | undefined; - innerSideColumnIds: number[]; - outputIndices: number[]; - /** - * Null safe means it treats `null = null` as true. - * Each key pair can be null safe independently. (left_key, right_key, null_safe) - */ - nullSafe: boolean[]; -} - -export interface UnionNode { -} - -export interface PlanNode { - children: PlanNode[]; - nodeBody?: - | { $case: "insert"; insert: InsertNode } - | { $case: "delete"; delete: DeleteNode } - | { $case: "update"; update: UpdateNode } - | { $case: "project"; project: ProjectNode } - | { $case: "hashAgg"; hashAgg: HashAggNode } - | { $case: "filter"; filter: FilterNode } - | { $case: "exchange"; exchange: ExchangeNode } - | { $case: "sort"; sort: SortNode } - | { $case: "nestedLoopJoin"; nestedLoopJoin: NestedLoopJoinNode } - | { $case: "topN"; topN: TopNNode } - | { $case: "sortAgg"; sortAgg: SortAggNode } - | { $case: "rowSeqScan"; rowSeqScan: RowSeqScanNode } - | { $case: "limit"; limit: LimitNode } - | { $case: "values"; values: ValuesNode } - | { $case: "hashJoin"; hashJoin: HashJoinNode } - | { $case: "mergeSortExchange"; mergeSortExchange: MergeSortExchangeNode } - | { $case: "hopWindow"; hopWindow: HopWindowNode } - | { $case: "tableFunction"; tableFunction: TableFunctionNode } - | { $case: "sysRowSeqScan"; sysRowSeqScan: SysRowSeqScanNode } - | { $case: "expand"; expand: ExpandNode } - | { $case: "localLookupJoin"; localLookupJoin: LocalLookupJoinNode } - | { $case: "projectSet"; projectSet: ProjectSetNode } - | { $case: "union"; union: UnionNode } - | { $case: "groupTopN"; groupTopN: GroupTopNNode } - | { $case: "distributedLookupJoin"; distributedLookupJoin: DistributedLookupJoinNode } - | { $case: "source"; source: SourceNode }; - identity: string; -} - -/** - * ExchangeInfo determines how to distribute results to tasks of next stage. - * - * Note that the fragment itself does not know the where are the receivers. 
Instead, it prepares results in - * N buffers and wait for parent operators (`Exchange` nodes) to pull data from a specified buffer - */ -export interface ExchangeInfo { - mode: ExchangeInfo_DistributionMode; - distribution?: { $case: "broadcastInfo"; broadcastInfo: ExchangeInfo_BroadcastInfo } | { - $case: "hashInfo"; - hashInfo: ExchangeInfo_HashInfo; - } | { $case: "consistentHashInfo"; consistentHashInfo: ExchangeInfo_ConsistentHashInfo }; -} - -export const ExchangeInfo_DistributionMode = { - /** UNSPECIFIED - No partitioning at all, used for root segment which aggregates query results */ - UNSPECIFIED: "UNSPECIFIED", - SINGLE: "SINGLE", - BROADCAST: "BROADCAST", - HASH: "HASH", - CONSISTENT_HASH: "CONSISTENT_HASH", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type ExchangeInfo_DistributionMode = - typeof ExchangeInfo_DistributionMode[keyof typeof ExchangeInfo_DistributionMode]; - -export function exchangeInfo_DistributionModeFromJSON(object: any): ExchangeInfo_DistributionMode { - switch (object) { - case 0: - case "UNSPECIFIED": - return ExchangeInfo_DistributionMode.UNSPECIFIED; - case 1: - case "SINGLE": - return ExchangeInfo_DistributionMode.SINGLE; - case 2: - case "BROADCAST": - return ExchangeInfo_DistributionMode.BROADCAST; - case 3: - case "HASH": - return ExchangeInfo_DistributionMode.HASH; - case 4: - case "CONSISTENT_HASH": - return ExchangeInfo_DistributionMode.CONSISTENT_HASH; - case -1: - case "UNRECOGNIZED": - default: - return ExchangeInfo_DistributionMode.UNRECOGNIZED; - } -} - -export function exchangeInfo_DistributionModeToJSON(object: ExchangeInfo_DistributionMode): string { - switch (object) { - case ExchangeInfo_DistributionMode.UNSPECIFIED: - return "UNSPECIFIED"; - case ExchangeInfo_DistributionMode.SINGLE: - return "SINGLE"; - case ExchangeInfo_DistributionMode.BROADCAST: - return "BROADCAST"; - case ExchangeInfo_DistributionMode.HASH: - return "HASH"; - case ExchangeInfo_DistributionMode.CONSISTENT_HASH: - return "CONSISTENT_HASH"; - case ExchangeInfo_DistributionMode.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface ExchangeInfo_BroadcastInfo { - count: number; -} - -export interface ExchangeInfo_HashInfo { - outputCount: number; - key: number[]; -} - -export interface ExchangeInfo_ConsistentHashInfo { - /** `vmap` maps virtual node to down stream task id */ - vmap: number[]; - key: number[]; -} - -export interface PlanFragment { - root: PlanNode | undefined; - exchangeInfo: ExchangeInfo | undefined; -} - -function createBaseRowSeqScanNode(): RowSeqScanNode { - return { - tableDesc: undefined, - columnIds: [], - scanRanges: [], - vnodeBitmap: undefined, - ordered: false, - chunkSize: undefined, - }; -} - -export const RowSeqScanNode = { - fromJSON(object: any): RowSeqScanNode { - return { - tableDesc: isSet(object.tableDesc) ? StorageTableDesc.fromJSON(object.tableDesc) : undefined, - columnIds: Array.isArray(object?.columnIds) ? object.columnIds.map((e: any) => Number(e)) : [], - scanRanges: Array.isArray(object?.scanRanges) ? object.scanRanges.map((e: any) => ScanRange.fromJSON(e)) : [], - vnodeBitmap: isSet(object.vnodeBitmap) ? Buffer.fromJSON(object.vnodeBitmap) : undefined, - ordered: isSet(object.ordered) ? Boolean(object.ordered) : false, - chunkSize: isSet(object.chunkSize) ? RowSeqScanNode_ChunkSize.fromJSON(object.chunkSize) : undefined, - }; - }, - - toJSON(message: RowSeqScanNode): unknown { - const obj: any = {}; - message.tableDesc !== undefined && - (obj.tableDesc = message.tableDesc ? 
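For illustration, the enum helpers above accept either the numeric wire value or the string name, so both JSON encodings decode to the same variant; a minimal TypeScript sketch using the cases from the switch:

// Both the proto3 tag number and the string name decode to the same variant.
exchangeInfo_DistributionModeFromJSON(2);            // => "BROADCAST"
exchangeInfo_DistributionModeFromJSON("BROADCAST");  // => "BROADCAST"
// Unknown inputs fall through to UNRECOGNIZED instead of throwing.
exchangeInfo_DistributionModeFromJSON(99);           // => "UNRECOGNIZED"
exchangeInfo_DistributionModeToJSON(ExchangeInfo_DistributionMode.HASH); // => "HASH"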
StorageTableDesc.toJSON(message.tableDesc) : undefined);
-    if (message.columnIds) {
-      obj.columnIds = message.columnIds.map((e) => Math.round(e));
-    } else {
-      obj.columnIds = [];
-    }
-    if (message.scanRanges) {
-      obj.scanRanges = message.scanRanges.map((e) => e ? ScanRange.toJSON(e) : undefined);
-    } else {
-      obj.scanRanges = [];
-    }
-    message.vnodeBitmap !== undefined &&
-      (obj.vnodeBitmap = message.vnodeBitmap ? Buffer.toJSON(message.vnodeBitmap) : undefined);
-    message.ordered !== undefined && (obj.ordered = message.ordered);
-    message.chunkSize !== undefined &&
-      (obj.chunkSize = message.chunkSize ? RowSeqScanNode_ChunkSize.toJSON(message.chunkSize) : undefined);
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<RowSeqScanNode>, I>>(object: I): RowSeqScanNode {
-    const message = createBaseRowSeqScanNode();
-    message.tableDesc = (object.tableDesc !== undefined && object.tableDesc !== null)
-      ? StorageTableDesc.fromPartial(object.tableDesc)
-      : undefined;
-    message.columnIds = object.columnIds?.map((e) => e) || [];
-    message.scanRanges = object.scanRanges?.map((e) => ScanRange.fromPartial(e)) || [];
-    message.vnodeBitmap = (object.vnodeBitmap !== undefined && object.vnodeBitmap !== null)
-      ? Buffer.fromPartial(object.vnodeBitmap)
-      : undefined;
-    message.ordered = object.ordered ?? false;
-    message.chunkSize = (object.chunkSize !== undefined && object.chunkSize !== null)
-      ? RowSeqScanNode_ChunkSize.fromPartial(object.chunkSize)
-      : undefined;
-    return message;
-  },
-};
-
-function createBaseRowSeqScanNode_ChunkSize(): RowSeqScanNode_ChunkSize {
-  return { chunkSize: 0 };
-}
-
-export const RowSeqScanNode_ChunkSize = {
-  fromJSON(object: any): RowSeqScanNode_ChunkSize {
-    return { chunkSize: isSet(object.chunkSize) ? Number(object.chunkSize) : 0 };
-  },
-
-  toJSON(message: RowSeqScanNode_ChunkSize): unknown {
-    const obj: any = {};
-    message.chunkSize !== undefined && (obj.chunkSize = Math.round(message.chunkSize));
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<RowSeqScanNode_ChunkSize>, I>>(object: I): RowSeqScanNode_ChunkSize {
-    const message = createBaseRowSeqScanNode_ChunkSize();
-    message.chunkSize = object.chunkSize ?? 0;
-    return message;
-  },
-};
-
-function createBaseSysRowSeqScanNode(): SysRowSeqScanNode {
-  return { tableId: 0, columnDescs: [] };
-}
-
-export const SysRowSeqScanNode = {
-  fromJSON(object: any): SysRowSeqScanNode {
-    return {
-      tableId: isSet(object.tableId) ? Number(object.tableId) : 0,
-      columnDescs: Array.isArray(object?.columnDescs) ? object.columnDescs.map((e: any) => ColumnDesc.fromJSON(e)) : [],
-    };
-  },
-
-  toJSON(message: SysRowSeqScanNode): unknown {
-    const obj: any = {};
-    message.tableId !== undefined && (obj.tableId = Math.round(message.tableId));
-    if (message.columnDescs) {
-      obj.columnDescs = message.columnDescs.map((e) => e ? ColumnDesc.toJSON(e) : undefined);
-    } else {
-      obj.columnDescs = [];
-    }
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<SysRowSeqScanNode>, I>>(object: I): SysRowSeqScanNode {
-    const message = createBaseSysRowSeqScanNode();
-    message.tableId = object.tableId ?? 0;
-    message.columnDescs = object.columnDescs?.map((e) => ColumnDesc.fromPartial(e)) || [];
-    return message;
-  },
-};
-
-function createBaseScanRange(): ScanRange {
-  return { eqConds: [], lowerBound: undefined, upperBound: undefined };
-}
-
-export const ScanRange = {
-  fromJSON(object: any): ScanRange {
-    return {
-      eqConds: Array.isArray(object?.eqConds) ? object.eqConds.map((e: any) => bytesFromBase64(e)) : [],
-      lowerBound: isSet(object.lowerBound) ? ScanRange_Bound.fromJSON(object.lowerBound) : undefined,
-      upperBound: isSet(object.upperBound) ?
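The restored `fromPartial<I extends Exact<DeepPartial<...>, I>>` signatures above follow the usual ts-proto pattern: omitted fields fall back to proto3 defaults, while excess keys are rejected at compile time. A minimal sketch with illustrative field values:

const scan = RowSeqScanNode.fromPartial({ ordered: true, columnIds: [0, 2] });
// Omitted fields take defaults: scan.scanRanges is [] and scan.tableDesc is undefined.
// Misspelled or excess keys fail to type-check thanks to Exact<DeepPartial<...>, I>:
// RowSeqScanNode.fromPartial({ odered: true }); // compile-time error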
ScanRange_Bound.fromJSON(object.upperBound) : undefined, - }; - }, - - toJSON(message: ScanRange): unknown { - const obj: any = {}; - if (message.eqConds) { - obj.eqConds = message.eqConds.map((e) => base64FromBytes(e !== undefined ? e : new Uint8Array())); - } else { - obj.eqConds = []; - } - message.lowerBound !== undefined && - (obj.lowerBound = message.lowerBound ? ScanRange_Bound.toJSON(message.lowerBound) : undefined); - message.upperBound !== undefined && - (obj.upperBound = message.upperBound ? ScanRange_Bound.toJSON(message.upperBound) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ScanRange { - const message = createBaseScanRange(); - message.eqConds = object.eqConds?.map((e) => e) || []; - message.lowerBound = (object.lowerBound !== undefined && object.lowerBound !== null) - ? ScanRange_Bound.fromPartial(object.lowerBound) - : undefined; - message.upperBound = (object.upperBound !== undefined && object.upperBound !== null) - ? ScanRange_Bound.fromPartial(object.upperBound) - : undefined; - return message; - }, -}; - -function createBaseScanRange_Bound(): ScanRange_Bound { - return { value: new Uint8Array(), inclusive: false }; -} - -export const ScanRange_Bound = { - fromJSON(object: any): ScanRange_Bound { - return { - value: isSet(object.value) ? bytesFromBase64(object.value) : new Uint8Array(), - inclusive: isSet(object.inclusive) ? Boolean(object.inclusive) : false, - }; - }, - - toJSON(message: ScanRange_Bound): unknown { - const obj: any = {}; - message.value !== undefined && - (obj.value = base64FromBytes(message.value !== undefined ? message.value : new Uint8Array())); - message.inclusive !== undefined && (obj.inclusive = message.inclusive); - return obj; - }, - - fromPartial, I>>(object: I): ScanRange_Bound { - const message = createBaseScanRange_Bound(); - message.value = object.value ?? new Uint8Array(); - message.inclusive = object.inclusive ?? false; - return message; - }, -}; - -function createBaseSourceNode(): SourceNode { - return { sourceId: 0, columns: [], properties: {}, split: new Uint8Array(), info: undefined }; -} - -export const SourceNode = { - fromJSON(object: any): SourceNode { - return { - sourceId: isSet(object.sourceId) ? Number(object.sourceId) : 0, - columns: Array.isArray(object?.columns) ? object.columns.map((e: any) => ColumnCatalog.fromJSON(e)) : [], - properties: isObject(object.properties) - ? Object.entries(object.properties).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - split: isSet(object.split) ? bytesFromBase64(object.split) : new Uint8Array(), - info: isSet(object.info) ? StreamSourceInfo.fromJSON(object.info) : undefined, - }; - }, - - toJSON(message: SourceNode): unknown { - const obj: any = {}; - message.sourceId !== undefined && (obj.sourceId = Math.round(message.sourceId)); - if (message.columns) { - obj.columns = message.columns.map((e) => e ? ColumnCatalog.toJSON(e) : undefined); - } else { - obj.columns = []; - } - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - message.split !== undefined && - (obj.split = base64FromBytes(message.split !== undefined ? message.split : new Uint8Array())); - message.info !== undefined && (obj.info = message.info ? StreamSourceInfo.toJSON(message.info) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): SourceNode { - const message = createBaseSourceNode(); - message.sourceId = object.sourceId ?? 
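To make the `ScanRange` shape documented earlier concrete, a sketch of an index range scan over a two-column PK, with placeholder bytes standing in for the real serialized key values:

// Pin the first PK column with eqConds; bound the second with lower/upper.
const range = ScanRange.fromPartial({
  eqConds: [new Uint8Array([0x01])], // encoded value of the first PK column (placeholder)
  lowerBound: { value: new Uint8Array([0x10]), inclusive: true },
  upperBound: { value: new Uint8Array([0x20]), inclusive: false },
});
// Leaving a bound undefined makes the range unbounded on that side;
// an empty scanRanges list on RowSeqScanNode means a full table scan.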
0; - message.columns = object.columns?.map((e) => ColumnCatalog.fromPartial(e)) || []; - message.properties = Object.entries(object.properties ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.split = object.split ?? new Uint8Array(); - message.info = (object.info !== undefined && object.info !== null) - ? StreamSourceInfo.fromPartial(object.info) - : undefined; - return message; - }, -}; - -function createBaseSourceNode_PropertiesEntry(): SourceNode_PropertiesEntry { - return { key: "", value: "" }; -} - -export const SourceNode_PropertiesEntry = { - fromJSON(object: any): SourceNode_PropertiesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: SourceNode_PropertiesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>(object: I): SourceNode_PropertiesEntry { - const message = createBaseSourceNode_PropertiesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseProjectNode(): ProjectNode { - return { selectList: [] }; -} - -export const ProjectNode = { - fromJSON(object: any): ProjectNode { - return { - selectList: Array.isArray(object?.selectList) ? object.selectList.map((e: any) => ExprNode.fromJSON(e)) : [], - }; - }, - - toJSON(message: ProjectNode): unknown { - const obj: any = {}; - if (message.selectList) { - obj.selectList = message.selectList.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.selectList = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ProjectNode { - const message = createBaseProjectNode(); - message.selectList = object.selectList?.map((e) => ExprNode.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseFilterNode(): FilterNode { - return { searchCondition: undefined }; -} - -export const FilterNode = { - fromJSON(object: any): FilterNode { - return { searchCondition: isSet(object.searchCondition) ? ExprNode.fromJSON(object.searchCondition) : undefined }; - }, - - toJSON(message: FilterNode): unknown { - const obj: any = {}; - message.searchCondition !== undefined && - (obj.searchCondition = message.searchCondition ? ExprNode.toJSON(message.searchCondition) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): FilterNode { - const message = createBaseFilterNode(); - message.searchCondition = (object.searchCondition !== undefined && object.searchCondition !== null) - ? ExprNode.fromPartial(object.searchCondition) - : undefined; - return message; - }, -}; - -function createBaseInsertNode(): InsertNode { - return { tableId: 0, tableVersionId: 0, columnIndices: [], rowIdIndex: undefined, returning: false }; -} - -export const InsertNode = { - fromJSON(object: any): InsertNode { - return { - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - tableVersionId: isSet(object.tableVersionId) ? Number(object.tableVersionId) : 0, - columnIndices: Array.isArray(object?.columnIndices) ? object.columnIndices.map((e: any) => Number(e)) : [], - rowIdIndex: isSet(object.rowIdIndex) ? Number(object.rowIdIndex) : undefined, - returning: isSet(object.returning) ? 
Boolean(object.returning) : false, - }; - }, - - toJSON(message: InsertNode): unknown { - const obj: any = {}; - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - message.tableVersionId !== undefined && (obj.tableVersionId = Math.round(message.tableVersionId)); - if (message.columnIndices) { - obj.columnIndices = message.columnIndices.map((e) => Math.round(e)); - } else { - obj.columnIndices = []; - } - message.rowIdIndex !== undefined && (obj.rowIdIndex = Math.round(message.rowIdIndex)); - message.returning !== undefined && (obj.returning = message.returning); - return obj; - }, - - fromPartial, I>>(object: I): InsertNode { - const message = createBaseInsertNode(); - message.tableId = object.tableId ?? 0; - message.tableVersionId = object.tableVersionId ?? 0; - message.columnIndices = object.columnIndices?.map((e) => e) || []; - message.rowIdIndex = object.rowIdIndex ?? undefined; - message.returning = object.returning ?? false; - return message; - }, -}; - -function createBaseDeleteNode(): DeleteNode { - return { tableId: 0, tableVersionId: 0, returning: false }; -} - -export const DeleteNode = { - fromJSON(object: any): DeleteNode { - return { - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - tableVersionId: isSet(object.tableVersionId) ? Number(object.tableVersionId) : 0, - returning: isSet(object.returning) ? Boolean(object.returning) : false, - }; - }, - - toJSON(message: DeleteNode): unknown { - const obj: any = {}; - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - message.tableVersionId !== undefined && (obj.tableVersionId = Math.round(message.tableVersionId)); - message.returning !== undefined && (obj.returning = message.returning); - return obj; - }, - - fromPartial, I>>(object: I): DeleteNode { - const message = createBaseDeleteNode(); - message.tableId = object.tableId ?? 0; - message.tableVersionId = object.tableVersionId ?? 0; - message.returning = object.returning ?? false; - return message; - }, -}; - -function createBaseUpdateNode(): UpdateNode { - return { tableId: 0, tableVersionId: 0, exprs: [], returning: false }; -} - -export const UpdateNode = { - fromJSON(object: any): UpdateNode { - return { - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - tableVersionId: isSet(object.tableVersionId) ? Number(object.tableVersionId) : 0, - exprs: Array.isArray(object?.exprs) ? object.exprs.map((e: any) => ExprNode.fromJSON(e)) : [], - returning: isSet(object.returning) ? Boolean(object.returning) : false, - }; - }, - - toJSON(message: UpdateNode): unknown { - const obj: any = {}; - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - message.tableVersionId !== undefined && (obj.tableVersionId = Math.round(message.tableVersionId)); - if (message.exprs) { - obj.exprs = message.exprs.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.exprs = []; - } - message.returning !== undefined && (obj.returning = message.returning); - return obj; - }, - - fromPartial, I>>(object: I): UpdateNode { - const message = createBaseUpdateNode(); - message.tableId = object.tableId ?? 0; - message.tableVersionId = object.tableVersionId ?? 0; - message.exprs = object.exprs?.map((e) => ExprNode.fromPartial(e)) || []; - message.returning = object.returning ?? 
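`rowIdIndex` is declared optional, and the codecs above preserve the absent/present distinction rather than defaulting it to 0. A minimal sketch:

// For a table without a user-defined PK, rowIdIndex stays undefined
// and toJSON omits the key entirely.
const noPk = InsertNode.fromPartial({ tableId: 1, tableVersionId: 1 });
// noPk.rowIdIndex === undefined
const withPk = InsertNode.fromPartial({ tableId: 1, tableVersionId: 1, rowIdIndex: 0 });
// withPk.rowIdIndex === 0, which toJSON does emit: undefined and 0 are distinct here.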
false; - return message; - }, -}; - -function createBaseValuesNode(): ValuesNode { - return { tuples: [], fields: [] }; -} - -export const ValuesNode = { - fromJSON(object: any): ValuesNode { - return { - tuples: Array.isArray(object?.tuples) ? object.tuples.map((e: any) => ValuesNode_ExprTuple.fromJSON(e)) : [], - fields: Array.isArray(object?.fields) ? object.fields.map((e: any) => Field.fromJSON(e)) : [], - }; - }, - - toJSON(message: ValuesNode): unknown { - const obj: any = {}; - if (message.tuples) { - obj.tuples = message.tuples.map((e) => e ? ValuesNode_ExprTuple.toJSON(e) : undefined); - } else { - obj.tuples = []; - } - if (message.fields) { - obj.fields = message.fields.map((e) => e ? Field.toJSON(e) : undefined); - } else { - obj.fields = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ValuesNode { - const message = createBaseValuesNode(); - message.tuples = object.tuples?.map((e) => ValuesNode_ExprTuple.fromPartial(e)) || []; - message.fields = object.fields?.map((e) => Field.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseValuesNode_ExprTuple(): ValuesNode_ExprTuple { - return { cells: [] }; -} - -export const ValuesNode_ExprTuple = { - fromJSON(object: any): ValuesNode_ExprTuple { - return { cells: Array.isArray(object?.cells) ? object.cells.map((e: any) => ExprNode.fromJSON(e)) : [] }; - }, - - toJSON(message: ValuesNode_ExprTuple): unknown { - const obj: any = {}; - if (message.cells) { - obj.cells = message.cells.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.cells = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ValuesNode_ExprTuple { - const message = createBaseValuesNode_ExprTuple(); - message.cells = object.cells?.map((e) => ExprNode.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseSortNode(): SortNode { - return { columnOrders: [] }; -} - -export const SortNode = { - fromJSON(object: any): SortNode { - return { - columnOrders: Array.isArray(object?.columnOrders) - ? object.columnOrders.map((e: any) => ColumnOrder.fromJSON(e)) - : [], - }; - }, - - toJSON(message: SortNode): unknown { - const obj: any = {}; - if (message.columnOrders) { - obj.columnOrders = message.columnOrders.map((e) => e ? ColumnOrder.toJSON(e) : undefined); - } else { - obj.columnOrders = []; - } - return obj; - }, - - fromPartial, I>>(object: I): SortNode { - const message = createBaseSortNode(); - message.columnOrders = object.columnOrders?.map((e) => ColumnOrder.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseTopNNode(): TopNNode { - return { columnOrders: [], limit: 0, offset: 0, withTies: false }; -} - -export const TopNNode = { - fromJSON(object: any): TopNNode { - return { - columnOrders: Array.isArray(object?.columnOrders) - ? object.columnOrders.map((e: any) => ColumnOrder.fromJSON(e)) - : [], - limit: isSet(object.limit) ? Number(object.limit) : 0, - offset: isSet(object.offset) ? Number(object.offset) : 0, - withTies: isSet(object.withTies) ? Boolean(object.withTies) : false, - }; - }, - - toJSON(message: TopNNode): unknown { - const obj: any = {}; - if (message.columnOrders) { - obj.columnOrders = message.columnOrders.map((e) => e ? 
ColumnOrder.toJSON(e) : undefined); - } else { - obj.columnOrders = []; - } - message.limit !== undefined && (obj.limit = Math.round(message.limit)); - message.offset !== undefined && (obj.offset = Math.round(message.offset)); - message.withTies !== undefined && (obj.withTies = message.withTies); - return obj; - }, - - fromPartial, I>>(object: I): TopNNode { - const message = createBaseTopNNode(); - message.columnOrders = object.columnOrders?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.limit = object.limit ?? 0; - message.offset = object.offset ?? 0; - message.withTies = object.withTies ?? false; - return message; - }, -}; - -function createBaseGroupTopNNode(): GroupTopNNode { - return { columnOrders: [], limit: 0, offset: 0, groupKey: [], withTies: false }; -} - -export const GroupTopNNode = { - fromJSON(object: any): GroupTopNNode { - return { - columnOrders: Array.isArray(object?.columnOrders) - ? object.columnOrders.map((e: any) => ColumnOrder.fromJSON(e)) - : [], - limit: isSet(object.limit) ? Number(object.limit) : 0, - offset: isSet(object.offset) ? Number(object.offset) : 0, - groupKey: Array.isArray(object?.groupKey) - ? object.groupKey.map((e: any) => Number(e)) - : [], - withTies: isSet(object.withTies) ? Boolean(object.withTies) : false, - }; - }, - - toJSON(message: GroupTopNNode): unknown { - const obj: any = {}; - if (message.columnOrders) { - obj.columnOrders = message.columnOrders.map((e) => e ? ColumnOrder.toJSON(e) : undefined); - } else { - obj.columnOrders = []; - } - message.limit !== undefined && (obj.limit = Math.round(message.limit)); - message.offset !== undefined && (obj.offset = Math.round(message.offset)); - if (message.groupKey) { - obj.groupKey = message.groupKey.map((e) => Math.round(e)); - } else { - obj.groupKey = []; - } - message.withTies !== undefined && (obj.withTies = message.withTies); - return obj; - }, - - fromPartial, I>>(object: I): GroupTopNNode { - const message = createBaseGroupTopNNode(); - message.columnOrders = object.columnOrders?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.limit = object.limit ?? 0; - message.offset = object.offset ?? 0; - message.groupKey = object.groupKey?.map((e) => e) || []; - message.withTies = object.withTies ?? false; - return message; - }, -}; - -function createBaseLimitNode(): LimitNode { - return { limit: 0, offset: 0 }; -} - -export const LimitNode = { - fromJSON(object: any): LimitNode { - return { - limit: isSet(object.limit) ? Number(object.limit) : 0, - offset: isSet(object.offset) ? Number(object.offset) : 0, - }; - }, - - toJSON(message: LimitNode): unknown { - const obj: any = {}; - message.limit !== undefined && (obj.limit = Math.round(message.limit)); - message.offset !== undefined && (obj.offset = Math.round(message.offset)); - return obj; - }, - - fromPartial, I>>(object: I): LimitNode { - const message = createBaseLimitNode(); - message.limit = object.limit ?? 0; - message.offset = object.offset ?? 0; - return message; - }, -}; - -function createBaseNestedLoopJoinNode(): NestedLoopJoinNode { - return { joinType: JoinType.UNSPECIFIED, joinCond: undefined, outputIndices: [] }; -} - -export const NestedLoopJoinNode = { - fromJSON(object: any): NestedLoopJoinNode { - return { - joinType: isSet(object.joinType) ? joinTypeFromJSON(object.joinType) : JoinType.UNSPECIFIED, - joinCond: isSet(object.joinCond) ? ExprNode.fromJSON(object.joinCond) : undefined, - outputIndices: Array.isArray(object?.outputIndices) ? 
object.outputIndices.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: NestedLoopJoinNode): unknown { - const obj: any = {}; - message.joinType !== undefined && (obj.joinType = joinTypeToJSON(message.joinType)); - message.joinCond !== undefined && (obj.joinCond = message.joinCond ? ExprNode.toJSON(message.joinCond) : undefined); - if (message.outputIndices) { - obj.outputIndices = message.outputIndices.map((e) => Math.round(e)); - } else { - obj.outputIndices = []; - } - return obj; - }, - - fromPartial, I>>(object: I): NestedLoopJoinNode { - const message = createBaseNestedLoopJoinNode(); - message.joinType = object.joinType ?? JoinType.UNSPECIFIED; - message.joinCond = (object.joinCond !== undefined && object.joinCond !== null) - ? ExprNode.fromPartial(object.joinCond) - : undefined; - message.outputIndices = object.outputIndices?.map((e) => e) || []; - return message; - }, -}; - -function createBaseHashAggNode(): HashAggNode { - return { groupKey: [], aggCalls: [] }; -} - -export const HashAggNode = { - fromJSON(object: any): HashAggNode { - return { - groupKey: Array.isArray(object?.groupKey) ? object.groupKey.map((e: any) => Number(e)) : [], - aggCalls: Array.isArray(object?.aggCalls) ? object.aggCalls.map((e: any) => AggCall.fromJSON(e)) : [], - }; - }, - - toJSON(message: HashAggNode): unknown { - const obj: any = {}; - if (message.groupKey) { - obj.groupKey = message.groupKey.map((e) => Math.round(e)); - } else { - obj.groupKey = []; - } - if (message.aggCalls) { - obj.aggCalls = message.aggCalls.map((e) => e ? AggCall.toJSON(e) : undefined); - } else { - obj.aggCalls = []; - } - return obj; - }, - - fromPartial, I>>(object: I): HashAggNode { - const message = createBaseHashAggNode(); - message.groupKey = object.groupKey?.map((e) => e) || []; - message.aggCalls = object.aggCalls?.map((e) => AggCall.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseExpandNode(): ExpandNode { - return { columnSubsets: [] }; -} - -export const ExpandNode = { - fromJSON(object: any): ExpandNode { - return { - columnSubsets: Array.isArray(object?.columnSubsets) - ? object.columnSubsets.map((e: any) => ExpandNode_Subset.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ExpandNode): unknown { - const obj: any = {}; - if (message.columnSubsets) { - obj.columnSubsets = message.columnSubsets.map((e) => e ? ExpandNode_Subset.toJSON(e) : undefined); - } else { - obj.columnSubsets = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ExpandNode { - const message = createBaseExpandNode(); - message.columnSubsets = object.columnSubsets?.map((e) => ExpandNode_Subset.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseExpandNode_Subset(): ExpandNode_Subset { - return { columnIndices: [] }; -} - -export const ExpandNode_Subset = { - fromJSON(object: any): ExpandNode_Subset { - return { - columnIndices: Array.isArray(object?.columnIndices) ? 
object.columnIndices.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: ExpandNode_Subset): unknown { - const obj: any = {}; - if (message.columnIndices) { - obj.columnIndices = message.columnIndices.map((e) => Math.round(e)); - } else { - obj.columnIndices = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ExpandNode_Subset { - const message = createBaseExpandNode_Subset(); - message.columnIndices = object.columnIndices?.map((e) => e) || []; - return message; - }, -}; - -function createBaseProjectSetNode(): ProjectSetNode { - return { selectList: [] }; -} - -export const ProjectSetNode = { - fromJSON(object: any): ProjectSetNode { - return { - selectList: Array.isArray(object?.selectList) - ? object.selectList.map((e: any) => ProjectSetSelectItem.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ProjectSetNode): unknown { - const obj: any = {}; - if (message.selectList) { - obj.selectList = message.selectList.map((e) => e ? ProjectSetSelectItem.toJSON(e) : undefined); - } else { - obj.selectList = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ProjectSetNode { - const message = createBaseProjectSetNode(); - message.selectList = object.selectList?.map((e) => ProjectSetSelectItem.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseSortAggNode(): SortAggNode { - return { groupKey: [], aggCalls: [] }; -} - -export const SortAggNode = { - fromJSON(object: any): SortAggNode { - return { - groupKey: Array.isArray(object?.groupKey) ? object.groupKey.map((e: any) => ExprNode.fromJSON(e)) : [], - aggCalls: Array.isArray(object?.aggCalls) ? object.aggCalls.map((e: any) => AggCall.fromJSON(e)) : [], - }; - }, - - toJSON(message: SortAggNode): unknown { - const obj: any = {}; - if (message.groupKey) { - obj.groupKey = message.groupKey.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.groupKey = []; - } - if (message.aggCalls) { - obj.aggCalls = message.aggCalls.map((e) => e ? AggCall.toJSON(e) : undefined); - } else { - obj.aggCalls = []; - } - return obj; - }, - - fromPartial, I>>(object: I): SortAggNode { - const message = createBaseSortAggNode(); - message.groupKey = object.groupKey?.map((e) => ExprNode.fromPartial(e)) || []; - message.aggCalls = object.aggCalls?.map((e) => AggCall.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseHashJoinNode(): HashJoinNode { - return { - joinType: JoinType.UNSPECIFIED, - leftKey: [], - rightKey: [], - condition: undefined, - outputIndices: [], - nullSafe: [], - }; -} - -export const HashJoinNode = { - fromJSON(object: any): HashJoinNode { - return { - joinType: isSet(object.joinType) ? joinTypeFromJSON(object.joinType) : JoinType.UNSPECIFIED, - leftKey: Array.isArray(object?.leftKey) ? object.leftKey.map((e: any) => Number(e)) : [], - rightKey: Array.isArray(object?.rightKey) ? object.rightKey.map((e: any) => Number(e)) : [], - condition: isSet(object.condition) ? ExprNode.fromJSON(object.condition) : undefined, - outputIndices: Array.isArray(object?.outputIndices) ? object.outputIndices.map((e: any) => Number(e)) : [], - nullSafe: Array.isArray(object?.nullSafe) ? 
object.nullSafe.map((e: any) => Boolean(e)) : [], - }; - }, - - toJSON(message: HashJoinNode): unknown { - const obj: any = {}; - message.joinType !== undefined && (obj.joinType = joinTypeToJSON(message.joinType)); - if (message.leftKey) { - obj.leftKey = message.leftKey.map((e) => Math.round(e)); - } else { - obj.leftKey = []; - } - if (message.rightKey) { - obj.rightKey = message.rightKey.map((e) => Math.round(e)); - } else { - obj.rightKey = []; - } - message.condition !== undefined && - (obj.condition = message.condition ? ExprNode.toJSON(message.condition) : undefined); - if (message.outputIndices) { - obj.outputIndices = message.outputIndices.map((e) => Math.round(e)); - } else { - obj.outputIndices = []; - } - if (message.nullSafe) { - obj.nullSafe = message.nullSafe.map((e) => e); - } else { - obj.nullSafe = []; - } - return obj; - }, - - fromPartial, I>>(object: I): HashJoinNode { - const message = createBaseHashJoinNode(); - message.joinType = object.joinType ?? JoinType.UNSPECIFIED; - message.leftKey = object.leftKey?.map((e) => e) || []; - message.rightKey = object.rightKey?.map((e) => e) || []; - message.condition = (object.condition !== undefined && object.condition !== null) - ? ExprNode.fromPartial(object.condition) - : undefined; - message.outputIndices = object.outputIndices?.map((e) => e) || []; - message.nullSafe = object.nullSafe?.map((e) => e) || []; - return message; - }, -}; - -function createBaseSortMergeJoinNode(): SortMergeJoinNode { - return { - joinType: JoinType.UNSPECIFIED, - leftKey: [], - rightKey: [], - direction: Direction.DIRECTION_UNSPECIFIED, - outputIndices: [], - }; -} - -export const SortMergeJoinNode = { - fromJSON(object: any): SortMergeJoinNode { - return { - joinType: isSet(object.joinType) ? joinTypeFromJSON(object.joinType) : JoinType.UNSPECIFIED, - leftKey: Array.isArray(object?.leftKey) ? object.leftKey.map((e: any) => Number(e)) : [], - rightKey: Array.isArray(object?.rightKey) ? object.rightKey.map((e: any) => Number(e)) : [], - direction: isSet(object.direction) ? directionFromJSON(object.direction) : Direction.DIRECTION_UNSPECIFIED, - outputIndices: Array.isArray(object?.outputIndices) ? object.outputIndices.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: SortMergeJoinNode): unknown { - const obj: any = {}; - message.joinType !== undefined && (obj.joinType = joinTypeToJSON(message.joinType)); - if (message.leftKey) { - obj.leftKey = message.leftKey.map((e) => Math.round(e)); - } else { - obj.leftKey = []; - } - if (message.rightKey) { - obj.rightKey = message.rightKey.map((e) => Math.round(e)); - } else { - obj.rightKey = []; - } - message.direction !== undefined && (obj.direction = directionToJSON(message.direction)); - if (message.outputIndices) { - obj.outputIndices = message.outputIndices.map((e) => Math.round(e)); - } else { - obj.outputIndices = []; - } - return obj; - }, - - fromPartial, I>>(object: I): SortMergeJoinNode { - const message = createBaseSortMergeJoinNode(); - message.joinType = object.joinType ?? JoinType.UNSPECIFIED; - message.leftKey = object.leftKey?.map((e) => e) || []; - message.rightKey = object.rightKey?.map((e) => e) || []; - message.direction = object.direction ?? 
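The `nullSafe` flags are positional, one per equi-join key pair, as the comment on the message notes. A sketch of a two-key hash join where only the second comparison matches NULL with NULL (column indices are illustrative, and `JoinType.INNER` is assumed to be among the generated variants):

// left.a = right.a AND left.b IS NOT DISTINCT FROM right.b
const join = HashJoinNode.fromPartial({
  joinType: JoinType.INNER,
  leftKey: [0, 1],
  rightKey: [0, 1],
  nullSafe: [false, true], // only the second key pair is null-safe
});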
Direction.DIRECTION_UNSPECIFIED; - message.outputIndices = object.outputIndices?.map((e) => e) || []; - return message; - }, -}; - -function createBaseHopWindowNode(): HopWindowNode { - return { - timeCol: 0, - windowSlide: undefined, - windowSize: undefined, - outputIndices: [], - windowStartExprs: [], - windowEndExprs: [], - }; -} - -export const HopWindowNode = { - fromJSON(object: any): HopWindowNode { - return { - timeCol: isSet(object.timeCol) ? Number(object.timeCol) : 0, - windowSlide: isSet(object.windowSlide) ? IntervalUnit.fromJSON(object.windowSlide) : undefined, - windowSize: isSet(object.windowSize) ? IntervalUnit.fromJSON(object.windowSize) : undefined, - outputIndices: Array.isArray(object?.outputIndices) ? object.outputIndices.map((e: any) => Number(e)) : [], - windowStartExprs: Array.isArray(object?.windowStartExprs) - ? object.windowStartExprs.map((e: any) => ExprNode.fromJSON(e)) - : [], - windowEndExprs: Array.isArray(object?.windowEndExprs) - ? object.windowEndExprs.map((e: any) => ExprNode.fromJSON(e)) - : [], - }; - }, - - toJSON(message: HopWindowNode): unknown { - const obj: any = {}; - message.timeCol !== undefined && (obj.timeCol = Math.round(message.timeCol)); - message.windowSlide !== undefined && - (obj.windowSlide = message.windowSlide ? IntervalUnit.toJSON(message.windowSlide) : undefined); - message.windowSize !== undefined && - (obj.windowSize = message.windowSize ? IntervalUnit.toJSON(message.windowSize) : undefined); - if (message.outputIndices) { - obj.outputIndices = message.outputIndices.map((e) => Math.round(e)); - } else { - obj.outputIndices = []; - } - if (message.windowStartExprs) { - obj.windowStartExprs = message.windowStartExprs.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.windowStartExprs = []; - } - if (message.windowEndExprs) { - obj.windowEndExprs = message.windowEndExprs.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.windowEndExprs = []; - } - return obj; - }, - - fromPartial, I>>(object: I): HopWindowNode { - const message = createBaseHopWindowNode(); - message.timeCol = object.timeCol ?? 0; - message.windowSlide = (object.windowSlide !== undefined && object.windowSlide !== null) - ? IntervalUnit.fromPartial(object.windowSlide) - : undefined; - message.windowSize = (object.windowSize !== undefined && object.windowSize !== null) - ? IntervalUnit.fromPartial(object.windowSize) - : undefined; - message.outputIndices = object.outputIndices?.map((e) => e) || []; - message.windowStartExprs = object.windowStartExprs?.map((e) => ExprNode.fromPartial(e)) || []; - message.windowEndExprs = object.windowEndExprs?.map((e) => ExprNode.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseTableFunctionNode(): TableFunctionNode { - return { tableFunction: undefined }; -} - -export const TableFunctionNode = { - fromJSON(object: any): TableFunctionNode { - return { tableFunction: isSet(object.tableFunction) ? TableFunction.fromJSON(object.tableFunction) : undefined }; - }, - - toJSON(message: TableFunctionNode): unknown { - const obj: any = {}; - message.tableFunction !== undefined && - (obj.tableFunction = message.tableFunction ? TableFunction.toJSON(message.tableFunction) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): TableFunctionNode { - const message = createBaseTableFunctionNode(); - message.tableFunction = (object.tableFunction !== undefined && object.tableFunction !== null) - ? 
TableFunction.fromPartial(object.tableFunction) - : undefined; - return message; - }, -}; - -function createBaseTaskId(): TaskId { - return { queryId: "", stageId: 0, taskId: 0 }; -} - -export const TaskId = { - fromJSON(object: any): TaskId { - return { - queryId: isSet(object.queryId) ? String(object.queryId) : "", - stageId: isSet(object.stageId) ? Number(object.stageId) : 0, - taskId: isSet(object.taskId) ? Number(object.taskId) : 0, - }; - }, - - toJSON(message: TaskId): unknown { - const obj: any = {}; - message.queryId !== undefined && (obj.queryId = message.queryId); - message.stageId !== undefined && (obj.stageId = Math.round(message.stageId)); - message.taskId !== undefined && (obj.taskId = Math.round(message.taskId)); - return obj; - }, - - fromPartial, I>>(object: I): TaskId { - const message = createBaseTaskId(); - message.queryId = object.queryId ?? ""; - message.stageId = object.stageId ?? 0; - message.taskId = object.taskId ?? 0; - return message; - }, -}; - -function createBaseTaskOutputId(): TaskOutputId { - return { taskId: undefined, outputId: 0 }; -} - -export const TaskOutputId = { - fromJSON(object: any): TaskOutputId { - return { - taskId: isSet(object.taskId) ? TaskId.fromJSON(object.taskId) : undefined, - outputId: isSet(object.outputId) ? Number(object.outputId) : 0, - }; - }, - - toJSON(message: TaskOutputId): unknown { - const obj: any = {}; - message.taskId !== undefined && (obj.taskId = message.taskId ? TaskId.toJSON(message.taskId) : undefined); - message.outputId !== undefined && (obj.outputId = Math.round(message.outputId)); - return obj; - }, - - fromPartial, I>>(object: I): TaskOutputId { - const message = createBaseTaskOutputId(); - message.taskId = (object.taskId !== undefined && object.taskId !== null) - ? TaskId.fromPartial(object.taskId) - : undefined; - message.outputId = object.outputId ?? 0; - return message; - }, -}; - -function createBaseLocalExecutePlan(): LocalExecutePlan { - return { plan: undefined, epoch: undefined }; -} - -export const LocalExecutePlan = { - fromJSON(object: any): LocalExecutePlan { - return { - plan: isSet(object.plan) ? PlanFragment.fromJSON(object.plan) : undefined, - epoch: isSet(object.epoch) ? BatchQueryEpoch.fromJSON(object.epoch) : undefined, - }; - }, - - toJSON(message: LocalExecutePlan): unknown { - const obj: any = {}; - message.plan !== undefined && (obj.plan = message.plan ? PlanFragment.toJSON(message.plan) : undefined); - message.epoch !== undefined && (obj.epoch = message.epoch ? BatchQueryEpoch.toJSON(message.epoch) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): LocalExecutePlan { - const message = createBaseLocalExecutePlan(); - message.plan = (object.plan !== undefined && object.plan !== null) - ? PlanFragment.fromPartial(object.plan) - : undefined; - message.epoch = (object.epoch !== undefined && object.epoch !== null) - ? BatchQueryEpoch.fromPartial(object.epoch) - : undefined; - return message; - }, -}; - -function createBaseExchangeSource(): ExchangeSource { - return { taskOutputId: undefined, host: undefined, localExecutePlan: undefined }; -} - -export const ExchangeSource = { - fromJSON(object: any): ExchangeSource { - return { - taskOutputId: isSet(object.taskOutputId) ? TaskOutputId.fromJSON(object.taskOutputId) : undefined, - host: isSet(object.host) ? HostAddress.fromJSON(object.host) : undefined, - localExecutePlan: isSet(object.plan) - ? 
{ $case: "plan", plan: LocalExecutePlan.fromJSON(object.plan) } - : undefined, - }; - }, - - toJSON(message: ExchangeSource): unknown { - const obj: any = {}; - message.taskOutputId !== undefined && - (obj.taskOutputId = message.taskOutputId ? TaskOutputId.toJSON(message.taskOutputId) : undefined); - message.host !== undefined && (obj.host = message.host ? HostAddress.toJSON(message.host) : undefined); - message.localExecutePlan?.$case === "plan" && - (obj.plan = message.localExecutePlan?.plan ? LocalExecutePlan.toJSON(message.localExecutePlan?.plan) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ExchangeSource { - const message = createBaseExchangeSource(); - message.taskOutputId = (object.taskOutputId !== undefined && object.taskOutputId !== null) - ? TaskOutputId.fromPartial(object.taskOutputId) - : undefined; - message.host = (object.host !== undefined && object.host !== null) - ? HostAddress.fromPartial(object.host) - : undefined; - if ( - object.localExecutePlan?.$case === "plan" && - object.localExecutePlan?.plan !== undefined && - object.localExecutePlan?.plan !== null - ) { - message.localExecutePlan = { $case: "plan", plan: LocalExecutePlan.fromPartial(object.localExecutePlan.plan) }; - } - return message; - }, -}; - -function createBaseExchangeNode(): ExchangeNode { - return { sources: [], inputSchema: [] }; -} - -export const ExchangeNode = { - fromJSON(object: any): ExchangeNode { - return { - sources: Array.isArray(object?.sources) ? object.sources.map((e: any) => ExchangeSource.fromJSON(e)) : [], - inputSchema: Array.isArray(object?.inputSchema) ? object.inputSchema.map((e: any) => Field.fromJSON(e)) : [], - }; - }, - - toJSON(message: ExchangeNode): unknown { - const obj: any = {}; - if (message.sources) { - obj.sources = message.sources.map((e) => e ? ExchangeSource.toJSON(e) : undefined); - } else { - obj.sources = []; - } - if (message.inputSchema) { - obj.inputSchema = message.inputSchema.map((e) => e ? Field.toJSON(e) : undefined); - } else { - obj.inputSchema = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ExchangeNode { - const message = createBaseExchangeNode(); - message.sources = object.sources?.map((e) => ExchangeSource.fromPartial(e)) || []; - message.inputSchema = object.inputSchema?.map((e) => Field.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseMergeSortExchangeNode(): MergeSortExchangeNode { - return { exchange: undefined, columnOrders: [] }; -} - -export const MergeSortExchangeNode = { - fromJSON(object: any): MergeSortExchangeNode { - return { - exchange: isSet(object.exchange) ? ExchangeNode.fromJSON(object.exchange) : undefined, - columnOrders: Array.isArray(object?.columnOrders) - ? object.columnOrders.map((e: any) => ColumnOrder.fromJSON(e)) - : [], - }; - }, - - toJSON(message: MergeSortExchangeNode): unknown { - const obj: any = {}; - message.exchange !== undefined && - (obj.exchange = message.exchange ? ExchangeNode.toJSON(message.exchange) : undefined); - if (message.columnOrders) { - obj.columnOrders = message.columnOrders.map((e) => e ? ColumnOrder.toJSON(e) : undefined); - } else { - obj.columnOrders = []; - } - return obj; - }, - - fromPartial, I>>(object: I): MergeSortExchangeNode { - const message = createBaseMergeSortExchangeNode(); - message.exchange = (object.exchange !== undefined && object.exchange !== null) - ? 
ExchangeNode.fromPartial(object.exchange)
-      : undefined;
-    message.columnOrders = object.columnOrders?.map((e) => ColumnOrder.fromPartial(e)) || [];
-    return message;
-  },
-};
-
-function createBaseLocalLookupJoinNode(): LocalLookupJoinNode {
-  return {
-    joinType: JoinType.UNSPECIFIED,
-    condition: undefined,
-    outerSideKey: [],
-    innerSideKey: [],
-    lookupPrefixLen: 0,
-    innerSideTableDesc: undefined,
-    innerSideVnodeMapping: [],
-    innerSideColumnIds: [],
-    outputIndices: [],
-    workerNodes: [],
-    nullSafe: [],
-  };
-}
-
-export const LocalLookupJoinNode = {
[… deleted ts-proto boilerplate: LocalLookupJoinNode.fromJSON / toJSON / fromPartial, mechanical per-field (de)serialization in the same shape as the messages kept in full below …]
-};
-
-function createBaseDistributedLookupJoinNode(): DistributedLookupJoinNode {
-  return {
-    joinType: JoinType.UNSPECIFIED,
-    condition: undefined,
-    outerSideKey: [],
-    innerSideKey: [],
-    lookupPrefixLen: 0,
-    innerSideTableDesc: undefined,
-    innerSideColumnIds: [],
-    outputIndices: [],
-    nullSafe: [],
-  };
-}
-
-export const DistributedLookupJoinNode = {
[… deleted ts-proto boilerplate: DistributedLookupJoinNode.fromJSON / toJSON / fromPartial …]
-};
-
-function createBaseUnionNode(): UnionNode {
-  return {};
-}
-
-export const UnionNode = {
-  fromJSON(_: any): UnionNode {
-    return {};
-  },
-
-  toJSON(_: UnionNode): unknown {
-    const obj: any = {};
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<UnionNode>, I>>(_: I): UnionNode {
-    const message = createBaseUnionNode();
-    return message;
-  },
-};
-
-function createBasePlanNode(): PlanNode {
-  return { children: [], nodeBody: undefined, identity: "" };
-}
-
-export const PlanNode = {
[… deleted ts-proto boilerplate: PlanNode.fromJSON / toJSON / fromPartial, mechanical dispatch over the 26 $case variants of the nodeBody oneof (insert, delete, update, project, hashAgg, filter, exchange, sort, nestedLoopJoin, topN, sortAgg, rowSeqScan, limit, values, hashJoin, mergeSortExchange, hopWindow, tableFunction, sysRowSeqScan, expand, localLookupJoin, projectSet, union, groupTopN, distributedLookupJoin, source); a sketch of the pattern follows below …]
-};
""; - return message; - }, -}; - -function createBaseExchangeInfo(): ExchangeInfo { - return { mode: ExchangeInfo_DistributionMode.UNSPECIFIED, distribution: undefined }; -} - -export const ExchangeInfo = { - fromJSON(object: any): ExchangeInfo { - return { - mode: isSet(object.mode) - ? exchangeInfo_DistributionModeFromJSON(object.mode) - : ExchangeInfo_DistributionMode.UNSPECIFIED, - distribution: isSet(object.broadcastInfo) - ? { $case: "broadcastInfo", broadcastInfo: ExchangeInfo_BroadcastInfo.fromJSON(object.broadcastInfo) } - : isSet(object.hashInfo) - ? { $case: "hashInfo", hashInfo: ExchangeInfo_HashInfo.fromJSON(object.hashInfo) } - : isSet(object.consistentHashInfo) - ? { - $case: "consistentHashInfo", - consistentHashInfo: ExchangeInfo_ConsistentHashInfo.fromJSON(object.consistentHashInfo), - } - : undefined, - }; - }, - - toJSON(message: ExchangeInfo): unknown { - const obj: any = {}; - message.mode !== undefined && (obj.mode = exchangeInfo_DistributionModeToJSON(message.mode)); - message.distribution?.$case === "broadcastInfo" && (obj.broadcastInfo = message.distribution?.broadcastInfo - ? ExchangeInfo_BroadcastInfo.toJSON(message.distribution?.broadcastInfo) - : undefined); - message.distribution?.$case === "hashInfo" && (obj.hashInfo = message.distribution?.hashInfo - ? ExchangeInfo_HashInfo.toJSON(message.distribution?.hashInfo) - : undefined); - message.distribution?.$case === "consistentHashInfo" && - (obj.consistentHashInfo = message.distribution?.consistentHashInfo - ? ExchangeInfo_ConsistentHashInfo.toJSON(message.distribution?.consistentHashInfo) - : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ExchangeInfo { - const message = createBaseExchangeInfo(); - message.mode = object.mode ?? ExchangeInfo_DistributionMode.UNSPECIFIED; - if ( - object.distribution?.$case === "broadcastInfo" && - object.distribution?.broadcastInfo !== undefined && - object.distribution?.broadcastInfo !== null - ) { - message.distribution = { - $case: "broadcastInfo", - broadcastInfo: ExchangeInfo_BroadcastInfo.fromPartial(object.distribution.broadcastInfo), - }; - } - if ( - object.distribution?.$case === "hashInfo" && - object.distribution?.hashInfo !== undefined && - object.distribution?.hashInfo !== null - ) { - message.distribution = { - $case: "hashInfo", - hashInfo: ExchangeInfo_HashInfo.fromPartial(object.distribution.hashInfo), - }; - } - if ( - object.distribution?.$case === "consistentHashInfo" && - object.distribution?.consistentHashInfo !== undefined && - object.distribution?.consistentHashInfo !== null - ) { - message.distribution = { - $case: "consistentHashInfo", - consistentHashInfo: ExchangeInfo_ConsistentHashInfo.fromPartial(object.distribution.consistentHashInfo), - }; - } - return message; - }, -}; - -function createBaseExchangeInfo_BroadcastInfo(): ExchangeInfo_BroadcastInfo { - return { count: 0 }; -} - -export const ExchangeInfo_BroadcastInfo = { - fromJSON(object: any): ExchangeInfo_BroadcastInfo { - return { count: isSet(object.count) ? Number(object.count) : 0 }; - }, - - toJSON(message: ExchangeInfo_BroadcastInfo): unknown { - const obj: any = {}; - message.count !== undefined && (obj.count = Math.round(message.count)); - return obj; - }, - - fromPartial, I>>(object: I): ExchangeInfo_BroadcastInfo { - const message = createBaseExchangeInfo_BroadcastInfo(); - message.count = object.count ?? 
0; - return message; - }, -}; - -function createBaseExchangeInfo_HashInfo(): ExchangeInfo_HashInfo { - return { outputCount: 0, key: [] }; -} - -export const ExchangeInfo_HashInfo = { - fromJSON(object: any): ExchangeInfo_HashInfo { - return { - outputCount: isSet(object.outputCount) ? Number(object.outputCount) : 0, - key: Array.isArray(object?.key) ? object.key.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: ExchangeInfo_HashInfo): unknown { - const obj: any = {}; - message.outputCount !== undefined && (obj.outputCount = Math.round(message.outputCount)); - if (message.key) { - obj.key = message.key.map((e) => Math.round(e)); - } else { - obj.key = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ExchangeInfo_HashInfo { - const message = createBaseExchangeInfo_HashInfo(); - message.outputCount = object.outputCount ?? 0; - message.key = object.key?.map((e) => e) || []; - return message; - }, -}; - -function createBaseExchangeInfo_ConsistentHashInfo(): ExchangeInfo_ConsistentHashInfo { - return { vmap: [], key: [] }; -} - -export const ExchangeInfo_ConsistentHashInfo = { - fromJSON(object: any): ExchangeInfo_ConsistentHashInfo { - return { - vmap: Array.isArray(object?.vmap) ? object.vmap.map((e: any) => Number(e)) : [], - key: Array.isArray(object?.key) ? object.key.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: ExchangeInfo_ConsistentHashInfo): unknown { - const obj: any = {}; - if (message.vmap) { - obj.vmap = message.vmap.map((e) => Math.round(e)); - } else { - obj.vmap = []; - } - if (message.key) { - obj.key = message.key.map((e) => Math.round(e)); - } else { - obj.key = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): ExchangeInfo_ConsistentHashInfo { - const message = createBaseExchangeInfo_ConsistentHashInfo(); - message.vmap = object.vmap?.map((e) => e) || []; - message.key = object.key?.map((e) => e) || []; - return message; - }, -}; - -function createBasePlanFragment(): PlanFragment { - return { root: undefined, exchangeInfo: undefined }; -} - -export const PlanFragment = { - fromJSON(object: any): PlanFragment { - return { - root: isSet(object.root) ? PlanNode.fromJSON(object.root) : undefined, - exchangeInfo: isSet(object.exchangeInfo) ? ExchangeInfo.fromJSON(object.exchangeInfo) : undefined, - }; - }, - - toJSON(message: PlanFragment): unknown { - const obj: any = {}; - message.root !== undefined && (obj.root = message.root ? PlanNode.toJSON(message.root) : undefined); - message.exchangeInfo !== undefined && - (obj.exchangeInfo = message.exchangeInfo ? ExchangeInfo.toJSON(message.exchangeInfo) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): PlanFragment { - const message = createBasePlanFragment(); - message.root = (object.root !== undefined && object.root !== null) ? PlanNode.fromPartial(object.root) : undefined; - message.exchangeInfo = (object.exchangeInfo !== undefined && object.exchangeInfo !== null) - ? 
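Note the Math.round calls in the toJSON bodies above: integer protobuf fields are plain JS numbers on this side, so rounding on output keeps a float that slipped in through fromPartial from leaking into the JSON form. A tiny sketch, using a stand-in for ExchangeInfo_BroadcastInfo rather than the generated type:

// Why the generated toJSON wraps integer fields in Math.round: proto int32
// fields are ordinary JS doubles here, so a fractional value would otherwise
// escape into the JSON output.
interface MiniBroadcastInfo { count: number }

function miniBroadcastInfoToJSON(message: MiniBroadcastInfo): unknown {
  const obj: any = {};
  message.count !== undefined && (obj.count = Math.round(message.count));
  return obj;
}

console.log(miniBroadcastInfoToJSON({ count: 3.0000000001 })); // { count: 3 }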
-declare var self: any | undefined;
-declare var window: any | undefined;
-declare var global: any | undefined;
-var globalThis: any = (() => {
-  if (typeof globalThis !== "undefined") {
-    return globalThis;
-  }
-  if (typeof self !== "undefined") {
-    return self;
-  }
-  if (typeof window !== "undefined") {
-    return window;
-  }
-  if (typeof global !== "undefined") {
-    return global;
-  }
-  throw "Unable to locate global object";
-})();
-
-function bytesFromBase64(b64: string): Uint8Array {
-  if (globalThis.Buffer) {
-    return Uint8Array.from(globalThis.Buffer.from(b64, "base64"));
-  } else {
-    const bin = globalThis.atob(b64);
-    const arr = new Uint8Array(bin.length);
-    for (let i = 0; i < bin.length; ++i) {
-      arr[i] = bin.charCodeAt(i);
-    }
-    return arr;
-  }
-}
-
-function base64FromBytes(arr: Uint8Array): string {
-  if (globalThis.Buffer) {
-    return globalThis.Buffer.from(arr).toString("base64");
-  } else {
-    const bin: string[] = [];
-    arr.forEach((byte) => {
-      bin.push(String.fromCharCode(byte));
-    });
-    return globalThis.btoa(bin.join(""));
-  }
-}
-
-type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
-
-export type DeepPartial<T> = T extends Builtin ? T
-  : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>>
-  : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] }
-  : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> }
-  : Partial<T>;
-
-type KeysOfUnion<T> = T extends T ? keyof T : never;
-export type Exact<P, I extends P> = P extends Builtin ? P
-  : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never };
-
-function isObject(value: any): boolean {
-  return typeof value === "object" && value !== null;
-}
-
-function isSet(value: any): boolean {
-  return value !== null && value !== undefined;
-}
diff --git a/dashboard/proto/gen/catalog.ts b/dashboard/proto/gen/catalog.ts
deleted file mode 100644
index 0590f0a959ce6..0000000000000
--- a/dashboard/proto/gen/catalog.ts
+++ /dev/null
@@ -1,1314 +0,0 @@
-/* eslint-disable */
-import { ColumnOrder } from "./common";
-import { DataType } from "./data";
-import { ExprNode } from "./expr";
-import { ColumnCatalog, Field, RowFormatType, rowFormatTypeFromJSON, rowFormatTypeToJSON } from "./plan_common";
-
-export const protobufPackage = "catalog";
-
-export const SinkType = {
-  UNSPECIFIED: "UNSPECIFIED",
-  APPEND_ONLY: "APPEND_ONLY",
-  FORCE_APPEND_ONLY: "FORCE_APPEND_ONLY",
-  UPSERT: "UPSERT",
-  UNRECOGNIZED: "UNRECOGNIZED",
-} as const;
-
-export type SinkType = typeof SinkType[keyof typeof SinkType];
-
-export function sinkTypeFromJSON(object: any): SinkType {
-  switch (object) {
-    case 0:
-    case "UNSPECIFIED":
-      return SinkType.UNSPECIFIED;
-    case 1:
-    case "APPEND_ONLY":
-      return SinkType.APPEND_ONLY;
-    case 2:
-    case "FORCE_APPEND_ONLY":
-      return SinkType.FORCE_APPEND_ONLY;
-    case 3:
-    case "UPSERT":
-      return SinkType.UPSERT;
-    case -1:
-    case "UNRECOGNIZED":
-    default:
-      return SinkType.UNRECOGNIZED;
-  }
-}
-
-export function sinkTypeToJSON(object: SinkType): string {
-  switch (object) {
-    case SinkType.UNSPECIFIED:
-      return "UNSPECIFIED";
-    case SinkType.APPEND_ONLY:
-      return "APPEND_ONLY";
-    case SinkType.FORCE_APPEND_ONLY:
-      return "FORCE_APPEND_ONLY";
-    case SinkType.UPSERT:
-      return "UPSERT";
-    case SinkType.UNRECOGNIZED:
-    default:
-      return "UNRECOGNIZED";
-  }
-}
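sinkTypeFromJSON above accepts both the numeric wire value and the string name, mapping anything else to UNRECOGNIZED, as protobuf's JSON form permits either. A reduced sketch of the same decoding with a hypothetical two-variant enum:

// Sketch of the dual numeric/string enum decoding used by the generated
// *FromJSON functions. MiniSinkType is a stand-in, not the real enum.
const MiniSinkType = {
  UNSPECIFIED: "UNSPECIFIED",
  APPEND_ONLY: "APPEND_ONLY",
  UNRECOGNIZED: "UNRECOGNIZED",
} as const;
type MiniSinkType = typeof MiniSinkType[keyof typeof MiniSinkType];

function miniSinkTypeFromJSON(object: any): MiniSinkType {
  switch (object) {
    case 0:
    case "UNSPECIFIED":
      return MiniSinkType.UNSPECIFIED;
    case 1:
    case "APPEND_ONLY":
      return MiniSinkType.APPEND_ONLY;
    default:
      return MiniSinkType.UNRECOGNIZED;
  }
}

console.log(miniSinkTypeFromJSON(1));             // "APPEND_ONLY"
console.log(miniSinkTypeFromJSON("APPEND_ONLY")); // "APPEND_ONLY"
console.log(miniSinkTypeFromJSON(42));            // "UNRECOGNIZED"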
IGNORE: "IGNORE", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type HandleConflictBehavior = typeof HandleConflictBehavior[keyof typeof HandleConflictBehavior]; - -export function handleConflictBehaviorFromJSON(object: any): HandleConflictBehavior { - switch (object) { - case 0: - case "NO_CHECK_UNSPECIFIED": - return HandleConflictBehavior.NO_CHECK_UNSPECIFIED; - case 1: - case "OVERWRITE": - return HandleConflictBehavior.OVERWRITE; - case 2: - case "IGNORE": - return HandleConflictBehavior.IGNORE; - case -1: - case "UNRECOGNIZED": - default: - return HandleConflictBehavior.UNRECOGNIZED; - } -} - -export function handleConflictBehaviorToJSON(object: HandleConflictBehavior): string { - switch (object) { - case HandleConflictBehavior.NO_CHECK_UNSPECIFIED: - return "NO_CHECK_UNSPECIFIED"; - case HandleConflictBehavior.OVERWRITE: - return "OVERWRITE"; - case HandleConflictBehavior.IGNORE: - return "IGNORE"; - case HandleConflictBehavior.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** A mapping of column indices. */ -export interface ColIndexMapping { - /** The size of the target space. */ - targetSize: number; - /** - * Each subscript is mapped to the corresponding element. - * For those not mapped, the value will be negative. - */ - map: number[]; -} - -export interface WatermarkDesc { - /** The column idx the watermark is on */ - watermarkIdx: number; - /** The expression to calculate the watermark value. */ - expr: ExprNode | undefined; -} - -export interface StreamSourceInfo { - rowFormat: RowFormatType; - rowSchemaLocation: string; - useSchemaRegistry: boolean; - protoMessageName: string; - csvDelimiter: number; - csvHasHeader: boolean; - upsertAvroPrimaryKey: string; -} - -export interface Source { - id: number; - schemaId: number; - databaseId: number; - name: string; - /** - * The column index of row ID. If the primary key is specified by the user, - * this will be `None`. - */ - rowIdIndex?: - | number - | undefined; - /** Columns of the source. */ - columns: ColumnCatalog[]; - /** - * Column id of the primary key specified by the user. If the user does not - * specify a primary key, the vector will be empty. - */ - pkColumnIds: number[]; - /** Properties specified by the user in WITH clause. */ - properties: { [key: string]: string }; - owner: number; - info: - | StreamSourceInfo - | undefined; - /** - * Define watermarks on the source. The `repeated` is just for forward - * compatibility, currently, only one watermark on the source - */ - watermarkDescs: WatermarkDesc[]; -} - -export interface Source_PropertiesEntry { - key: string; - value: string; -} - -export interface Sink { - id: number; - schemaId: number; - databaseId: number; - name: string; - columns: ColumnCatalog[]; - pk: ColumnOrder[]; - dependentRelations: number[]; - distributionKey: number[]; - /** pk_indices of the corresponding materialize operator's output. */ - streamKey: number[]; - sinkType: SinkType; - owner: number; - properties: { [key: string]: string }; - definition: string; -} - -export interface Sink_PropertiesEntry { - key: string; - value: string; -} - -export interface Index { - id: number; - schemaId: number; - databaseId: number; - name: string; - owner: number; - indexTableId: number; - primaryTableId: number; - /** - * Only `InputRef` type index is supported Now. - * The index of `InputRef` is the column index of the primary table. 
-export interface Index {
-  id: number;
-  schemaId: number;
-  databaseId: number;
-  name: string;
-  owner: number;
-  indexTableId: number;
-  primaryTableId: number;
-  /**
-   * Only `InputRef` type index is supported Now.
-   * The index of `InputRef` is the column index of the primary table.
-   */
-  indexItem: ExprNode[];
-  originalColumns: number[];
-}
-
-export interface Function {
-  id: number;
-  schemaId: number;
-  databaseId: number;
-  name: string;
-  owner: number;
-  argTypes: DataType[];
-  returnType: DataType | undefined;
-  language: string;
-  link: string;
-  identifier: string;
-}
-
-/** See `TableCatalog` struct in frontend crate for more information. */
-export interface Table {
-  id: number;
-  schemaId: number;
-  databaseId: number;
-  name: string;
-  columns: ColumnCatalog[];
-  pk: ColumnOrder[];
-  dependentRelations: number[];
-  optionalAssociatedSourceId?: { $case: "associatedSourceId"; associatedSourceId: number };
-  tableType: Table_TableType;
-  distributionKey: number[];
-  /** pk_indices of the corresponding materialize operator's output. */
-  streamKey: number[];
-  appendOnly: boolean;
-  owner: number;
-  properties: { [key: string]: string };
-  fragmentId: number;
-  /**
-   * an optional column index which is the vnode of each row computed by the
-   * table's consistent hash distribution
-   */
-  vnodeColIndex?:
-    | number
-    | undefined;
-  /**
-   * An optional column index of row id. If the primary key is specified by users,
-   * this will be `None`.
-   */
-  rowIdIndex?:
-    | number
-    | undefined;
-  /**
-   * The column indices which are stored in the state store's value with
-   * row-encoding. Currently is not supported yet and expected to be
-   * `[0..columns.len()]`.
-   */
-  valueIndices: number[];
-  definition: string;
-  handlePkConflictBehavior: HandleConflictBehavior;
-  readPrefixLenHint: number;
-  watermarkIndices: number[];
-  distKeyInPk: number[];
-  /**
-   * Per-table catalog version, used by schema change. `None` for internal tables and tests.
-   * Not to be confused with the global catalog version for notification service.
-   */
-  version: Table_TableVersion | undefined;
-}
-
-export const Table_TableType = {
-  UNSPECIFIED: "UNSPECIFIED",
-  TABLE: "TABLE",
-  MATERIALIZED_VIEW: "MATERIALIZED_VIEW",
-  INDEX: "INDEX",
-  INTERNAL: "INTERNAL",
-  UNRECOGNIZED: "UNRECOGNIZED",
-} as const;
-
-export type Table_TableType = typeof Table_TableType[keyof typeof Table_TableType];
-
-export function table_TableTypeFromJSON(object: any): Table_TableType {
-  switch (object) {
-    case 0:
-    case "UNSPECIFIED":
-      return Table_TableType.UNSPECIFIED;
-    case 1:
-    case "TABLE":
-      return Table_TableType.TABLE;
-    case 2:
-    case "MATERIALIZED_VIEW":
-      return Table_TableType.MATERIALIZED_VIEW;
-    case 3:
-    case "INDEX":
-      return Table_TableType.INDEX;
-    case 4:
-    case "INTERNAL":
-      return Table_TableType.INTERNAL;
-    case -1:
-    case "UNRECOGNIZED":
-    default:
-      return Table_TableType.UNRECOGNIZED;
-  }
-}
-
-export function table_TableTypeToJSON(object: Table_TableType): string {
-  switch (object) {
-    case Table_TableType.UNSPECIFIED:
-      return "UNSPECIFIED";
-    case Table_TableType.TABLE:
-      return "TABLE";
-    case Table_TableType.MATERIALIZED_VIEW:
-      return "MATERIALIZED_VIEW";
-    case Table_TableType.INDEX:
-      return "INDEX";
-    case Table_TableType.INTERNAL:
-      return "INTERNAL";
-    case Table_TableType.UNRECOGNIZED:
-    default:
-      return "UNRECOGNIZED";
-  }
-}
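Table's optionalAssociatedSourceId above is a single-variant oneof, so presence is modeled as a $case wrapper: an absent wrapper means "not set", while a wrapper holding 0 means "set to the default value". A small sketch with a stripped-down table shape (MiniTable is hypothetical, not the generated type):

// Sketch of the single-variant oneof wrapper seen on Table: nesting the
// field under $case distinguishes "not set" from "set to 0".
interface MiniTable {
  name: string;
  optionalAssociatedSourceId?: { $case: "associatedSourceId"; associatedSourceId: number };
}

function associatedSourceId(table: MiniTable): number | undefined {
  return table.optionalAssociatedSourceId?.$case === "associatedSourceId"
    ? table.optionalAssociatedSourceId.associatedSourceId
    : undefined;
}

console.log(associatedSourceId({ name: "t" })); // undefined (not set)
console.log(associatedSourceId({
  name: "t",
  optionalAssociatedSourceId: { $case: "associatedSourceId", associatedSourceId: 0 },
})); // 0 (set, even though it is the default value)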
-export interface Table_TableVersion {
-  /**
-   * The version number, which will be 0 by default and be increased by 1 for
-   * each schema change in the frontend.
-   */
-  version: number;
-  /**
-   * The ID of the next column to be added, which is used to make all columns
-   * in the table have unique IDs, even if some columns have been dropped.
-   */
-  nextColumnId: number;
-}
-
-export interface Table_PropertiesEntry {
-  key: string;
-  value: string;
-}
-
-export interface View {
-  id: number;
-  schemaId: number;
-  databaseId: number;
-  name: string;
-  owner: number;
-  properties: { [key: string]: string };
-  sql: string;
-  dependentRelations: number[];
-  /** User-specified column names. */
-  columns: Field[];
-}
-
-export interface View_PropertiesEntry {
-  key: string;
-  value: string;
-}
-
-export interface Schema {
-  id: number;
-  databaseId: number;
-  name: string;
-  owner: number;
-}
-
-export interface Database {
-  id: number;
-  name: string;
-  owner: number;
-}
-
-function createBaseColIndexMapping(): ColIndexMapping {
-  return { targetSize: 0, map: [] };
-}
-
-export const ColIndexMapping = {
-  fromJSON(object: any): ColIndexMapping {
-    return {
-      targetSize: isSet(object.targetSize) ? Number(object.targetSize) : 0,
-      map: Array.isArray(object?.map) ? object.map.map((e: any) => Number(e)) : [],
-    };
-  },
-
-  toJSON(message: ColIndexMapping): unknown {
-    const obj: any = {};
-    message.targetSize !== undefined && (obj.targetSize = Math.round(message.targetSize));
-    if (message.map) {
-      obj.map = message.map.map((e) => Math.round(e));
-    } else {
-      obj.map = [];
-    }
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<ColIndexMapping>, I>>(object: I): ColIndexMapping {
-    const message = createBaseColIndexMapping();
-    message.targetSize = object.targetSize ?? 0;
-    message.map = object.map?.map((e) => e) || [];
-    return message;
-  },
-};
-
-function createBaseWatermarkDesc(): WatermarkDesc {
-  return { watermarkIdx: 0, expr: undefined };
-}
-
-export const WatermarkDesc = {
[… deleted ts-proto boilerplate: WatermarkDesc.fromJSON / toJSON / fromPartial …]
-};
-
-function createBaseStreamSourceInfo(): StreamSourceInfo {
-  return {
-    rowFormat: RowFormatType.ROW_UNSPECIFIED,
-    rowSchemaLocation: "",
-    useSchemaRegistry: false,
-    protoMessageName: "",
-    csvDelimiter: 0,
-    csvHasHeader: false,
-    upsertAvroPrimaryKey: "",
-  };
-}
-
-export const StreamSourceInfo = {
[… deleted ts-proto boilerplate: StreamSourceInfo.fromJSON / toJSON / fromPartial …]
-};
String(object.upsertAvroPrimaryKey) : "", - }; - }, - - toJSON(message: StreamSourceInfo): unknown { - const obj: any = {}; - message.rowFormat !== undefined && (obj.rowFormat = rowFormatTypeToJSON(message.rowFormat)); - message.rowSchemaLocation !== undefined && (obj.rowSchemaLocation = message.rowSchemaLocation); - message.useSchemaRegistry !== undefined && (obj.useSchemaRegistry = message.useSchemaRegistry); - message.protoMessageName !== undefined && (obj.protoMessageName = message.protoMessageName); - message.csvDelimiter !== undefined && (obj.csvDelimiter = Math.round(message.csvDelimiter)); - message.csvHasHeader !== undefined && (obj.csvHasHeader = message.csvHasHeader); - message.upsertAvroPrimaryKey !== undefined && (obj.upsertAvroPrimaryKey = message.upsertAvroPrimaryKey); - return obj; - }, - - fromPartial, I>>(object: I): StreamSourceInfo { - const message = createBaseStreamSourceInfo(); - message.rowFormat = object.rowFormat ?? RowFormatType.ROW_UNSPECIFIED; - message.rowSchemaLocation = object.rowSchemaLocation ?? ""; - message.useSchemaRegistry = object.useSchemaRegistry ?? false; - message.protoMessageName = object.protoMessageName ?? ""; - message.csvDelimiter = object.csvDelimiter ?? 0; - message.csvHasHeader = object.csvHasHeader ?? false; - message.upsertAvroPrimaryKey = object.upsertAvroPrimaryKey ?? ""; - return message; - }, -}; - -function createBaseSource(): Source { - return { - id: 0, - schemaId: 0, - databaseId: 0, - name: "", - rowIdIndex: undefined, - columns: [], - pkColumnIds: [], - properties: {}, - owner: 0, - info: undefined, - watermarkDescs: [], - }; -} - -export const Source = { - fromJSON(object: any): Source { - return { - id: isSet(object.id) ? Number(object.id) : 0, - schemaId: isSet(object.schemaId) ? Number(object.schemaId) : 0, - databaseId: isSet(object.databaseId) ? Number(object.databaseId) : 0, - name: isSet(object.name) ? String(object.name) : "", - rowIdIndex: isSet(object.rowIdIndex) ? Number(object.rowIdIndex) : undefined, - columns: Array.isArray(object?.columns) ? object.columns.map((e: any) => ColumnCatalog.fromJSON(e)) : [], - pkColumnIds: Array.isArray(object?.pkColumnIds) ? object.pkColumnIds.map((e: any) => Number(e)) : [], - properties: isObject(object.properties) - ? Object.entries(object.properties).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - owner: isSet(object.owner) ? Number(object.owner) : 0, - info: isSet(object.info) ? StreamSourceInfo.fromJSON(object.info) : undefined, - watermarkDescs: Array.isArray(object?.watermarkDescs) - ? object.watermarkDescs.map((e: any) => WatermarkDesc.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Source): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.schemaId !== undefined && (obj.schemaId = Math.round(message.schemaId)); - message.databaseId !== undefined && (obj.databaseId = Math.round(message.databaseId)); - message.name !== undefined && (obj.name = message.name); - message.rowIdIndex !== undefined && (obj.rowIdIndex = Math.round(message.rowIdIndex)); - if (message.columns) { - obj.columns = message.columns.map((e) => e ? 
ColumnCatalog.toJSON(e) : undefined); - } else { - obj.columns = []; - } - if (message.pkColumnIds) { - obj.pkColumnIds = message.pkColumnIds.map((e) => Math.round(e)); - } else { - obj.pkColumnIds = []; - } - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - message.owner !== undefined && (obj.owner = Math.round(message.owner)); - message.info !== undefined && (obj.info = message.info ? StreamSourceInfo.toJSON(message.info) : undefined); - if (message.watermarkDescs) { - obj.watermarkDescs = message.watermarkDescs.map((e) => e ? WatermarkDesc.toJSON(e) : undefined); - } else { - obj.watermarkDescs = []; - } - return obj; - }, - - fromPartial, I>>(object: I): Source { - const message = createBaseSource(); - message.id = object.id ?? 0; - message.schemaId = object.schemaId ?? 0; - message.databaseId = object.databaseId ?? 0; - message.name = object.name ?? ""; - message.rowIdIndex = object.rowIdIndex ?? undefined; - message.columns = object.columns?.map((e) => ColumnCatalog.fromPartial(e)) || []; - message.pkColumnIds = object.pkColumnIds?.map((e) => e) || []; - message.properties = Object.entries(object.properties ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.owner = object.owner ?? 0; - message.info = (object.info !== undefined && object.info !== null) - ? StreamSourceInfo.fromPartial(object.info) - : undefined; - message.watermarkDescs = object.watermarkDescs?.map((e) => WatermarkDesc.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseSource_PropertiesEntry(): Source_PropertiesEntry { - return { key: "", value: "" }; -} - -export const Source_PropertiesEntry = { - fromJSON(object: any): Source_PropertiesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: Source_PropertiesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>(object: I): Source_PropertiesEntry { - const message = createBaseSource_PropertiesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseSink(): Sink { - return { - id: 0, - schemaId: 0, - databaseId: 0, - name: "", - columns: [], - pk: [], - dependentRelations: [], - distributionKey: [], - streamKey: [], - sinkType: SinkType.UNSPECIFIED, - owner: 0, - properties: {}, - definition: "", - }; -} - -export const Sink = { - fromJSON(object: any): Sink { - return { - id: isSet(object.id) ? Number(object.id) : 0, - schemaId: isSet(object.schemaId) ? Number(object.schemaId) : 0, - databaseId: isSet(object.databaseId) ? Number(object.databaseId) : 0, - name: isSet(object.name) ? String(object.name) : "", - columns: Array.isArray(object?.columns) ? object.columns.map((e: any) => ColumnCatalog.fromJSON(e)) : [], - pk: Array.isArray(object?.pk) ? object.pk.map((e: any) => ColumnOrder.fromJSON(e)) : [], - dependentRelations: Array.isArray(object?.dependentRelations) - ? object.dependentRelations.map((e: any) => Number(e)) - : [], - distributionKey: Array.isArray(object?.distributionKey) - ? object.distributionKey.map((e: any) => Number(e)) - : [], - streamKey: Array.isArray(object?.streamKey) ? 
object.streamKey.map((e: any) => Number(e)) : [], - sinkType: isSet(object.sinkType) ? sinkTypeFromJSON(object.sinkType) : SinkType.UNSPECIFIED, - owner: isSet(object.owner) ? Number(object.owner) : 0, - properties: isObject(object.properties) - ? Object.entries(object.properties).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - definition: isSet(object.definition) ? String(object.definition) : "", - }; - }, - - toJSON(message: Sink): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.schemaId !== undefined && (obj.schemaId = Math.round(message.schemaId)); - message.databaseId !== undefined && (obj.databaseId = Math.round(message.databaseId)); - message.name !== undefined && (obj.name = message.name); - if (message.columns) { - obj.columns = message.columns.map((e) => e ? ColumnCatalog.toJSON(e) : undefined); - } else { - obj.columns = []; - } - if (message.pk) { - obj.pk = message.pk.map((e) => e ? ColumnOrder.toJSON(e) : undefined); - } else { - obj.pk = []; - } - if (message.dependentRelations) { - obj.dependentRelations = message.dependentRelations.map((e) => Math.round(e)); - } else { - obj.dependentRelations = []; - } - if (message.distributionKey) { - obj.distributionKey = message.distributionKey.map((e) => Math.round(e)); - } else { - obj.distributionKey = []; - } - if (message.streamKey) { - obj.streamKey = message.streamKey.map((e) => Math.round(e)); - } else { - obj.streamKey = []; - } - message.sinkType !== undefined && (obj.sinkType = sinkTypeToJSON(message.sinkType)); - message.owner !== undefined && (obj.owner = Math.round(message.owner)); - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - message.definition !== undefined && (obj.definition = message.definition); - return obj; - }, - - fromPartial, I>>(object: I): Sink { - const message = createBaseSink(); - message.id = object.id ?? 0; - message.schemaId = object.schemaId ?? 0; - message.databaseId = object.databaseId ?? 0; - message.name = object.name ?? ""; - message.columns = object.columns?.map((e) => ColumnCatalog.fromPartial(e)) || []; - message.pk = object.pk?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.dependentRelations = object.dependentRelations?.map((e) => e) || []; - message.distributionKey = object.distributionKey?.map((e) => e) || []; - message.streamKey = object.streamKey?.map((e) => e) || []; - message.sinkType = object.sinkType ?? SinkType.UNSPECIFIED; - message.owner = object.owner ?? 0; - message.properties = Object.entries(object.properties ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.definition = object.definition ?? ""; - return message; - }, -}; - -function createBaseSink_PropertiesEntry(): Sink_PropertiesEntry { - return { key: "", value: "" }; -} - -export const Sink_PropertiesEntry = { - fromJSON(object: any): Sink_PropertiesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? 
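The Source and Sink serializers above both funnel their properties map through an Object.entries(...).reduce(...) that coerces every value to a string. A self-contained sketch of that fromJSON-side handling:

// Sketch of the { [key: string]: string } properties decoding shared by the
// Source/Sink/Table/View serializers: every value is stringified, and
// non-object inputs fall back to an empty map.
function propertiesFromJSON(object: any): { [key: string]: string } {
  return typeof object === "object" && object !== null
    ? Object.entries(object).reduce<{ [key: string]: string }>((acc, [key, value]) => {
        acc[key] = String(value);
        return acc;
      }, {})
    : {};
}

console.log(propertiesFromJSON({ connector: "kafka", retention: 3600 }));
// { connector: "kafka", retention: "3600" }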
String(object.value) : "" }; - }, - - toJSON(message: Sink_PropertiesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>(object: I): Sink_PropertiesEntry { - const message = createBaseSink_PropertiesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseIndex(): Index { - return { - id: 0, - schemaId: 0, - databaseId: 0, - name: "", - owner: 0, - indexTableId: 0, - primaryTableId: 0, - indexItem: [], - originalColumns: [], - }; -} - -export const Index = { - fromJSON(object: any): Index { - return { - id: isSet(object.id) ? Number(object.id) : 0, - schemaId: isSet(object.schemaId) ? Number(object.schemaId) : 0, - databaseId: isSet(object.databaseId) ? Number(object.databaseId) : 0, - name: isSet(object.name) ? String(object.name) : "", - owner: isSet(object.owner) ? Number(object.owner) : 0, - indexTableId: isSet(object.indexTableId) ? Number(object.indexTableId) : 0, - primaryTableId: isSet(object.primaryTableId) ? Number(object.primaryTableId) : 0, - indexItem: Array.isArray(object?.indexItem) - ? object.indexItem.map((e: any) => ExprNode.fromJSON(e)) - : [], - originalColumns: Array.isArray(object?.originalColumns) ? object.originalColumns.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: Index): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.schemaId !== undefined && (obj.schemaId = Math.round(message.schemaId)); - message.databaseId !== undefined && (obj.databaseId = Math.round(message.databaseId)); - message.name !== undefined && (obj.name = message.name); - message.owner !== undefined && (obj.owner = Math.round(message.owner)); - message.indexTableId !== undefined && (obj.indexTableId = Math.round(message.indexTableId)); - message.primaryTableId !== undefined && (obj.primaryTableId = Math.round(message.primaryTableId)); - if (message.indexItem) { - obj.indexItem = message.indexItem.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.indexItem = []; - } - if (message.originalColumns) { - obj.originalColumns = message.originalColumns.map((e) => Math.round(e)); - } else { - obj.originalColumns = []; - } - return obj; - }, - - fromPartial, I>>(object: I): Index { - const message = createBaseIndex(); - message.id = object.id ?? 0; - message.schemaId = object.schemaId ?? 0; - message.databaseId = object.databaseId ?? 0; - message.name = object.name ?? ""; - message.owner = object.owner ?? 0; - message.indexTableId = object.indexTableId ?? 0; - message.primaryTableId = object.primaryTableId ?? 0; - message.indexItem = object.indexItem?.map((e) => ExprNode.fromPartial(e)) || []; - message.originalColumns = object.originalColumns?.map((e) => e) || []; - return message; - }, -}; - -function createBaseFunction(): Function { - return { - id: 0, - schemaId: 0, - databaseId: 0, - name: "", - owner: 0, - argTypes: [], - returnType: undefined, - language: "", - link: "", - identifier: "", - }; -} - -export const Function = { - fromJSON(object: any): Function { - return { - id: isSet(object.id) ? Number(object.id) : 0, - schemaId: isSet(object.schemaId) ? Number(object.schemaId) : 0, - databaseId: isSet(object.databaseId) ? Number(object.databaseId) : 0, - name: isSet(object.name) ? String(object.name) : "", - owner: isSet(object.owner) ? 
Number(object.owner) : 0, - argTypes: Array.isArray(object?.argTypes) - ? object.argTypes.map((e: any) => DataType.fromJSON(e)) - : [], - returnType: isSet(object.returnType) ? DataType.fromJSON(object.returnType) : undefined, - language: isSet(object.language) ? String(object.language) : "", - link: isSet(object.link) ? String(object.link) : "", - identifier: isSet(object.identifier) ? String(object.identifier) : "", - }; - }, - - toJSON(message: Function): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.schemaId !== undefined && (obj.schemaId = Math.round(message.schemaId)); - message.databaseId !== undefined && (obj.databaseId = Math.round(message.databaseId)); - message.name !== undefined && (obj.name = message.name); - message.owner !== undefined && (obj.owner = Math.round(message.owner)); - if (message.argTypes) { - obj.argTypes = message.argTypes.map((e) => e ? DataType.toJSON(e) : undefined); - } else { - obj.argTypes = []; - } - message.returnType !== undefined && - (obj.returnType = message.returnType ? DataType.toJSON(message.returnType) : undefined); - message.language !== undefined && (obj.language = message.language); - message.link !== undefined && (obj.link = message.link); - message.identifier !== undefined && (obj.identifier = message.identifier); - return obj; - }, - - fromPartial, I>>(object: I): Function { - const message = createBaseFunction(); - message.id = object.id ?? 0; - message.schemaId = object.schemaId ?? 0; - message.databaseId = object.databaseId ?? 0; - message.name = object.name ?? ""; - message.owner = object.owner ?? 0; - message.argTypes = object.argTypes?.map((e) => DataType.fromPartial(e)) || []; - message.returnType = (object.returnType !== undefined && object.returnType !== null) - ? DataType.fromPartial(object.returnType) - : undefined; - message.language = object.language ?? ""; - message.link = object.link ?? ""; - message.identifier = object.identifier ?? ""; - return message; - }, -}; - -function createBaseTable(): Table { - return { - id: 0, - schemaId: 0, - databaseId: 0, - name: "", - columns: [], - pk: [], - dependentRelations: [], - optionalAssociatedSourceId: undefined, - tableType: Table_TableType.UNSPECIFIED, - distributionKey: [], - streamKey: [], - appendOnly: false, - owner: 0, - properties: {}, - fragmentId: 0, - vnodeColIndex: undefined, - rowIdIndex: undefined, - valueIndices: [], - definition: "", - handlePkConflictBehavior: HandleConflictBehavior.NO_CHECK_UNSPECIFIED, - readPrefixLenHint: 0, - watermarkIndices: [], - distKeyInPk: [], - version: undefined, - }; -} - -export const Table = { - fromJSON(object: any): Table { - return { - id: isSet(object.id) ? Number(object.id) : 0, - schemaId: isSet(object.schemaId) ? Number(object.schemaId) : 0, - databaseId: isSet(object.databaseId) ? Number(object.databaseId) : 0, - name: isSet(object.name) ? String(object.name) : "", - columns: Array.isArray(object?.columns) ? object.columns.map((e: any) => ColumnCatalog.fromJSON(e)) : [], - pk: Array.isArray(object?.pk) ? object.pk.map((e: any) => ColumnOrder.fromJSON(e)) : [], - dependentRelations: Array.isArray(object?.dependentRelations) - ? object.dependentRelations.map((e: any) => Number(e)) - : [], - optionalAssociatedSourceId: isSet(object.associatedSourceId) - ? { $case: "associatedSourceId", associatedSourceId: Number(object.associatedSourceId) } - : undefined, - tableType: isSet(object.tableType) ? 
table_TableTypeFromJSON(object.tableType) : Table_TableType.UNSPECIFIED, - distributionKey: Array.isArray(object?.distributionKey) - ? object.distributionKey.map((e: any) => Number(e)) - : [], - streamKey: Array.isArray(object?.streamKey) ? object.streamKey.map((e: any) => Number(e)) : [], - appendOnly: isSet(object.appendOnly) ? Boolean(object.appendOnly) : false, - owner: isSet(object.owner) ? Number(object.owner) : 0, - properties: isObject(object.properties) - ? Object.entries(object.properties).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - fragmentId: isSet(object.fragmentId) ? Number(object.fragmentId) : 0, - vnodeColIndex: isSet(object.vnodeColIndex) ? Number(object.vnodeColIndex) : undefined, - rowIdIndex: isSet(object.rowIdIndex) ? Number(object.rowIdIndex) : undefined, - valueIndices: Array.isArray(object?.valueIndices) - ? object.valueIndices.map((e: any) => Number(e)) - : [], - definition: isSet(object.definition) ? String(object.definition) : "", - handlePkConflictBehavior: isSet(object.handlePkConflictBehavior) - ? handleConflictBehaviorFromJSON(object.handlePkConflictBehavior) - : HandleConflictBehavior.NO_CHECK_UNSPECIFIED, - readPrefixLenHint: isSet(object.readPrefixLenHint) ? Number(object.readPrefixLenHint) : 0, - watermarkIndices: Array.isArray(object?.watermarkIndices) - ? object.watermarkIndices.map((e: any) => Number(e)) - : [], - distKeyInPk: Array.isArray(object?.distKeyInPk) ? object.distKeyInPk.map((e: any) => Number(e)) : [], - version: isSet(object.version) ? Table_TableVersion.fromJSON(object.version) : undefined, - }; - }, - - toJSON(message: Table): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.schemaId !== undefined && (obj.schemaId = Math.round(message.schemaId)); - message.databaseId !== undefined && (obj.databaseId = Math.round(message.databaseId)); - message.name !== undefined && (obj.name = message.name); - if (message.columns) { - obj.columns = message.columns.map((e) => e ? ColumnCatalog.toJSON(e) : undefined); - } else { - obj.columns = []; - } - if (message.pk) { - obj.pk = message.pk.map((e) => e ? 
ColumnOrder.toJSON(e) : undefined); - } else { - obj.pk = []; - } - if (message.dependentRelations) { - obj.dependentRelations = message.dependentRelations.map((e) => Math.round(e)); - } else { - obj.dependentRelations = []; - } - message.optionalAssociatedSourceId?.$case === "associatedSourceId" && - (obj.associatedSourceId = Math.round(message.optionalAssociatedSourceId?.associatedSourceId)); - message.tableType !== undefined && (obj.tableType = table_TableTypeToJSON(message.tableType)); - if (message.distributionKey) { - obj.distributionKey = message.distributionKey.map((e) => Math.round(e)); - } else { - obj.distributionKey = []; - } - if (message.streamKey) { - obj.streamKey = message.streamKey.map((e) => Math.round(e)); - } else { - obj.streamKey = []; - } - message.appendOnly !== undefined && (obj.appendOnly = message.appendOnly); - message.owner !== undefined && (obj.owner = Math.round(message.owner)); - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - message.fragmentId !== undefined && (obj.fragmentId = Math.round(message.fragmentId)); - message.vnodeColIndex !== undefined && (obj.vnodeColIndex = Math.round(message.vnodeColIndex)); - message.rowIdIndex !== undefined && (obj.rowIdIndex = Math.round(message.rowIdIndex)); - if (message.valueIndices) { - obj.valueIndices = message.valueIndices.map((e) => Math.round(e)); - } else { - obj.valueIndices = []; - } - message.definition !== undefined && (obj.definition = message.definition); - message.handlePkConflictBehavior !== undefined && - (obj.handlePkConflictBehavior = handleConflictBehaviorToJSON(message.handlePkConflictBehavior)); - message.readPrefixLenHint !== undefined && (obj.readPrefixLenHint = Math.round(message.readPrefixLenHint)); - if (message.watermarkIndices) { - obj.watermarkIndices = message.watermarkIndices.map((e) => Math.round(e)); - } else { - obj.watermarkIndices = []; - } - if (message.distKeyInPk) { - obj.distKeyInPk = message.distKeyInPk.map((e) => Math.round(e)); - } else { - obj.distKeyInPk = []; - } - message.version !== undefined && - (obj.version = message.version ? Table_TableVersion.toJSON(message.version) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): Table { - const message = createBaseTable(); - message.id = object.id ?? 0; - message.schemaId = object.schemaId ?? 0; - message.databaseId = object.databaseId ?? 0; - message.name = object.name ?? ""; - message.columns = object.columns?.map((e) => ColumnCatalog.fromPartial(e)) || []; - message.pk = object.pk?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.dependentRelations = object.dependentRelations?.map((e) => e) || []; - if ( - object.optionalAssociatedSourceId?.$case === "associatedSourceId" && - object.optionalAssociatedSourceId?.associatedSourceId !== undefined && - object.optionalAssociatedSourceId?.associatedSourceId !== null - ) { - message.optionalAssociatedSourceId = { - $case: "associatedSourceId", - associatedSourceId: object.optionalAssociatedSourceId.associatedSourceId, - }; - } - message.tableType = object.tableType ?? Table_TableType.UNSPECIFIED; - message.distributionKey = object.distributionKey?.map((e) => e) || []; - message.streamKey = object.streamKey?.map((e) => e) || []; - message.appendOnly = object.appendOnly ?? false; - message.owner = object.owner ?? 0; - message.properties = Object.entries(object.properties ?? 
{}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.fragmentId = object.fragmentId ?? 0; - message.vnodeColIndex = object.vnodeColIndex ?? undefined; - message.rowIdIndex = object.rowIdIndex ?? undefined; - message.valueIndices = object.valueIndices?.map((e) => e) || []; - message.definition = object.definition ?? ""; - message.handlePkConflictBehavior = object.handlePkConflictBehavior ?? HandleConflictBehavior.NO_CHECK_UNSPECIFIED; - message.readPrefixLenHint = object.readPrefixLenHint ?? 0; - message.watermarkIndices = object.watermarkIndices?.map((e) => e) || []; - message.distKeyInPk = object.distKeyInPk?.map((e) => e) || []; - message.version = (object.version !== undefined && object.version !== null) - ? Table_TableVersion.fromPartial(object.version) - : undefined; - return message; - }, -}; - -function createBaseTable_TableVersion(): Table_TableVersion { - return { version: 0, nextColumnId: 0 }; -} - -export const Table_TableVersion = { - fromJSON(object: any): Table_TableVersion { - return { - version: isSet(object.version) ? Number(object.version) : 0, - nextColumnId: isSet(object.nextColumnId) ? Number(object.nextColumnId) : 0, - }; - }, - - toJSON(message: Table_TableVersion): unknown { - const obj: any = {}; - message.version !== undefined && (obj.version = Math.round(message.version)); - message.nextColumnId !== undefined && (obj.nextColumnId = Math.round(message.nextColumnId)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<Table_TableVersion>, I>>(object: I): Table_TableVersion { - const message = createBaseTable_TableVersion(); - message.version = object.version ?? 0; - message.nextColumnId = object.nextColumnId ?? 0; - return message; - }, -}; - -function createBaseTable_PropertiesEntry(): Table_PropertiesEntry { - return { key: "", value: "" }; -} - -export const Table_PropertiesEntry = { - fromJSON(object: any): Table_PropertiesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: Table_PropertiesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<Table_PropertiesEntry>, I>>(object: I): Table_PropertiesEntry { - const message = createBaseTable_PropertiesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseView(): View { - return { - id: 0, - schemaId: 0, - databaseId: 0, - name: "", - owner: 0, - properties: {}, - sql: "", - dependentRelations: [], - columns: [], - }; -} - -export const View = { - fromJSON(object: any): View { - return { - id: isSet(object.id) ? Number(object.id) : 0, - schemaId: isSet(object.schemaId) ? Number(object.schemaId) : 0, - databaseId: isSet(object.databaseId) ? Number(object.databaseId) : 0, - name: isSet(object.name) ? String(object.name) : "", - owner: isSet(object.owner) ? Number(object.owner) : 0, - properties: isObject(object.properties) - ? Object.entries(object.properties).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - sql: isSet(object.sql) ? String(object.sql) : "", - dependentRelations: Array.isArray(object?.dependentRelations) - ? object.dependentRelations.map((e: any) => Number(e)) - : [], - columns: Array.isArray(object?.columns) - ?
object.columns.map((e: any) => Field.fromJSON(e)) - : [], - }; - }, - - toJSON(message: View): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.schemaId !== undefined && (obj.schemaId = Math.round(message.schemaId)); - message.databaseId !== undefined && (obj.databaseId = Math.round(message.databaseId)); - message.name !== undefined && (obj.name = message.name); - message.owner !== undefined && (obj.owner = Math.round(message.owner)); - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - message.sql !== undefined && (obj.sql = message.sql); - if (message.dependentRelations) { - obj.dependentRelations = message.dependentRelations.map((e) => Math.round(e)); - } else { - obj.dependentRelations = []; - } - if (message.columns) { - obj.columns = message.columns.map((e) => e ? Field.toJSON(e) : undefined); - } else { - obj.columns = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<View>, I>>(object: I): View { - const message = createBaseView(); - message.id = object.id ?? 0; - message.schemaId = object.schemaId ?? 0; - message.databaseId = object.databaseId ?? 0; - message.name = object.name ?? ""; - message.owner = object.owner ?? 0; - message.properties = Object.entries(object.properties ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.sql = object.sql ?? ""; - message.dependentRelations = object.dependentRelations?.map((e) => e) || []; - message.columns = object.columns?.map((e) => Field.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseView_PropertiesEntry(): View_PropertiesEntry { - return { key: "", value: "" }; -} - -export const View_PropertiesEntry = { - fromJSON(object: any): View_PropertiesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: View_PropertiesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<View_PropertiesEntry>, I>>(object: I): View_PropertiesEntry { - const message = createBaseView_PropertiesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseSchema(): Schema { - return { id: 0, databaseId: 0, name: "", owner: 0 }; -} - -export const Schema = { - fromJSON(object: any): Schema { - return { - id: isSet(object.id) ? Number(object.id) : 0, - databaseId: isSet(object.databaseId) ? Number(object.databaseId) : 0, - name: isSet(object.name) ? String(object.name) : "", - owner: isSet(object.owner) ? Number(object.owner) : 0, - }; - }, - - toJSON(message: Schema): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.databaseId !== undefined && (obj.databaseId = Math.round(message.databaseId)); - message.name !== undefined && (obj.name = message.name); - message.owner !== undefined && (obj.owner = Math.round(message.owner)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<Schema>, I>>(object: I): Schema { - const message = createBaseSchema(); - message.id = object.id ?? 0; - message.databaseId = object.databaseId ?? 0; - message.name = object.name ?? ""; - message.owner = object.owner ??
0; - return message; - }, -}; - -function createBaseDatabase(): Database { - return { id: 0, name: "", owner: 0 }; -} - -export const Database = { - fromJSON(object: any): Database { - return { - id: isSet(object.id) ? Number(object.id) : 0, - name: isSet(object.name) ? String(object.name) : "", - owner: isSet(object.owner) ? Number(object.owner) : 0, - }; - }, - - toJSON(message: Database): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.name !== undefined && (obj.name = message.name); - message.owner !== undefined && (obj.owner = Math.round(message.owner)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<Database>, I>>(object: I): Database { - const message = createBaseDatabase(); - message.id = object.id ?? 0; - message.name = object.name ?? ""; - message.owner = object.owner ?? 0; - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial<T> = T extends Builtin ? T - : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> - : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> } - : Partial<T>; - -type KeysOfUnion<T> = T extends T ? keyof T : never; -export type Exact<P, I extends P> = P extends Builtin ? P - : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never }; - -function isObject(value: any): boolean { - return typeof value === "object" && value !== null; -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/common.ts b/dashboard/proto/gen/common.ts deleted file mode 100644 index f3cefe80d6710..0000000000000 --- a/dashboard/proto/gen/common.ts +++ /dev/null @@ -1,659 +0,0 @@ -/* eslint-disable */ - -export const protobufPackage = "common"; - -export const WorkerType = { - UNSPECIFIED: "UNSPECIFIED", - FRONTEND: "FRONTEND", - COMPUTE_NODE: "COMPUTE_NODE", - RISE_CTL: "RISE_CTL", - COMPACTOR: "COMPACTOR", - META: "META", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type WorkerType = typeof WorkerType[keyof typeof WorkerType]; - -export function workerTypeFromJSON(object: any): WorkerType { - switch (object) { - case 0: - case "UNSPECIFIED": - return WorkerType.UNSPECIFIED; - case 1: - case "FRONTEND": - return WorkerType.FRONTEND; - case 2: - case "COMPUTE_NODE": - return WorkerType.COMPUTE_NODE; - case 3: - case "RISE_CTL": - return WorkerType.RISE_CTL; - case 4: - case "COMPACTOR": - return WorkerType.COMPACTOR; - case 5: - case "META": - return WorkerType.META; - case -1: - case "UNRECOGNIZED": - default: - return WorkerType.UNRECOGNIZED; - } -} - -export function workerTypeToJSON(object: WorkerType): string { - switch (object) { - case WorkerType.UNSPECIFIED: - return "UNSPECIFIED"; - case WorkerType.FRONTEND: - return "FRONTEND"; - case WorkerType.COMPUTE_NODE: - return "COMPUTE_NODE"; - case WorkerType.RISE_CTL: - return "RISE_CTL"; - case WorkerType.COMPACTOR: - return "COMPACTOR"; - case WorkerType.META: - return "META"; - case WorkerType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export const Direction = { - DIRECTION_UNSPECIFIED: "DIRECTION_UNSPECIFIED", - DIRECTION_ASCENDING: "DIRECTION_ASCENDING", - DIRECTION_DESCENDING: "DIRECTION_DESCENDING", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type Direction = typeof Direction[keyof typeof Direction]; - -export function directionFromJSON(object: any): Direction { - switch (object) { - case 0: - case
"DIRECTION_UNSPECIFIED": - return Direction.DIRECTION_UNSPECIFIED; - case 1: - case "DIRECTION_ASCENDING": - return Direction.DIRECTION_ASCENDING; - case 2: - case "DIRECTION_DESCENDING": - return Direction.DIRECTION_DESCENDING; - case -1: - case "UNRECOGNIZED": - default: - return Direction.UNRECOGNIZED; - } -} - -export function directionToJSON(object: Direction): string { - switch (object) { - case Direction.DIRECTION_UNSPECIFIED: - return "DIRECTION_UNSPECIFIED"; - case Direction.DIRECTION_ASCENDING: - return "DIRECTION_ASCENDING"; - case Direction.DIRECTION_DESCENDING: - return "DIRECTION_DESCENDING"; - case Direction.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface Status { - code: Status_Code; - message: string; -} - -export const Status_Code = { - UNSPECIFIED: "UNSPECIFIED", - OK: "OK", - UNKNOWN_WORKER: "UNKNOWN_WORKER", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type Status_Code = typeof Status_Code[keyof typeof Status_Code]; - -export function status_CodeFromJSON(object: any): Status_Code { - switch (object) { - case 0: - case "UNSPECIFIED": - return Status_Code.UNSPECIFIED; - case 1: - case "OK": - return Status_Code.OK; - case 2: - case "UNKNOWN_WORKER": - return Status_Code.UNKNOWN_WORKER; - case -1: - case "UNRECOGNIZED": - default: - return Status_Code.UNRECOGNIZED; - } -} - -export function status_CodeToJSON(object: Status_Code): string { - switch (object) { - case Status_Code.UNSPECIFIED: - return "UNSPECIFIED"; - case Status_Code.OK: - return "OK"; - case Status_Code.UNKNOWN_WORKER: - return "UNKNOWN_WORKER"; - case Status_Code.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface HostAddress { - host: string; - port: number; -} - -/** Encode which host machine an actor resides. 
*/ -export interface ActorInfo { - actorId: number; - host: HostAddress | undefined; -} - -export interface ParallelUnit { - id: number; - workerNodeId: number; -} - -export interface WorkerNode { - id: number; - type: WorkerType; - host: HostAddress | undefined; - state: WorkerNode_State; - parallelUnits: ParallelUnit[]; -} - -export const WorkerNode_State = { - UNSPECIFIED: "UNSPECIFIED", - STARTING: "STARTING", - RUNNING: "RUNNING", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type WorkerNode_State = typeof WorkerNode_State[keyof typeof WorkerNode_State]; - -export function workerNode_StateFromJSON(object: any): WorkerNode_State { - switch (object) { - case 0: - case "UNSPECIFIED": - return WorkerNode_State.UNSPECIFIED; - case 1: - case "STARTING": - return WorkerNode_State.STARTING; - case 2: - case "RUNNING": - return WorkerNode_State.RUNNING; - case -1: - case "UNRECOGNIZED": - default: - return WorkerNode_State.UNRECOGNIZED; - } -} - -export function workerNode_StateToJSON(object: WorkerNode_State): string { - switch (object) { - case WorkerNode_State.UNSPECIFIED: - return "UNSPECIFIED"; - case WorkerNode_State.STARTING: - return "STARTING"; - case WorkerNode_State.RUNNING: - return "RUNNING"; - case WorkerNode_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface Buffer { - compression: Buffer_CompressionType; - body: Uint8Array; -} - -export const Buffer_CompressionType = { - UNSPECIFIED: "UNSPECIFIED", - NONE: "NONE", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type Buffer_CompressionType = typeof Buffer_CompressionType[keyof typeof Buffer_CompressionType]; - -export function buffer_CompressionTypeFromJSON(object: any): Buffer_CompressionType { - switch (object) { - case 0: - case "UNSPECIFIED": - return Buffer_CompressionType.UNSPECIFIED; - case 1: - case "NONE": - return Buffer_CompressionType.NONE; - case -1: - case "UNRECOGNIZED": - default: - return Buffer_CompressionType.UNRECOGNIZED; - } -} - -export function buffer_CompressionTypeToJSON(object: Buffer_CompressionType): string { - switch (object) { - case Buffer_CompressionType.UNSPECIFIED: - return "UNSPECIFIED"; - case Buffer_CompressionType.NONE: - return "NONE"; - case Buffer_CompressionType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** Vnode mapping for stream fragments. Stores mapping from virtual node to parallel unit id. */ -export interface ParallelUnitMapping { - originalIndices: number[]; - data: number[]; -} - -export interface BatchQueryEpoch { - epoch?: { $case: "committed"; committed: number } | { $case: "current"; current: number } | { - $case: "backup"; - backup: number; - }; -} - -export interface OrderType { - /** - * TODO(rc): enable `NULLS FIRST | LAST` - * NullsAre nulls_are = 2; - */ - direction: Direction; -} - -/** Column index with an order type (ASC or DESC). Used to represent a sort key (`repeated ColumnOrder`). */ -export interface ColumnOrder { - columnIndex: number; - orderType: OrderType | undefined; -} - -function createBaseStatus(): Status { - return { code: Status_Code.UNSPECIFIED, message: "" }; -} - -export const Status = { - fromJSON(object: any): Status { - return { - code: isSet(object.code) ? status_CodeFromJSON(object.code) : Status_Code.UNSPECIFIED, - message: isSet(object.message) ? 
String(object.message) : "", - }; - }, - - toJSON(message: Status): unknown { - const obj: any = {}; - message.code !== undefined && (obj.code = status_CodeToJSON(message.code)); - message.message !== undefined && (obj.message = message.message); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<Status>, I>>(object: I): Status { - const message = createBaseStatus(); - message.code = object.code ?? Status_Code.UNSPECIFIED; - message.message = object.message ?? ""; - return message; - }, -}; - -function createBaseHostAddress(): HostAddress { - return { host: "", port: 0 }; -} - -export const HostAddress = { - fromJSON(object: any): HostAddress { - return { host: isSet(object.host) ? String(object.host) : "", port: isSet(object.port) ? Number(object.port) : 0 }; - }, - - toJSON(message: HostAddress): unknown { - const obj: any = {}; - message.host !== undefined && (obj.host = message.host); - message.port !== undefined && (obj.port = Math.round(message.port)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<HostAddress>, I>>(object: I): HostAddress { - const message = createBaseHostAddress(); - message.host = object.host ?? ""; - message.port = object.port ?? 0; - return message; - }, -}; - -function createBaseActorInfo(): ActorInfo { - return { actorId: 0, host: undefined }; -} - -export const ActorInfo = { - fromJSON(object: any): ActorInfo { - return { - actorId: isSet(object.actorId) ? Number(object.actorId) : 0, - host: isSet(object.host) ? HostAddress.fromJSON(object.host) : undefined, - }; - }, - - toJSON(message: ActorInfo): unknown { - const obj: any = {}; - message.actorId !== undefined && (obj.actorId = Math.round(message.actorId)); - message.host !== undefined && (obj.host = message.host ? HostAddress.toJSON(message.host) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ActorInfo>, I>>(object: I): ActorInfo { - const message = createBaseActorInfo(); - message.actorId = object.actorId ?? 0; - message.host = (object.host !== undefined && object.host !== null) - ? HostAddress.fromPartial(object.host) - : undefined; - return message; - }, -}; - -function createBaseParallelUnit(): ParallelUnit { - return { id: 0, workerNodeId: 0 }; -} - -export const ParallelUnit = { - fromJSON(object: any): ParallelUnit { - return { - id: isSet(object.id) ? Number(object.id) : 0, - workerNodeId: isSet(object.workerNodeId) ? Number(object.workerNodeId) : 0, - }; - }, - - toJSON(message: ParallelUnit): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.workerNodeId !== undefined && (obj.workerNodeId = Math.round(message.workerNodeId)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ParallelUnit>, I>>(object: I): ParallelUnit { - const message = createBaseParallelUnit(); - message.id = object.id ?? 0; - message.workerNodeId = object.workerNodeId ?? 0; - return message; - }, -}; - -function createBaseWorkerNode(): WorkerNode { - return { - id: 0, - type: WorkerType.UNSPECIFIED, - host: undefined, - state: WorkerNode_State.UNSPECIFIED, - parallelUnits: [], - }; -} - -export const WorkerNode = { - fromJSON(object: any): WorkerNode { - return { - id: isSet(object.id) ? Number(object.id) : 0, - type: isSet(object.type) ? workerTypeFromJSON(object.type) : WorkerType.UNSPECIFIED, - host: isSet(object.host) ? HostAddress.fromJSON(object.host) : undefined, - state: isSet(object.state) ? workerNode_StateFromJSON(object.state) : WorkerNode_State.UNSPECIFIED, - parallelUnits: Array.isArray(object?.parallelUnits) - ?
object.parallelUnits.map((e: any) => ParallelUnit.fromJSON(e)) - : [], - }; - }, - - toJSON(message: WorkerNode): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.type !== undefined && (obj.type = workerTypeToJSON(message.type)); - message.host !== undefined && (obj.host = message.host ? HostAddress.toJSON(message.host) : undefined); - message.state !== undefined && (obj.state = workerNode_StateToJSON(message.state)); - if (message.parallelUnits) { - obj.parallelUnits = message.parallelUnits.map((e) => e ? ParallelUnit.toJSON(e) : undefined); - } else { - obj.parallelUnits = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<WorkerNode>, I>>(object: I): WorkerNode { - const message = createBaseWorkerNode(); - message.id = object.id ?? 0; - message.type = object.type ?? WorkerType.UNSPECIFIED; - message.host = (object.host !== undefined && object.host !== null) - ? HostAddress.fromPartial(object.host) - : undefined; - message.state = object.state ?? WorkerNode_State.UNSPECIFIED; - message.parallelUnits = object.parallelUnits?.map((e) => ParallelUnit.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseBuffer(): Buffer { - return { compression: Buffer_CompressionType.UNSPECIFIED, body: new Uint8Array() }; -} - -export const Buffer = { - fromJSON(object: any): Buffer { - return { - compression: isSet(object.compression) - ? buffer_CompressionTypeFromJSON(object.compression) - : Buffer_CompressionType.UNSPECIFIED, - body: isSet(object.body) ? bytesFromBase64(object.body) : new Uint8Array(), - }; - }, - - toJSON(message: Buffer): unknown { - const obj: any = {}; - message.compression !== undefined && (obj.compression = buffer_CompressionTypeToJSON(message.compression)); - message.body !== undefined && - (obj.body = base64FromBytes(message.body !== undefined ? message.body : new Uint8Array())); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<Buffer>, I>>(object: I): Buffer { - const message = createBaseBuffer(); - message.compression = object.compression ?? Buffer_CompressionType.UNSPECIFIED; - message.body = object.body ?? new Uint8Array(); - return message; - }, -}; - -function createBaseParallelUnitMapping(): ParallelUnitMapping { - return { originalIndices: [], data: [] }; -} - -export const ParallelUnitMapping = { - fromJSON(object: any): ParallelUnitMapping { - return { - originalIndices: Array.isArray(object?.originalIndices) ? object.originalIndices.map((e: any) => Number(e)) : [], - data: Array.isArray(object?.data) ? object.data.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: ParallelUnitMapping): unknown { - const obj: any = {}; - if (message.originalIndices) { - obj.originalIndices = message.originalIndices.map((e) => Math.round(e)); - } else { - obj.originalIndices = []; - } - if (message.data) { - obj.data = message.data.map((e) => Math.round(e)); - } else { - obj.data = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ParallelUnitMapping>, I>>(object: I): ParallelUnitMapping { - const message = createBaseParallelUnitMapping(); - message.originalIndices = object.originalIndices?.map((e) => e) || []; - message.data = object.data?.map((e) => e) || []; - return message; - }, -}; - -function createBaseBatchQueryEpoch(): BatchQueryEpoch { - return { epoch: undefined }; -} - -export const BatchQueryEpoch = { - fromJSON(object: any): BatchQueryEpoch { - return { - epoch: isSet(object.committed) - ? { $case: "committed", committed: Number(object.committed) } - : isSet(object.current) - ?
{ $case: "current", current: Number(object.current) } - : isSet(object.backup) - ? { $case: "backup", backup: Number(object.backup) } - : undefined, - }; - }, - - toJSON(message: BatchQueryEpoch): unknown { - const obj: any = {}; - message.epoch?.$case === "committed" && (obj.committed = Math.round(message.epoch?.committed)); - message.epoch?.$case === "current" && (obj.current = Math.round(message.epoch?.current)); - message.epoch?.$case === "backup" && (obj.backup = Math.round(message.epoch?.backup)); - return obj; - }, - - fromPartial, I>>(object: I): BatchQueryEpoch { - const message = createBaseBatchQueryEpoch(); - if ( - object.epoch?.$case === "committed" && object.epoch?.committed !== undefined && object.epoch?.committed !== null - ) { - message.epoch = { $case: "committed", committed: object.epoch.committed }; - } - if (object.epoch?.$case === "current" && object.epoch?.current !== undefined && object.epoch?.current !== null) { - message.epoch = { $case: "current", current: object.epoch.current }; - } - if (object.epoch?.$case === "backup" && object.epoch?.backup !== undefined && object.epoch?.backup !== null) { - message.epoch = { $case: "backup", backup: object.epoch.backup }; - } - return message; - }, -}; - -function createBaseOrderType(): OrderType { - return { direction: Direction.DIRECTION_UNSPECIFIED }; -} - -export const OrderType = { - fromJSON(object: any): OrderType { - return { - direction: isSet(object.direction) ? directionFromJSON(object.direction) : Direction.DIRECTION_UNSPECIFIED, - }; - }, - - toJSON(message: OrderType): unknown { - const obj: any = {}; - message.direction !== undefined && (obj.direction = directionToJSON(message.direction)); - return obj; - }, - - fromPartial, I>>(object: I): OrderType { - const message = createBaseOrderType(); - message.direction = object.direction ?? Direction.DIRECTION_UNSPECIFIED; - return message; - }, -}; - -function createBaseColumnOrder(): ColumnOrder { - return { columnIndex: 0, orderType: undefined }; -} - -export const ColumnOrder = { - fromJSON(object: any): ColumnOrder { - return { - columnIndex: isSet(object.columnIndex) ? Number(object.columnIndex) : 0, - orderType: isSet(object.orderType) ? OrderType.fromJSON(object.orderType) : undefined, - }; - }, - - toJSON(message: ColumnOrder): unknown { - const obj: any = {}; - message.columnIndex !== undefined && (obj.columnIndex = Math.round(message.columnIndex)); - message.orderType !== undefined && - (obj.orderType = message.orderType ? OrderType.toJSON(message.orderType) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ColumnOrder { - const message = createBaseColumnOrder(); - message.columnIndex = object.columnIndex ?? 0; - message.orderType = (object.orderType !== undefined && object.orderType !== null) - ? 
OrderType.fromPartial(object.orderType) - : undefined; - return message; - }, -}; - -declare var self: any | undefined; -declare var window: any | undefined; -declare var global: any | undefined; -var globalThis: any = (() => { - if (typeof globalThis !== "undefined") { - return globalThis; - } - if (typeof self !== "undefined") { - return self; - } - if (typeof window !== "undefined") { - return window; - } - if (typeof global !== "undefined") { - return global; - } - throw "Unable to locate global object"; -})(); - -function bytesFromBase64(b64: string): Uint8Array { - if (globalThis.Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if (globalThis.Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial<T> = T extends Builtin ? T - : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> - : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> } - : Partial<T>; - -type KeysOfUnion<T> = T extends T ? keyof T : never; -export type Exact<P, I extends P> = P extends Builtin ? P - : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/compactor.ts b/dashboard/proto/gen/compactor.ts deleted file mode 100644 index 3fee992c1f710..0000000000000 --- a/dashboard/proto/gen/compactor.ts +++ /dev/null @@ -1,100 +0,0 @@ -/* eslint-disable */ - -export const protobufPackage = "compactor"; - -export interface CompactorRuntimeConfig { - maxConcurrentTaskNumber: number; -} - -export interface SetRuntimeConfigRequest { - config: CompactorRuntimeConfig | undefined; -} - -export interface SetRuntimeConfigResponse { -} - -function createBaseCompactorRuntimeConfig(): CompactorRuntimeConfig { - return { maxConcurrentTaskNumber: 0 }; -} - -export const CompactorRuntimeConfig = { - fromJSON(object: any): CompactorRuntimeConfig { - return { - maxConcurrentTaskNumber: isSet(object.maxConcurrentTaskNumber) ? Number(object.maxConcurrentTaskNumber) : 0, - }; - }, - - toJSON(message: CompactorRuntimeConfig): unknown { - const obj: any = {}; - message.maxConcurrentTaskNumber !== undefined && - (obj.maxConcurrentTaskNumber = Math.round(message.maxConcurrentTaskNumber)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<CompactorRuntimeConfig>, I>>(object: I): CompactorRuntimeConfig { - const message = createBaseCompactorRuntimeConfig(); - message.maxConcurrentTaskNumber = object.maxConcurrentTaskNumber ?? 0; - return message; - }, -}; - -function createBaseSetRuntimeConfigRequest(): SetRuntimeConfigRequest { - return { config: undefined }; -} - -export const SetRuntimeConfigRequest = { - fromJSON(object: any): SetRuntimeConfigRequest { - return { config: isSet(object.config) ? CompactorRuntimeConfig.fromJSON(object.config) : undefined }; - }, - - toJSON(message: SetRuntimeConfigRequest): unknown { - const obj: any = {}; - message.config !== undefined && - (obj.config = message.config ?
CompactorRuntimeConfig.toJSON(message.config) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SetRuntimeConfigRequest>, I>>(object: I): SetRuntimeConfigRequest { - const message = createBaseSetRuntimeConfigRequest(); - message.config = (object.config !== undefined && object.config !== null) - ? CompactorRuntimeConfig.fromPartial(object.config) - : undefined; - return message; - }, -}; - -function createBaseSetRuntimeConfigResponse(): SetRuntimeConfigResponse { - return {}; -} - -export const SetRuntimeConfigResponse = { - fromJSON(_: any): SetRuntimeConfigResponse { - return {}; - }, - - toJSON(_: SetRuntimeConfigResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SetRuntimeConfigResponse>, I>>(_: I): SetRuntimeConfigResponse { - const message = createBaseSetRuntimeConfigResponse(); - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial<T> = T extends Builtin ? T - : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> - : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> } - : Partial<T>; - -type KeysOfUnion<T> = T extends T ? keyof T : never; -export type Exact<P, I extends P> = P extends Builtin ? P - : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/compute.ts b/dashboard/proto/gen/compute.ts deleted file mode 100644 index 95d6595232634..0000000000000 --- a/dashboard/proto/gen/compute.ts +++ /dev/null @@ -1,74 +0,0 @@ -/* eslint-disable */ - -export const protobufPackage = "compute"; - -export interface ShowConfigRequest { -} - -export interface ShowConfigResponse { - batchConfig: string; - streamConfig: string; -} - -function createBaseShowConfigRequest(): ShowConfigRequest { - return {}; -} - -export const ShowConfigRequest = { - fromJSON(_: any): ShowConfigRequest { - return {}; - }, - - toJSON(_: ShowConfigRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ShowConfigRequest>, I>>(_: I): ShowConfigRequest { - const message = createBaseShowConfigRequest(); - return message; - }, -}; - -function createBaseShowConfigResponse(): ShowConfigResponse { - return { batchConfig: "", streamConfig: "" }; -} - -export const ShowConfigResponse = { - fromJSON(object: any): ShowConfigResponse { - return { - batchConfig: isSet(object.batchConfig) ? String(object.batchConfig) : "", - streamConfig: isSet(object.streamConfig) ? String(object.streamConfig) : "", - }; - }, - - toJSON(message: ShowConfigResponse): unknown { - const obj: any = {}; - message.batchConfig !== undefined && (obj.batchConfig = message.batchConfig); - message.streamConfig !== undefined && (obj.streamConfig = message.streamConfig); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ShowConfigResponse>, I>>(object: I): ShowConfigResponse { - const message = createBaseShowConfigResponse(); - message.batchConfig = object.batchConfig ?? ""; - message.streamConfig = object.streamConfig ?? ""; - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial<T> = T extends Builtin ? T - : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> - : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> } - : Partial<T>; - -type KeysOfUnion<T> = T extends T ? keyof T : never; -export type Exact<P, I extends P> = P extends Builtin ?
P - : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/connector_service.ts b/dashboard/proto/gen/connector_service.ts deleted file mode 100644 index 7b3b6b4295d7d..0000000000000 --- a/dashboard/proto/gen/connector_service.ts +++ /dev/null @@ -1,1066 +0,0 @@ -/* eslint-disable */ -import { - DataType_TypeName, - dataType_TypeNameFromJSON, - dataType_TypeNameToJSON, - Op, - opFromJSON, - opToJSON, -} from "./data"; - -export const protobufPackage = "connector_service"; - -export const SourceType = { - UNSPECIFIED: "UNSPECIFIED", - MYSQL: "MYSQL", - POSTGRES: "POSTGRES", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type SourceType = typeof SourceType[keyof typeof SourceType]; - -export function sourceTypeFromJSON(object: any): SourceType { - switch (object) { - case 0: - case "UNSPECIFIED": - return SourceType.UNSPECIFIED; - case 1: - case "MYSQL": - return SourceType.MYSQL; - case 2: - case "POSTGRES": - return SourceType.POSTGRES; - case -1: - case "UNRECOGNIZED": - default: - return SourceType.UNRECOGNIZED; - } -} - -export function sourceTypeToJSON(object: SourceType): string { - switch (object) { - case SourceType.UNSPECIFIED: - return "UNSPECIFIED"; - case SourceType.MYSQL: - return "MYSQL"; - case SourceType.POSTGRES: - return "POSTGRES"; - case SourceType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface TableSchema { - columns: TableSchema_Column[]; - pkIndices: number[]; -} - -export interface TableSchema_Column { - name: string; - dataType: DataType_TypeName; -} - -export interface ValidationError { - errorMessage: string; -} - -export interface SinkConfig { - sinkType: string; - properties: { [key: string]: string }; - tableSchema: TableSchema | undefined; -} - -export interface SinkConfig_PropertiesEntry { - key: string; - value: string; -} - -export interface SinkStreamRequest { - request?: - | { $case: "start"; start: SinkStreamRequest_StartSink } - | { $case: "startEpoch"; startEpoch: SinkStreamRequest_StartEpoch } - | { $case: "write"; write: SinkStreamRequest_WriteBatch } - | { $case: "sync"; sync: SinkStreamRequest_SyncBatch }; -} - -export interface SinkStreamRequest_StartSink { - sinkConfig: SinkConfig | undefined; -} - -export interface SinkStreamRequest_WriteBatch { - payload?: { $case: "jsonPayload"; jsonPayload: SinkStreamRequest_WriteBatch_JsonPayload }; - batchId: number; - epoch: number; -} - -export interface SinkStreamRequest_WriteBatch_JsonPayload { - rowOps: SinkStreamRequest_WriteBatch_JsonPayload_RowOp[]; -} - -export interface SinkStreamRequest_WriteBatch_JsonPayload_RowOp { - opType: Op; - line: string; -} - -export interface SinkStreamRequest_StartEpoch { - epoch: number; -} - -export interface SinkStreamRequest_SyncBatch { - epoch: number; -} - -export interface SinkResponse { - response?: - | { $case: "sync"; sync: SinkResponse_SyncResponse } - | { $case: "startEpoch"; startEpoch: SinkResponse_StartEpochResponse } - | { $case: "write"; write: SinkResponse_WriteResponse } - | { $case: "start"; start: SinkResponse_StartResponse }; -} - -export interface SinkResponse_SyncResponse { - epoch: number; -} - -export interface SinkResponse_StartEpochResponse { - epoch: number; -} - -export interface SinkResponse_WriteResponse { - epoch: number; - batchId: number; -} - -export interface SinkResponse_StartResponse { -} - -export interface ValidateSinkRequest { - sinkConfig: SinkConfig |
undefined; -} - -export interface ValidateSinkResponse { - error: ValidationError | undefined; -} - -export interface CdcMessage { - payload: string; - partition: string; - offset: string; -} - -export interface GetEventStreamRequest { - request?: { $case: "validate"; validate: GetEventStreamRequest_ValidateProperties } | { - $case: "start"; - start: GetEventStreamRequest_StartSource; - }; -} - -export interface GetEventStreamRequest_ValidateProperties { - sourceId: number; - sourceType: SourceType; - properties: { [key: string]: string }; - tableSchema: TableSchema | undefined; -} - -export interface GetEventStreamRequest_ValidateProperties_PropertiesEntry { - key: string; - value: string; -} - -export interface GetEventStreamRequest_StartSource { - sourceId: number; - sourceType: SourceType; - startOffset: string; - properties: { [key: string]: string }; -} - -export interface GetEventStreamRequest_StartSource_PropertiesEntry { - key: string; - value: string; -} - -export interface GetEventStreamResponse { - sourceId: number; - events: CdcMessage[]; -} - -function createBaseTableSchema(): TableSchema { - return { columns: [], pkIndices: [] }; -} - -export const TableSchema = { - fromJSON(object: any): TableSchema { - return { - columns: Array.isArray(object?.columns) ? object.columns.map((e: any) => TableSchema_Column.fromJSON(e)) : [], - pkIndices: Array.isArray(object?.pkIndices) ? object.pkIndices.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: TableSchema): unknown { - const obj: any = {}; - if (message.columns) { - obj.columns = message.columns.map((e) => e ? TableSchema_Column.toJSON(e) : undefined); - } else { - obj.columns = []; - } - if (message.pkIndices) { - obj.pkIndices = message.pkIndices.map((e) => Math.round(e)); - } else { - obj.pkIndices = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<TableSchema>, I>>(object: I): TableSchema { - const message = createBaseTableSchema(); - message.columns = object.columns?.map((e) => TableSchema_Column.fromPartial(e)) || []; - message.pkIndices = object.pkIndices?.map((e) => e) || []; - return message; - }, -}; - -function createBaseTableSchema_Column(): TableSchema_Column { - return { name: "", dataType: DataType_TypeName.TYPE_UNSPECIFIED }; -} - -export const TableSchema_Column = { - fromJSON(object: any): TableSchema_Column { - return { - name: isSet(object.name) ? String(object.name) : "", - dataType: isSet(object.dataType) - ? dataType_TypeNameFromJSON(object.dataType) - : DataType_TypeName.TYPE_UNSPECIFIED, - }; - }, - - toJSON(message: TableSchema_Column): unknown { - const obj: any = {}; - message.name !== undefined && (obj.name = message.name); - message.dataType !== undefined && (obj.dataType = dataType_TypeNameToJSON(message.dataType)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<TableSchema_Column>, I>>(object: I): TableSchema_Column { - const message = createBaseTableSchema_Column(); - message.name = object.name ?? ""; - message.dataType = object.dataType ?? DataType_TypeName.TYPE_UNSPECIFIED; - return message; - }, -}; - -function createBaseValidationError(): ValidationError { - return { errorMessage: "" }; -} - -export const ValidationError = { - fromJSON(object: any): ValidationError { - return { errorMessage: isSet(object.errorMessage) ?
String(object.errorMessage) : "" }; - }, - - toJSON(message: ValidationError): unknown { - const obj: any = {}; - message.errorMessage !== undefined && (obj.errorMessage = message.errorMessage); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ValidationError>, I>>(object: I): ValidationError { - const message = createBaseValidationError(); - message.errorMessage = object.errorMessage ?? ""; - return message; - }, -}; - -function createBaseSinkConfig(): SinkConfig { - return { sinkType: "", properties: {}, tableSchema: undefined }; -} - -export const SinkConfig = { - fromJSON(object: any): SinkConfig { - return { - sinkType: isSet(object.sinkType) ? String(object.sinkType) : "", - properties: isObject(object.properties) - ? Object.entries(object.properties).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - tableSchema: isSet(object.tableSchema) ? TableSchema.fromJSON(object.tableSchema) : undefined, - }; - }, - - toJSON(message: SinkConfig): unknown { - const obj: any = {}; - message.sinkType !== undefined && (obj.sinkType = message.sinkType); - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - message.tableSchema !== undefined && - (obj.tableSchema = message.tableSchema ? TableSchema.toJSON(message.tableSchema) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkConfig>, I>>(object: I): SinkConfig { - const message = createBaseSinkConfig(); - message.sinkType = object.sinkType ?? ""; - message.properties = Object.entries(object.properties ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.tableSchema = (object.tableSchema !== undefined && object.tableSchema !== null) - ? TableSchema.fromPartial(object.tableSchema) - : undefined; - return message; - }, -}; - -function createBaseSinkConfig_PropertiesEntry(): SinkConfig_PropertiesEntry { - return { key: "", value: "" }; -} - -export const SinkConfig_PropertiesEntry = { - fromJSON(object: any): SinkConfig_PropertiesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: SinkConfig_PropertiesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkConfig_PropertiesEntry>, I>>(object: I): SinkConfig_PropertiesEntry { - const message = createBaseSinkConfig_PropertiesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseSinkStreamRequest(): SinkStreamRequest { - return { request: undefined }; -} - -export const SinkStreamRequest = { - fromJSON(object: any): SinkStreamRequest { - return { - request: isSet(object.start) - ? { $case: "start", start: SinkStreamRequest_StartSink.fromJSON(object.start) } - : isSet(object.startEpoch) - ? { $case: "startEpoch", startEpoch: SinkStreamRequest_StartEpoch.fromJSON(object.startEpoch) } - : isSet(object.write) - ? { $case: "write", write: SinkStreamRequest_WriteBatch.fromJSON(object.write) } - : isSet(object.sync) - ? { $case: "sync", sync: SinkStreamRequest_SyncBatch.fromJSON(object.sync) } - : undefined, - }; - }, - - toJSON(message: SinkStreamRequest): unknown { - const obj: any = {}; - message.request?.$case === "start" && - (obj.start = message.request?.start ?
SinkStreamRequest_StartSink.toJSON(message.request?.start) : undefined); - message.request?.$case === "startEpoch" && (obj.startEpoch = message.request?.startEpoch - ? SinkStreamRequest_StartEpoch.toJSON(message.request?.startEpoch) - : undefined); - message.request?.$case === "write" && - (obj.write = message.request?.write ? SinkStreamRequest_WriteBatch.toJSON(message.request?.write) : undefined); - message.request?.$case === "sync" && - (obj.sync = message.request?.sync ? SinkStreamRequest_SyncBatch.toJSON(message.request?.sync) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkStreamRequest>, I>>(object: I): SinkStreamRequest { - const message = createBaseSinkStreamRequest(); - if (object.request?.$case === "start" && object.request?.start !== undefined && object.request?.start !== null) { - message.request = { $case: "start", start: SinkStreamRequest_StartSink.fromPartial(object.request.start) }; - } - if ( - object.request?.$case === "startEpoch" && - object.request?.startEpoch !== undefined && - object.request?.startEpoch !== null - ) { - message.request = { - $case: "startEpoch", - startEpoch: SinkStreamRequest_StartEpoch.fromPartial(object.request.startEpoch), - }; - } - if (object.request?.$case === "write" && object.request?.write !== undefined && object.request?.write !== null) { - message.request = { $case: "write", write: SinkStreamRequest_WriteBatch.fromPartial(object.request.write) }; - } - if (object.request?.$case === "sync" && object.request?.sync !== undefined && object.request?.sync !== null) { - message.request = { $case: "sync", sync: SinkStreamRequest_SyncBatch.fromPartial(object.request.sync) }; - } - return message; - }, -}; - -function createBaseSinkStreamRequest_StartSink(): SinkStreamRequest_StartSink { - return { sinkConfig: undefined }; -} - -export const SinkStreamRequest_StartSink = { - fromJSON(object: any): SinkStreamRequest_StartSink { - return { sinkConfig: isSet(object.sinkConfig) ? SinkConfig.fromJSON(object.sinkConfig) : undefined }; - }, - - toJSON(message: SinkStreamRequest_StartSink): unknown { - const obj: any = {}; - message.sinkConfig !== undefined && - (obj.sinkConfig = message.sinkConfig ? SinkConfig.toJSON(message.sinkConfig) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkStreamRequest_StartSink>, I>>(object: I): SinkStreamRequest_StartSink { - const message = createBaseSinkStreamRequest_StartSink(); - message.sinkConfig = (object.sinkConfig !== undefined && object.sinkConfig !== null) - ? SinkConfig.fromPartial(object.sinkConfig) - : undefined; - return message; - }, -}; - -function createBaseSinkStreamRequest_WriteBatch(): SinkStreamRequest_WriteBatch { - return { payload: undefined, batchId: 0, epoch: 0 }; -} - -export const SinkStreamRequest_WriteBatch = { - fromJSON(object: any): SinkStreamRequest_WriteBatch { - return { - payload: isSet(object.jsonPayload) - ? { $case: "jsonPayload", jsonPayload: SinkStreamRequest_WriteBatch_JsonPayload.fromJSON(object.jsonPayload) } - : undefined, - batchId: isSet(object.batchId) ? Number(object.batchId) : 0, - epoch: isSet(object.epoch) ? Number(object.epoch) : 0, - }; - }, - - toJSON(message: SinkStreamRequest_WriteBatch): unknown { - const obj: any = {}; - message.payload?.$case === "jsonPayload" && (obj.jsonPayload = message.payload?.jsonPayload - ?
SinkStreamRequest_WriteBatch_JsonPayload.toJSON(message.payload?.jsonPayload) - : undefined); - message.batchId !== undefined && (obj.batchId = Math.round(message.batchId)); - message.epoch !== undefined && (obj.epoch = Math.round(message.epoch)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkStreamRequest_WriteBatch>, I>>(object: I): SinkStreamRequest_WriteBatch { - const message = createBaseSinkStreamRequest_WriteBatch(); - if ( - object.payload?.$case === "jsonPayload" && - object.payload?.jsonPayload !== undefined && - object.payload?.jsonPayload !== null - ) { - message.payload = { - $case: "jsonPayload", - jsonPayload: SinkStreamRequest_WriteBatch_JsonPayload.fromPartial(object.payload.jsonPayload), - }; - } - message.batchId = object.batchId ?? 0; - message.epoch = object.epoch ?? 0; - return message; - }, -}; - -function createBaseSinkStreamRequest_WriteBatch_JsonPayload(): SinkStreamRequest_WriteBatch_JsonPayload { - return { rowOps: [] }; -} - -export const SinkStreamRequest_WriteBatch_JsonPayload = { - fromJSON(object: any): SinkStreamRequest_WriteBatch_JsonPayload { - return { - rowOps: Array.isArray(object?.rowOps) - ? object.rowOps.map((e: any) => SinkStreamRequest_WriteBatch_JsonPayload_RowOp.fromJSON(e)) - : [], - }; - }, - - toJSON(message: SinkStreamRequest_WriteBatch_JsonPayload): unknown { - const obj: any = {}; - if (message.rowOps) { - obj.rowOps = message.rowOps.map((e) => e ? SinkStreamRequest_WriteBatch_JsonPayload_RowOp.toJSON(e) : undefined); - } else { - obj.rowOps = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkStreamRequest_WriteBatch_JsonPayload>, I>>( - object: I, - ): SinkStreamRequest_WriteBatch_JsonPayload { - const message = createBaseSinkStreamRequest_WriteBatch_JsonPayload(); - message.rowOps = object.rowOps?.map((e) => SinkStreamRequest_WriteBatch_JsonPayload_RowOp.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseSinkStreamRequest_WriteBatch_JsonPayload_RowOp(): SinkStreamRequest_WriteBatch_JsonPayload_RowOp { - return { opType: Op.OP_UNSPECIFIED, line: "" }; -} - -export const SinkStreamRequest_WriteBatch_JsonPayload_RowOp = { - fromJSON(object: any): SinkStreamRequest_WriteBatch_JsonPayload_RowOp { - return { - opType: isSet(object.opType) ? opFromJSON(object.opType) : Op.OP_UNSPECIFIED, - line: isSet(object.line) ? String(object.line) : "", - }; - }, - - toJSON(message: SinkStreamRequest_WriteBatch_JsonPayload_RowOp): unknown { - const obj: any = {}; - message.opType !== undefined && (obj.opType = opToJSON(message.opType)); - message.line !== undefined && (obj.line = message.line); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkStreamRequest_WriteBatch_JsonPayload_RowOp>, I>>( - object: I, - ): SinkStreamRequest_WriteBatch_JsonPayload_RowOp { - const message = createBaseSinkStreamRequest_WriteBatch_JsonPayload_RowOp(); - message.opType = object.opType ?? Op.OP_UNSPECIFIED; - message.line = object.line ?? ""; - return message; - }, -}; - -function createBaseSinkStreamRequest_StartEpoch(): SinkStreamRequest_StartEpoch { - return { epoch: 0 }; -} - -export const SinkStreamRequest_StartEpoch = { - fromJSON(object: any): SinkStreamRequest_StartEpoch { - return { epoch: isSet(object.epoch) ? Number(object.epoch) : 0 }; - }, - - toJSON(message: SinkStreamRequest_StartEpoch): unknown { - const obj: any = {}; - message.epoch !== undefined && (obj.epoch = Math.round(message.epoch)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkStreamRequest_StartEpoch>, I>>(object: I): SinkStreamRequest_StartEpoch { - const message = createBaseSinkStreamRequest_StartEpoch(); - message.epoch = object.epoch ??
0; - return message; - }, -}; - -function createBaseSinkStreamRequest_SyncBatch(): SinkStreamRequest_SyncBatch { - return { epoch: 0 }; -} - -export const SinkStreamRequest_SyncBatch = { - fromJSON(object: any): SinkStreamRequest_SyncBatch { - return { epoch: isSet(object.epoch) ? Number(object.epoch) : 0 }; - }, - - toJSON(message: SinkStreamRequest_SyncBatch): unknown { - const obj: any = {}; - message.epoch !== undefined && (obj.epoch = Math.round(message.epoch)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkStreamRequest_SyncBatch>, I>>(object: I): SinkStreamRequest_SyncBatch { - const message = createBaseSinkStreamRequest_SyncBatch(); - message.epoch = object.epoch ?? 0; - return message; - }, -}; - -function createBaseSinkResponse(): SinkResponse { - return { response: undefined }; -} - -export const SinkResponse = { - fromJSON(object: any): SinkResponse { - return { - response: isSet(object.sync) - ? { $case: "sync", sync: SinkResponse_SyncResponse.fromJSON(object.sync) } - : isSet(object.startEpoch) - ? { $case: "startEpoch", startEpoch: SinkResponse_StartEpochResponse.fromJSON(object.startEpoch) } - : isSet(object.write) - ? { $case: "write", write: SinkResponse_WriteResponse.fromJSON(object.write) } - : isSet(object.start) - ? { $case: "start", start: SinkResponse_StartResponse.fromJSON(object.start) } - : undefined, - }; - }, - - toJSON(message: SinkResponse): unknown { - const obj: any = {}; - message.response?.$case === "sync" && - (obj.sync = message.response?.sync ? SinkResponse_SyncResponse.toJSON(message.response?.sync) : undefined); - message.response?.$case === "startEpoch" && (obj.startEpoch = message.response?.startEpoch - ? SinkResponse_StartEpochResponse.toJSON(message.response?.startEpoch) - : undefined); - message.response?.$case === "write" && - (obj.write = message.response?.write ? SinkResponse_WriteResponse.toJSON(message.response?.write) : undefined); - message.response?.$case === "start" && - (obj.start = message.response?.start ? SinkResponse_StartResponse.toJSON(message.response?.start) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkResponse>, I>>(object: I): SinkResponse { - const message = createBaseSinkResponse(); - if (object.response?.$case === "sync" && object.response?.sync !== undefined && object.response?.sync !== null) { - message.response = { $case: "sync", sync: SinkResponse_SyncResponse.fromPartial(object.response.sync) }; - } - if ( - object.response?.$case === "startEpoch" && - object.response?.startEpoch !== undefined && - object.response?.startEpoch !== null - ) { - message.response = { - $case: "startEpoch", - startEpoch: SinkResponse_StartEpochResponse.fromPartial(object.response.startEpoch), - }; - } - if (object.response?.$case === "write" && object.response?.write !== undefined && object.response?.write !== null) { - message.response = { $case: "write", write: SinkResponse_WriteResponse.fromPartial(object.response.write) }; - } - if (object.response?.$case === "start" && object.response?.start !== undefined && object.response?.start !== null) { - message.response = { $case: "start", start: SinkResponse_StartResponse.fromPartial(object.response.start) }; - } - return message; - }, -}; - -function createBaseSinkResponse_SyncResponse(): SinkResponse_SyncResponse { - return { epoch: 0 }; -} - -export const SinkResponse_SyncResponse = { - fromJSON(object: any): SinkResponse_SyncResponse { - return { epoch: isSet(object.epoch) ?
Number(object.epoch) : 0 }; - }, - - toJSON(message: SinkResponse_SyncResponse): unknown { - const obj: any = {}; - message.epoch !== undefined && (obj.epoch = Math.round(message.epoch)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkResponse_SyncResponse>, I>>(object: I): SinkResponse_SyncResponse { - const message = createBaseSinkResponse_SyncResponse(); - message.epoch = object.epoch ?? 0; - return message; - }, -}; - -function createBaseSinkResponse_StartEpochResponse(): SinkResponse_StartEpochResponse { - return { epoch: 0 }; -} - -export const SinkResponse_StartEpochResponse = { - fromJSON(object: any): SinkResponse_StartEpochResponse { - return { epoch: isSet(object.epoch) ? Number(object.epoch) : 0 }; - }, - - toJSON(message: SinkResponse_StartEpochResponse): unknown { - const obj: any = {}; - message.epoch !== undefined && (obj.epoch = Math.round(message.epoch)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkResponse_StartEpochResponse>, I>>( - object: I, - ): SinkResponse_StartEpochResponse { - const message = createBaseSinkResponse_StartEpochResponse(); - message.epoch = object.epoch ?? 0; - return message; - }, -}; - -function createBaseSinkResponse_WriteResponse(): SinkResponse_WriteResponse { - return { epoch: 0, batchId: 0 }; -} - -export const SinkResponse_WriteResponse = { - fromJSON(object: any): SinkResponse_WriteResponse { - return { - epoch: isSet(object.epoch) ? Number(object.epoch) : 0, - batchId: isSet(object.batchId) ? Number(object.batchId) : 0, - }; - }, - - toJSON(message: SinkResponse_WriteResponse): unknown { - const obj: any = {}; - message.epoch !== undefined && (obj.epoch = Math.round(message.epoch)); - message.batchId !== undefined && (obj.batchId = Math.round(message.batchId)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkResponse_WriteResponse>, I>>(object: I): SinkResponse_WriteResponse { - const message = createBaseSinkResponse_WriteResponse(); - message.epoch = object.epoch ?? 0; - message.batchId = object.batchId ?? 0; - return message; - }, -}; - -function createBaseSinkResponse_StartResponse(): SinkResponse_StartResponse { - return {}; -} - -export const SinkResponse_StartResponse = { - fromJSON(_: any): SinkResponse_StartResponse { - return {}; - }, - - toJSON(_: SinkResponse_StartResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SinkResponse_StartResponse>, I>>(_: I): SinkResponse_StartResponse { - const message = createBaseSinkResponse_StartResponse(); - return message; - }, -}; - -function createBaseValidateSinkRequest(): ValidateSinkRequest { - return { sinkConfig: undefined }; -} - -export const ValidateSinkRequest = { - fromJSON(object: any): ValidateSinkRequest { - return { sinkConfig: isSet(object.sinkConfig) ? SinkConfig.fromJSON(object.sinkConfig) : undefined }; - }, - - toJSON(message: ValidateSinkRequest): unknown { - const obj: any = {}; - message.sinkConfig !== undefined && - (obj.sinkConfig = message.sinkConfig ? SinkConfig.toJSON(message.sinkConfig) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ValidateSinkRequest>, I>>(object: I): ValidateSinkRequest { - const message = createBaseValidateSinkRequest(); - message.sinkConfig = (object.sinkConfig !== undefined && object.sinkConfig !== null) - ? SinkConfig.fromPartial(object.sinkConfig) - : undefined; - return message; - }, -}; - -function createBaseValidateSinkResponse(): ValidateSinkResponse { - return { error: undefined }; -} - -export const ValidateSinkResponse = { - fromJSON(object: any): ValidateSinkResponse { - return { error: isSet(object.error) ?
ValidationError.fromJSON(object.error) : undefined }; - }, - - toJSON(message: ValidateSinkResponse): unknown { - const obj: any = {}; - message.error !== undefined && (obj.error = message.error ? ValidationError.toJSON(message.error) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ValidateSinkResponse>, I>>(object: I): ValidateSinkResponse { - const message = createBaseValidateSinkResponse(); - message.error = (object.error !== undefined && object.error !== null) - ? ValidationError.fromPartial(object.error) - : undefined; - return message; - }, -}; - -function createBaseCdcMessage(): CdcMessage { - return { payload: "", partition: "", offset: "" }; -} - -export const CdcMessage = { - fromJSON(object: any): CdcMessage { - return { - payload: isSet(object.payload) ? String(object.payload) : "", - partition: isSet(object.partition) ? String(object.partition) : "", - offset: isSet(object.offset) ? String(object.offset) : "", - }; - }, - - toJSON(message: CdcMessage): unknown { - const obj: any = {}; - message.payload !== undefined && (obj.payload = message.payload); - message.partition !== undefined && (obj.partition = message.partition); - message.offset !== undefined && (obj.offset = message.offset); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<CdcMessage>, I>>(object: I): CdcMessage { - const message = createBaseCdcMessage(); - message.payload = object.payload ?? ""; - message.partition = object.partition ?? ""; - message.offset = object.offset ?? ""; - return message; - }, -}; - -function createBaseGetEventStreamRequest(): GetEventStreamRequest { - return { request: undefined }; -} - -export const GetEventStreamRequest = { - fromJSON(object: any): GetEventStreamRequest { - return { - request: isSet(object.validate) - ? { $case: "validate", validate: GetEventStreamRequest_ValidateProperties.fromJSON(object.validate) } - : isSet(object.start) - ? { $case: "start", start: GetEventStreamRequest_StartSource.fromJSON(object.start) } - : undefined, - }; - }, - - toJSON(message: GetEventStreamRequest): unknown { - const obj: any = {}; - message.request?.$case === "validate" && (obj.validate = message.request?.validate - ? GetEventStreamRequest_ValidateProperties.toJSON(message.request?.validate) - : undefined); - message.request?.$case === "start" && - (obj.start = message.request?.start - ? GetEventStreamRequest_StartSource.toJSON(message.request?.start) - : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<GetEventStreamRequest>, I>>(object: I): GetEventStreamRequest { - const message = createBaseGetEventStreamRequest(); - if ( - object.request?.$case === "validate" && - object.request?.validate !== undefined && - object.request?.validate !== null - ) { - message.request = { - $case: "validate", - validate: GetEventStreamRequest_ValidateProperties.fromPartial(object.request.validate), - }; - } - if (object.request?.$case === "start" && object.request?.start !== undefined && object.request?.start !== null) { - message.request = { $case: "start", start: GetEventStreamRequest_StartSource.fromPartial(object.request.start) }; - } - return message; - }, -}; - -function createBaseGetEventStreamRequest_ValidateProperties(): GetEventStreamRequest_ValidateProperties { - return { sourceId: 0, sourceType: SourceType.UNSPECIFIED, properties: {}, tableSchema: undefined }; -} - -export const GetEventStreamRequest_ValidateProperties = { - fromJSON(object: any): GetEventStreamRequest_ValidateProperties { - return { - sourceId: isSet(object.sourceId) ? Number(object.sourceId) : 0, - sourceType: isSet(object.sourceType) ?
sourceTypeFromJSON(object.sourceType) : SourceType.UNSPECIFIED, - properties: isObject(object.properties) - ? Object.entries(object.properties).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - tableSchema: isSet(object.tableSchema) ? TableSchema.fromJSON(object.tableSchema) : undefined, - }; - }, - - toJSON(message: GetEventStreamRequest_ValidateProperties): unknown { - const obj: any = {}; - message.sourceId !== undefined && (obj.sourceId = Math.round(message.sourceId)); - message.sourceType !== undefined && (obj.sourceType = sourceTypeToJSON(message.sourceType)); - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - message.tableSchema !== undefined && - (obj.tableSchema = message.tableSchema ? TableSchema.toJSON(message.tableSchema) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): GetEventStreamRequest_ValidateProperties { - const message = createBaseGetEventStreamRequest_ValidateProperties(); - message.sourceId = object.sourceId ?? 0; - message.sourceType = object.sourceType ?? SourceType.UNSPECIFIED; - message.properties = Object.entries(object.properties ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.tableSchema = (object.tableSchema !== undefined && object.tableSchema !== null) - ? TableSchema.fromPartial(object.tableSchema) - : undefined; - return message; - }, -}; - -function createBaseGetEventStreamRequest_ValidateProperties_PropertiesEntry(): GetEventStreamRequest_ValidateProperties_PropertiesEntry { - return { key: "", value: "" }; -} - -export const GetEventStreamRequest_ValidateProperties_PropertiesEntry = { - fromJSON(object: any): GetEventStreamRequest_ValidateProperties_PropertiesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: GetEventStreamRequest_ValidateProperties_PropertiesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>( - object: I, - ): GetEventStreamRequest_ValidateProperties_PropertiesEntry { - const message = createBaseGetEventStreamRequest_ValidateProperties_PropertiesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseGetEventStreamRequest_StartSource(): GetEventStreamRequest_StartSource { - return { sourceId: 0, sourceType: SourceType.UNSPECIFIED, startOffset: "", properties: {} }; -} - -export const GetEventStreamRequest_StartSource = { - fromJSON(object: any): GetEventStreamRequest_StartSource { - return { - sourceId: isSet(object.sourceId) ? Number(object.sourceId) : 0, - sourceType: isSet(object.sourceType) ? sourceTypeFromJSON(object.sourceType) : SourceType.UNSPECIFIED, - startOffset: isSet(object.startOffset) ? String(object.startOffset) : "", - properties: isObject(object.properties) - ? 
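// [Editor's note — illustrative, not part of the generated file] fromJSON
// above normalizes the `properties` map by coercing every value with
// String(value), so loosely typed JSON (e.g. a numeric port) still yields a
// `{ [key: string]: string }` map:
const validatedProps = GetEventStreamRequest_ValidateProperties.fromJSON({
  sourceId: 42,
  properties: { "kafka.port": 9092, topic: "t1" },
});
// validatedProps.properties => { "kafka.port": "9092", topic: "t1" }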
Object.entries(object.properties).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: GetEventStreamRequest_StartSource): unknown { - const obj: any = {}; - message.sourceId !== undefined && (obj.sourceId = Math.round(message.sourceId)); - message.sourceType !== undefined && (obj.sourceType = sourceTypeToJSON(message.sourceType)); - message.startOffset !== undefined && (obj.startOffset = message.startOffset); - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): GetEventStreamRequest_StartSource { - const message = createBaseGetEventStreamRequest_StartSource(); - message.sourceId = object.sourceId ?? 0; - message.sourceType = object.sourceType ?? SourceType.UNSPECIFIED; - message.startOffset = object.startOffset ?? ""; - message.properties = Object.entries(object.properties ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBaseGetEventStreamRequest_StartSource_PropertiesEntry(): GetEventStreamRequest_StartSource_PropertiesEntry { - return { key: "", value: "" }; -} - -export const GetEventStreamRequest_StartSource_PropertiesEntry = { - fromJSON(object: any): GetEventStreamRequest_StartSource_PropertiesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: GetEventStreamRequest_StartSource_PropertiesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>( - object: I, - ): GetEventStreamRequest_StartSource_PropertiesEntry { - const message = createBaseGetEventStreamRequest_StartSource_PropertiesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseGetEventStreamResponse(): GetEventStreamResponse { - return { sourceId: 0, events: [] }; -} - -export const GetEventStreamResponse = { - fromJSON(object: any): GetEventStreamResponse { - return { - sourceId: isSet(object.sourceId) ? Number(object.sourceId) : 0, - events: Array.isArray(object?.events) ? object.events.map((e: any) => CdcMessage.fromJSON(e)) : [], - }; - }, - - toJSON(message: GetEventStreamResponse): unknown { - const obj: any = {}; - message.sourceId !== undefined && (obj.sourceId = Math.round(message.sourceId)); - if (message.events) { - obj.events = message.events.map((e) => e ? CdcMessage.toJSON(e) : undefined); - } else { - obj.events = []; - } - return obj; - }, - - fromPartial, I>>(object: I): GetEventStreamResponse { - const message = createBaseGetEventStreamResponse(); - message.sourceId = object.sourceId ?? 0; - message.events = object.events?.map((e) => CdcMessage.fromPartial(e)) || []; - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends { $case: string } ? { [K in keyof Omit]?: DeepPartial } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? 
keyof T : never; -export type Exact = P extends Builtin ? P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function isObject(value: any): boolean { - return typeof value === "object" && value !== null; -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/data.ts b/dashboard/proto/gen/data.ts deleted file mode 100644 index 1b865bc65a17a..0000000000000 --- a/dashboard/proto/gen/data.ts +++ /dev/null @@ -1,958 +0,0 @@ -/* eslint-disable */ -import { Buffer } from "./common"; - -export const protobufPackage = "data"; - -export const RwArrayType = { - UNSPECIFIED: "UNSPECIFIED", - INT16: "INT16", - INT32: "INT32", - INT64: "INT64", - FLOAT32: "FLOAT32", - FLOAT64: "FLOAT64", - UTF8: "UTF8", - BOOL: "BOOL", - DECIMAL: "DECIMAL", - DATE: "DATE", - TIME: "TIME", - TIMESTAMP: "TIMESTAMP", - INTERVAL: "INTERVAL", - STRUCT: "STRUCT", - LIST: "LIST", - BYTEA: "BYTEA", - JSONB: "JSONB", - SERIAL: "SERIAL", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type RwArrayType = typeof RwArrayType[keyof typeof RwArrayType]; - -export function rwArrayTypeFromJSON(object: any): RwArrayType { - switch (object) { - case 0: - case "UNSPECIFIED": - return RwArrayType.UNSPECIFIED; - case 1: - case "INT16": - return RwArrayType.INT16; - case 2: - case "INT32": - return RwArrayType.INT32; - case 3: - case "INT64": - return RwArrayType.INT64; - case 4: - case "FLOAT32": - return RwArrayType.FLOAT32; - case 5: - case "FLOAT64": - return RwArrayType.FLOAT64; - case 6: - case "UTF8": - return RwArrayType.UTF8; - case 7: - case "BOOL": - return RwArrayType.BOOL; - case 8: - case "DECIMAL": - return RwArrayType.DECIMAL; - case 9: - case "DATE": - return RwArrayType.DATE; - case 10: - case "TIME": - return RwArrayType.TIME; - case 11: - case "TIMESTAMP": - return RwArrayType.TIMESTAMP; - case 12: - case "INTERVAL": - return RwArrayType.INTERVAL; - case 13: - case "STRUCT": - return RwArrayType.STRUCT; - case 14: - case "LIST": - return RwArrayType.LIST; - case 15: - case "BYTEA": - return RwArrayType.BYTEA; - case 16: - case "JSONB": - return RwArrayType.JSONB; - case 17: - case "SERIAL": - return RwArrayType.SERIAL; - case -1: - case "UNRECOGNIZED": - default: - return RwArrayType.UNRECOGNIZED; - } -} - -export function rwArrayTypeToJSON(object: RwArrayType): string { - switch (object) { - case RwArrayType.UNSPECIFIED: - return "UNSPECIFIED"; - case RwArrayType.INT16: - return "INT16"; - case RwArrayType.INT32: - return "INT32"; - case RwArrayType.INT64: - return "INT64"; - case RwArrayType.FLOAT32: - return "FLOAT32"; - case RwArrayType.FLOAT64: - return "FLOAT64"; - case RwArrayType.UTF8: - return "UTF8"; - case RwArrayType.BOOL: - return "BOOL"; - case RwArrayType.DECIMAL: - return "DECIMAL"; - case RwArrayType.DATE: - return "DATE"; - case RwArrayType.TIME: - return "TIME"; - case RwArrayType.TIMESTAMP: - return "TIMESTAMP"; - case RwArrayType.INTERVAL: - return "INTERVAL"; - case RwArrayType.STRUCT: - return "STRUCT"; - case RwArrayType.LIST: - return "LIST"; - case RwArrayType.BYTEA: - return "BYTEA"; - case RwArrayType.JSONB: - return "JSONB"; - case RwArrayType.SERIAL: - return "SERIAL"; - case RwArrayType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export const Op = { - OP_UNSPECIFIED: "OP_UNSPECIFIED", - INSERT: "INSERT", - DELETE: "DELETE", - UPDATE_INSERT: "UPDATE_INSERT", - UPDATE_DELETE: "UPDATE_DELETE", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type Op = typeof Op[keyof typeof 
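// [Editor's note — illustrative, not part of the generated file] Enums in
// this file are generated as string-literal unions over an `as const` object;
// the FromJSON converters accept either the proto tag number or the name and
// degrade to UNRECOGNIZED otherwise:
rwArrayTypeFromJSON(2);       // RwArrayType.INT32
rwArrayTypeFromJSON("INT32"); // RwArrayType.INT32
rwArrayTypeFromJSON("oops");  // RwArrayType.UNRECOGNIZED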
Op]; - -export function opFromJSON(object: any): Op { - switch (object) { - case 0: - case "OP_UNSPECIFIED": - return Op.OP_UNSPECIFIED; - case 1: - case "INSERT": - return Op.INSERT; - case 2: - case "DELETE": - return Op.DELETE; - case 3: - case "UPDATE_INSERT": - return Op.UPDATE_INSERT; - case 4: - case "UPDATE_DELETE": - return Op.UPDATE_DELETE; - case -1: - case "UNRECOGNIZED": - default: - return Op.UNRECOGNIZED; - } -} - -export function opToJSON(object: Op): string { - switch (object) { - case Op.OP_UNSPECIFIED: - return "OP_UNSPECIFIED"; - case Op.INSERT: - return "INSERT"; - case Op.DELETE: - return "DELETE"; - case Op.UPDATE_INSERT: - return "UPDATE_INSERT"; - case Op.UPDATE_DELETE: - return "UPDATE_DELETE"; - case Op.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface IntervalUnit { - months: number; - days: number; - ms: number; -} - -export interface DataType { - typeName: DataType_TypeName; - /** - * Data length for char. - * Max data length for varchar. - * Precision for time, decimal. - */ - precision: number; - /** Scale for decimal. */ - scale: number; - isNullable: boolean; - intervalType: DataType_IntervalType; - /** - * For struct type, it represents all the fields in the struct. - * For a list type, it contains only one element, which is the inner item type of the list. - * For example, `ARRAY<INT32>` will be represented as `vec![DataType::Int32]`. - */ - fieldType: DataType[]; - /** Names of the fields if it is a struct type. For other types it will be empty. */ - fieldNames: string[]; -} - -export const DataType_IntervalType = { - UNSPECIFIED: "UNSPECIFIED", - YEAR: "YEAR", - MONTH: "MONTH", - DAY: "DAY", - HOUR: "HOUR", - MINUTE: "MINUTE", - SECOND: "SECOND", - YEAR_TO_MONTH: "YEAR_TO_MONTH", - DAY_TO_HOUR: "DAY_TO_HOUR", - DAY_TO_MINUTE: "DAY_TO_MINUTE", - DAY_TO_SECOND: "DAY_TO_SECOND", - HOUR_TO_MINUTE: "HOUR_TO_MINUTE", - HOUR_TO_SECOND: "HOUR_TO_SECOND", - MINUTE_TO_SECOND: "MINUTE_TO_SECOND", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type DataType_IntervalType = typeof DataType_IntervalType[keyof typeof DataType_IntervalType]; - -export function dataType_IntervalTypeFromJSON(object: any): DataType_IntervalType { - switch (object) { - case 0: - case "UNSPECIFIED": - return DataType_IntervalType.UNSPECIFIED; - case 1: - case "YEAR": - return DataType_IntervalType.YEAR; - case 2: - case "MONTH": - return DataType_IntervalType.MONTH; - case 3: - case "DAY": - return DataType_IntervalType.DAY; - case 4: - case "HOUR": - return DataType_IntervalType.HOUR; - case 5: - case "MINUTE": - return DataType_IntervalType.MINUTE; - case 6: - case "SECOND": - return DataType_IntervalType.SECOND; - case 7: - case "YEAR_TO_MONTH": - return DataType_IntervalType.YEAR_TO_MONTH; - case 8: - case "DAY_TO_HOUR": - return DataType_IntervalType.DAY_TO_HOUR; - case 9: - case "DAY_TO_MINUTE": - return DataType_IntervalType.DAY_TO_MINUTE; - case 10: - case "DAY_TO_SECOND": - return DataType_IntervalType.DAY_TO_SECOND; - case 11: - case "HOUR_TO_MINUTE": - return DataType_IntervalType.HOUR_TO_MINUTE; - case 12: - case "HOUR_TO_SECOND": - return DataType_IntervalType.HOUR_TO_SECOND; - case 13: - case "MINUTE_TO_SECOND": - return DataType_IntervalType.MINUTE_TO_SECOND; - case -1: - case "UNRECOGNIZED": - default: - return DataType_IntervalType.UNRECOGNIZED; - } -} - -export function dataType_IntervalTypeToJSON(object: DataType_IntervalType): string { - switch (object) { - case DataType_IntervalType.UNSPECIFIED: - return "UNSPECIFIED"; - case
DataType_IntervalType.YEAR: - return "YEAR"; - case DataType_IntervalType.MONTH: - return "MONTH"; - case DataType_IntervalType.DAY: - return "DAY"; - case DataType_IntervalType.HOUR: - return "HOUR"; - case DataType_IntervalType.MINUTE: - return "MINUTE"; - case DataType_IntervalType.SECOND: - return "SECOND"; - case DataType_IntervalType.YEAR_TO_MONTH: - return "YEAR_TO_MONTH"; - case DataType_IntervalType.DAY_TO_HOUR: - return "DAY_TO_HOUR"; - case DataType_IntervalType.DAY_TO_MINUTE: - return "DAY_TO_MINUTE"; - case DataType_IntervalType.DAY_TO_SECOND: - return "DAY_TO_SECOND"; - case DataType_IntervalType.HOUR_TO_MINUTE: - return "HOUR_TO_MINUTE"; - case DataType_IntervalType.HOUR_TO_SECOND: - return "HOUR_TO_SECOND"; - case DataType_IntervalType.MINUTE_TO_SECOND: - return "MINUTE_TO_SECOND"; - case DataType_IntervalType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export const DataType_TypeName = { - TYPE_UNSPECIFIED: "TYPE_UNSPECIFIED", - INT16: "INT16", - INT32: "INT32", - INT64: "INT64", - FLOAT: "FLOAT", - DOUBLE: "DOUBLE", - BOOLEAN: "BOOLEAN", - VARCHAR: "VARCHAR", - DECIMAL: "DECIMAL", - TIME: "TIME", - TIMESTAMP: "TIMESTAMP", - INTERVAL: "INTERVAL", - DATE: "DATE", - /** TIMESTAMPTZ - Timestamp type with timezone */ - TIMESTAMPTZ: "TIMESTAMPTZ", - STRUCT: "STRUCT", - LIST: "LIST", - BYTEA: "BYTEA", - JSONB: "JSONB", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type DataType_TypeName = typeof DataType_TypeName[keyof typeof DataType_TypeName]; - -export function dataType_TypeNameFromJSON(object: any): DataType_TypeName { - switch (object) { - case 0: - case "TYPE_UNSPECIFIED": - return DataType_TypeName.TYPE_UNSPECIFIED; - case 1: - case "INT16": - return DataType_TypeName.INT16; - case 2: - case "INT32": - return DataType_TypeName.INT32; - case 3: - case "INT64": - return DataType_TypeName.INT64; - case 4: - case "FLOAT": - return DataType_TypeName.FLOAT; - case 5: - case "DOUBLE": - return DataType_TypeName.DOUBLE; - case 6: - case "BOOLEAN": - return DataType_TypeName.BOOLEAN; - case 7: - case "VARCHAR": - return DataType_TypeName.VARCHAR; - case 8: - case "DECIMAL": - return DataType_TypeName.DECIMAL; - case 9: - case "TIME": - return DataType_TypeName.TIME; - case 10: - case "TIMESTAMP": - return DataType_TypeName.TIMESTAMP; - case 11: - case "INTERVAL": - return DataType_TypeName.INTERVAL; - case 12: - case "DATE": - return DataType_TypeName.DATE; - case 13: - case "TIMESTAMPTZ": - return DataType_TypeName.TIMESTAMPTZ; - case 15: - case "STRUCT": - return DataType_TypeName.STRUCT; - case 16: - case "LIST": - return DataType_TypeName.LIST; - case 17: - case "BYTEA": - return DataType_TypeName.BYTEA; - case 18: - case "JSONB": - return DataType_TypeName.JSONB; - case -1: - case "UNRECOGNIZED": - default: - return DataType_TypeName.UNRECOGNIZED; - } -} - -export function dataType_TypeNameToJSON(object: DataType_TypeName): string { - switch (object) { - case DataType_TypeName.TYPE_UNSPECIFIED: - return "TYPE_UNSPECIFIED"; - case DataType_TypeName.INT16: - return "INT16"; - case DataType_TypeName.INT32: - return "INT32"; - case DataType_TypeName.INT64: - return "INT64"; - case DataType_TypeName.FLOAT: - return "FLOAT"; - case DataType_TypeName.DOUBLE: - return "DOUBLE"; - case DataType_TypeName.BOOLEAN: - return "BOOLEAN"; - case DataType_TypeName.VARCHAR: - return "VARCHAR"; - case DataType_TypeName.DECIMAL: - return "DECIMAL"; - case DataType_TypeName.TIME: - return "TIME"; - case DataType_TypeName.TIMESTAMP: - return "TIMESTAMP"; - case 
DataType_TypeName.INTERVAL: - return "INTERVAL"; - case DataType_TypeName.DATE: - return "DATE"; - case DataType_TypeName.TIMESTAMPTZ: - return "TIMESTAMPTZ"; - case DataType_TypeName.STRUCT: - return "STRUCT"; - case DataType_TypeName.LIST: - return "LIST"; - case DataType_TypeName.BYTEA: - return "BYTEA"; - case DataType_TypeName.JSONB: - return "JSONB"; - case DataType_TypeName.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface StructRwArrayData { - childrenArray: RwArray[]; - childrenType: DataType[]; -} - -export interface ListRwArrayData { - offsets: number[]; - value: RwArray | undefined; - valueType: DataType | undefined; -} - -export interface RwArray { - arrayType: RwArrayType; - nullBitmap: Buffer | undefined; - values: Buffer[]; - structArrayData: StructRwArrayData | undefined; - listArrayData: ListRwArrayData | undefined; -} - -export interface Datum { - /** - * bool array/bitmap: one byte, 0 for false (null), non-zero for true (non-null) - * integer, float, double: big-endian - * interval: encoded as (months, days, milliseconds), big-endian - * varchar: encoded according to the specified encoding; currently only UTF-8 is supported. - */ - body: Uint8Array; -} - -/** - * New column proto def to replace the fixed-width column. This def - * aims to cover all column types. Currently it does not support struct/array, - * but it can be extended in the future by adding other fields. - */ -export interface Column { - array: RwArray | undefined; -} - -export interface DataChunk { - cardinality: number; - columns: Column[]; -} - -export interface StreamChunk { - /** for Column::from_protobuf(), may not need later */ - cardinality: number; - ops: Op[]; - columns: Column[]; -} - -export interface Epoch { - curr: number; - prev: number; -} - -export interface Terminate { -} - -function createBaseIntervalUnit(): IntervalUnit { - return { months: 0, days: 0, ms: 0 }; -} - -export const IntervalUnit = { - fromJSON(object: any): IntervalUnit { - return { - months: isSet(object.months) ? Number(object.months) : 0, - days: isSet(object.days) ? Number(object.days) : 0, - ms: isSet(object.ms) ? Number(object.ms) : 0, - }; - }, - - toJSON(message: IntervalUnit): unknown { - const obj: any = {}; - message.months !== undefined && (obj.months = Math.round(message.months)); - message.days !== undefined && (obj.days = Math.round(message.days)); - message.ms !== undefined && (obj.ms = Math.round(message.ms)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<IntervalUnit>, I>>(object: I): IntervalUnit { - const message = createBaseIntervalUnit(); - message.months = object.months ?? 0; - message.days = object.days ?? 0; - message.ms = object.ms ?? 0; - return message; - }, -}; - -function createBaseDataType(): DataType { - return { - typeName: DataType_TypeName.TYPE_UNSPECIFIED, - precision: 0, - scale: 0, - isNullable: false, - intervalType: DataType_IntervalType.UNSPECIFIED, - fieldType: [], - fieldNames: [], - }; -} - -export const DataType = { - fromJSON(object: any): DataType { - return { - typeName: isSet(object.typeName) - ? dataType_TypeNameFromJSON(object.typeName) - : DataType_TypeName.TYPE_UNSPECIFIED, - precision: isSet(object.precision) ? Number(object.precision) : 0, - scale: isSet(object.scale) ? Number(object.scale) : 0, - isNullable: isSet(object.isNullable) ? Boolean(object.isNullable) : false, - intervalType: isSet(object.intervalType) - ? dataType_IntervalTypeFromJSON(object.intervalType) - : DataType_IntervalType.UNSPECIFIED, - fieldType: Array.isArray(object?.fieldType) ?
object.fieldType.map((e: any) => DataType.fromJSON(e)) : [], - fieldNames: Array.isArray(object?.fieldNames) ? object.fieldNames.map((e: any) => String(e)) : [], - }; - }, - - toJSON(message: DataType): unknown { - const obj: any = {}; - message.typeName !== undefined && (obj.typeName = dataType_TypeNameToJSON(message.typeName)); - message.precision !== undefined && (obj.precision = Math.round(message.precision)); - message.scale !== undefined && (obj.scale = Math.round(message.scale)); - message.isNullable !== undefined && (obj.isNullable = message.isNullable); - message.intervalType !== undefined && (obj.intervalType = dataType_IntervalTypeToJSON(message.intervalType)); - if (message.fieldType) { - obj.fieldType = message.fieldType.map((e) => e ? DataType.toJSON(e) : undefined); - } else { - obj.fieldType = []; - } - if (message.fieldNames) { - obj.fieldNames = message.fieldNames.map((e) => e); - } else { - obj.fieldNames = []; - } - return obj; - }, - - fromPartial, I>>(object: I): DataType { - const message = createBaseDataType(); - message.typeName = object.typeName ?? DataType_TypeName.TYPE_UNSPECIFIED; - message.precision = object.precision ?? 0; - message.scale = object.scale ?? 0; - message.isNullable = object.isNullable ?? false; - message.intervalType = object.intervalType ?? DataType_IntervalType.UNSPECIFIED; - message.fieldType = object.fieldType?.map((e) => DataType.fromPartial(e)) || []; - message.fieldNames = object.fieldNames?.map((e) => e) || []; - return message; - }, -}; - -function createBaseStructRwArrayData(): StructRwArrayData { - return { childrenArray: [], childrenType: [] }; -} - -export const StructRwArrayData = { - fromJSON(object: any): StructRwArrayData { - return { - childrenArray: Array.isArray(object?.childrenArray) - ? object.childrenArray.map((e: any) => RwArray.fromJSON(e)) - : [], - childrenType: Array.isArray(object?.childrenType) - ? object.childrenType.map((e: any) => DataType.fromJSON(e)) - : [], - }; - }, - - toJSON(message: StructRwArrayData): unknown { - const obj: any = {}; - if (message.childrenArray) { - obj.childrenArray = message.childrenArray.map((e) => e ? RwArray.toJSON(e) : undefined); - } else { - obj.childrenArray = []; - } - if (message.childrenType) { - obj.childrenType = message.childrenType.map((e) => e ? DataType.toJSON(e) : undefined); - } else { - obj.childrenType = []; - } - return obj; - }, - - fromPartial, I>>(object: I): StructRwArrayData { - const message = createBaseStructRwArrayData(); - message.childrenArray = object.childrenArray?.map((e) => RwArray.fromPartial(e)) || []; - message.childrenType = object.childrenType?.map((e) => DataType.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseListRwArrayData(): ListRwArrayData { - return { offsets: [], value: undefined, valueType: undefined }; -} - -export const ListRwArrayData = { - fromJSON(object: any): ListRwArrayData { - return { - offsets: Array.isArray(object?.offsets) ? object.offsets.map((e: any) => Number(e)) : [], - value: isSet(object.value) ? RwArray.fromJSON(object.value) : undefined, - valueType: isSet(object.valueType) ? DataType.fromJSON(object.valueType) : undefined, - }; - }, - - toJSON(message: ListRwArrayData): unknown { - const obj: any = {}; - if (message.offsets) { - obj.offsets = message.offsets.map((e) => Math.round(e)); - } else { - obj.offsets = []; - } - message.value !== undefined && (obj.value = message.value ? 
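// [Editor's sketch — illustrative, not part of the generated file] The nested
// `fieldType` encoding documented on the DataType interface, built with the
// DataType.fromPartial helper above: ARRAY<INT32> is a LIST whose single
// inner element type is INT32.
const arrayOfInt32 = DataType.fromPartial({
  typeName: DataType_TypeName.LIST,
  fieldType: [{ typeName: DataType_TypeName.INT32 }],
});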
RwArray.toJSON(message.value) : undefined); - message.valueType !== undefined && - (obj.valueType = message.valueType ? DataType.toJSON(message.valueType) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ListRwArrayData { - const message = createBaseListRwArrayData(); - message.offsets = object.offsets?.map((e) => e) || []; - message.value = (object.value !== undefined && object.value !== null) - ? RwArray.fromPartial(object.value) - : undefined; - message.valueType = (object.valueType !== undefined && object.valueType !== null) - ? DataType.fromPartial(object.valueType) - : undefined; - return message; - }, -}; - -function createBaseRwArray(): RwArray { - return { - arrayType: RwArrayType.UNSPECIFIED, - nullBitmap: undefined, - values: [], - structArrayData: undefined, - listArrayData: undefined, - }; -} - -export const RwArray = { - fromJSON(object: any): RwArray { - return { - arrayType: isSet(object.arrayType) ? rwArrayTypeFromJSON(object.arrayType) : RwArrayType.UNSPECIFIED, - nullBitmap: isSet(object.nullBitmap) ? Buffer.fromJSON(object.nullBitmap) : undefined, - values: Array.isArray(object?.values) ? object.values.map((e: any) => Buffer.fromJSON(e)) : [], - structArrayData: isSet(object.structArrayData) ? StructRwArrayData.fromJSON(object.structArrayData) : undefined, - listArrayData: isSet(object.listArrayData) ? ListRwArrayData.fromJSON(object.listArrayData) : undefined, - }; - }, - - toJSON(message: RwArray): unknown { - const obj: any = {}; - message.arrayType !== undefined && (obj.arrayType = rwArrayTypeToJSON(message.arrayType)); - message.nullBitmap !== undefined && - (obj.nullBitmap = message.nullBitmap ? Buffer.toJSON(message.nullBitmap) : undefined); - if (message.values) { - obj.values = message.values.map((e) => e ? Buffer.toJSON(e) : undefined); - } else { - obj.values = []; - } - message.structArrayData !== undefined && - (obj.structArrayData = message.structArrayData ? StructRwArrayData.toJSON(message.structArrayData) : undefined); - message.listArrayData !== undefined && - (obj.listArrayData = message.listArrayData ? ListRwArrayData.toJSON(message.listArrayData) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): RwArray { - const message = createBaseRwArray(); - message.arrayType = object.arrayType ?? RwArrayType.UNSPECIFIED; - message.nullBitmap = (object.nullBitmap !== undefined && object.nullBitmap !== null) - ? Buffer.fromPartial(object.nullBitmap) - : undefined; - message.values = object.values?.map((e) => Buffer.fromPartial(e)) || []; - message.structArrayData = (object.structArrayData !== undefined && object.structArrayData !== null) - ? StructRwArrayData.fromPartial(object.structArrayData) - : undefined; - message.listArrayData = (object.listArrayData !== undefined && object.listArrayData !== null) - ? ListRwArrayData.fromPartial(object.listArrayData) - : undefined; - return message; - }, -}; - -function createBaseDatum(): Datum { - return { body: new Uint8Array() }; -} - -export const Datum = { - fromJSON(object: any): Datum { - return { body: isSet(object.body) ? bytesFromBase64(object.body) : new Uint8Array() }; - }, - - toJSON(message: Datum): unknown { - const obj: any = {}; - message.body !== undefined && - (obj.body = base64FromBytes(message.body !== undefined ? message.body : new Uint8Array())); - return obj; - }, - - fromPartial, I>>(object: I): Datum { - const message = createBaseDatum(); - message.body = object.body ?? 
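// [Editor's sketch — illustrative] Packing a Datum body per the encoding
// documented on the Datum interface: fixed-width integers are big-endian
// bytes. `encodeInt32Datum` is a hypothetical helper, not part of the
// generated code.
function encodeInt32Datum(v: number): Datum {
  const body = new Uint8Array(4);
  new DataView(body.buffer).setInt32(0, v, false); // false = big-endian
  return { body };
}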
new Uint8Array(); - return message; - }, -}; - -function createBaseColumn(): Column { - return { array: undefined }; -} - -export const Column = { - fromJSON(object: any): Column { - return { array: isSet(object.array) ? RwArray.fromJSON(object.array) : undefined }; - }, - - toJSON(message: Column): unknown { - const obj: any = {}; - message.array !== undefined && (obj.array = message.array ? RwArray.toJSON(message.array) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): Column { - const message = createBaseColumn(); - message.array = (object.array !== undefined && object.array !== null) - ? RwArray.fromPartial(object.array) - : undefined; - return message; - }, -}; - -function createBaseDataChunk(): DataChunk { - return { cardinality: 0, columns: [] }; -} - -export const DataChunk = { - fromJSON(object: any): DataChunk { - return { - cardinality: isSet(object.cardinality) ? Number(object.cardinality) : 0, - columns: Array.isArray(object?.columns) ? object.columns.map((e: any) => Column.fromJSON(e)) : [], - }; - }, - - toJSON(message: DataChunk): unknown { - const obj: any = {}; - message.cardinality !== undefined && (obj.cardinality = Math.round(message.cardinality)); - if (message.columns) { - obj.columns = message.columns.map((e) => e ? Column.toJSON(e) : undefined); - } else { - obj.columns = []; - } - return obj; - }, - - fromPartial, I>>(object: I): DataChunk { - const message = createBaseDataChunk(); - message.cardinality = object.cardinality ?? 0; - message.columns = object.columns?.map((e) => Column.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseStreamChunk(): StreamChunk { - return { cardinality: 0, ops: [], columns: [] }; -} - -export const StreamChunk = { - fromJSON(object: any): StreamChunk { - return { - cardinality: isSet(object.cardinality) ? Number(object.cardinality) : 0, - ops: Array.isArray(object?.ops) ? object.ops.map((e: any) => opFromJSON(e)) : [], - columns: Array.isArray(object?.columns) ? object.columns.map((e: any) => Column.fromJSON(e)) : [], - }; - }, - - toJSON(message: StreamChunk): unknown { - const obj: any = {}; - message.cardinality !== undefined && (obj.cardinality = Math.round(message.cardinality)); - if (message.ops) { - obj.ops = message.ops.map((e) => opToJSON(e)); - } else { - obj.ops = []; - } - if (message.columns) { - obj.columns = message.columns.map((e) => e ? Column.toJSON(e) : undefined); - } else { - obj.columns = []; - } - return obj; - }, - - fromPartial, I>>(object: I): StreamChunk { - const message = createBaseStreamChunk(); - message.cardinality = object.cardinality ?? 0; - message.ops = object.ops?.map((e) => e) || []; - message.columns = object.columns?.map((e) => Column.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseEpoch(): Epoch { - return { curr: 0, prev: 0 }; -} - -export const Epoch = { - fromJSON(object: any): Epoch { - return { curr: isSet(object.curr) ? Number(object.curr) : 0, prev: isSet(object.prev) ? Number(object.prev) : 0 }; - }, - - toJSON(message: Epoch): unknown { - const obj: any = {}; - message.curr !== undefined && (obj.curr = Math.round(message.curr)); - message.prev !== undefined && (obj.prev = Math.round(message.prev)); - return obj; - }, - - fromPartial, I>>(object: I): Epoch { - const message = createBaseEpoch(); - message.curr = object.curr ?? 0; - message.prev = object.prev ?? 
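// [Editor's sketch — illustrative, not part of the generated file] A
// StreamChunk pairs one Op per row with the columnar data; a minimal two-row
// chunk built with the helpers above (columns left empty for brevity):
const streamChunkExample = StreamChunk.fromPartial({
  cardinality: 2,
  ops: [Op.INSERT, Op.DELETE],
  columns: [],
});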
0; - return message; - }, -}; - -function createBaseTerminate(): Terminate { - return {}; -} - -export const Terminate = { - fromJSON(_: any): Terminate { - return {}; - }, - - toJSON(_: Terminate): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): Terminate { - const message = createBaseTerminate(); - return message; - }, -}; - -declare var self: any | undefined; -declare var window: any | undefined; -declare var global: any | undefined; -var globalThis: any = (() => { - if (typeof globalThis !== "undefined") { - return globalThis; - } - if (typeof self !== "undefined") { - return self; - } - if (typeof window !== "undefined") { - return window; - } - if (typeof global !== "undefined") { - return global; - } - throw "Unable to locate global object"; -})(); - -function bytesFromBase64(b64: string): Uint8Array { - if (globalThis.Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if (globalThis.Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends { $case: string } ? { [K in keyof Omit]?: DeepPartial } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? 
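// [Editor's note — illustrative, not part of the generated file] The
// bytesFromBase64/base64FromBytes helpers above prefer Node's Buffer and fall
// back to atob/btoa in browsers; they are inverses of each other:
const decodedBytes = bytesFromBase64("AQID");    // Uint8Array [1, 2, 3]
const reencoded = base64FromBytes(decodedBytes); // "AQID"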
P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/ddl_service.ts b/dashboard/proto/gen/ddl_service.ts deleted file mode 100644 index 9f55ed090edf9..0000000000000 --- a/dashboard/proto/gen/ddl_service.ts +++ /dev/null @@ -1,1516 +0,0 @@ -/* eslint-disable */ -import { ColIndexMapping, Database, Function, Index, Schema, Sink, Source, Table, View } from "./catalog"; -import { Status } from "./common"; -import { StreamFragmentGraph } from "./stream_plan"; - -export const protobufPackage = "ddl_service"; - -export interface CreateDatabaseRequest { - db: Database | undefined; -} - -export interface CreateDatabaseResponse { - status: Status | undefined; - databaseId: number; - version: number; -} - -export interface DropDatabaseRequest { - databaseId: number; -} - -export interface DropDatabaseResponse { - status: Status | undefined; - version: number; -} - -export interface CreateSchemaRequest { - schema: Schema | undefined; -} - -export interface CreateSchemaResponse { - status: Status | undefined; - schemaId: number; - version: number; -} - -export interface DropSchemaRequest { - schemaId: number; -} - -export interface DropSchemaResponse { - status: Status | undefined; - version: number; -} - -export interface CreateSourceRequest { - source: Source | undefined; -} - -export interface CreateSourceResponse { - status: Status | undefined; - sourceId: number; - version: number; -} - -export interface DropSourceRequest { - sourceId: number; -} - -export interface DropSourceResponse { - status: Status | undefined; - version: number; -} - -export interface CreateSinkRequest { - sink: Sink | undefined; - fragmentGraph: StreamFragmentGraph | undefined; -} - -export interface CreateSinkResponse { - status: Status | undefined; - sinkId: number; - version: number; -} - -export interface DropSinkRequest { - sinkId: number; -} - -export interface DropSinkResponse { - status: Status | undefined; - version: number; -} - -export interface CreateMaterializedViewRequest { - materializedView: Table | undefined; - fragmentGraph: StreamFragmentGraph | undefined; -} - -export interface CreateMaterializedViewResponse { - status: Status | undefined; - tableId: number; - version: number; -} - -export interface DropMaterializedViewRequest { - tableId: number; -} - -export interface DropMaterializedViewResponse { - status: Status | undefined; - version: number; -} - -export interface CreateViewRequest { - view: View | undefined; -} - -export interface CreateViewResponse { - status: Status | undefined; - viewId: number; - version: number; -} - -export interface DropViewRequest { - viewId: number; -} - -export interface DropViewResponse { - status: Status | undefined; - version: number; -} - -export interface CreateTableRequest { - /** - * An optional field and will be `Some` for tables with an external connector. If so, the table - * will subscribe to the changes of the external connector and materialize the data. 
- */ - source: Source | undefined; - materializedView: Table | undefined; - fragmentGraph: StreamFragmentGraph | undefined; -} - -export interface CreateTableResponse { - status: Status | undefined; - tableId: number; - version: number; -} - -export interface CreateFunctionRequest { - function: Function | undefined; -} - -export interface CreateFunctionResponse { - status: Status | undefined; - functionId: number; - version: number; -} - -export interface DropFunctionRequest { - functionId: number; -} - -export interface DropFunctionResponse { - status: Status | undefined; - version: number; -} - -export interface DropTableRequest { - sourceId?: { $case: "id"; id: number }; - tableId: number; -} - -export interface DropTableResponse { - status: Status | undefined; - version: number; -} - -/** Used by risectl (and in the future, dashboard) */ -export interface RisectlListStateTablesRequest { -} - -/** Used by risectl (and in the future, dashboard) */ -export interface RisectlListStateTablesResponse { - tables: Table[]; -} - -export interface CreateIndexRequest { - index: Index | undefined; - indexTable: Table | undefined; - fragmentGraph: StreamFragmentGraph | undefined; -} - -export interface CreateIndexResponse { - status: Status | undefined; - indexId: number; - version: number; -} - -export interface DropIndexRequest { - indexId: number; -} - -export interface DropIndexResponse { - status: Status | undefined; - version: number; -} - -export interface ReplaceTablePlanRequest { - /** - * The new table catalog, with the correct table ID and a new version. - * If the new version does not match the subsequent version in the meta service's - * catalog, this request will be rejected. - */ - table: - | Table - | undefined; - /** The new materialization plan, where all schema are updated. */ - fragmentGraph: - | StreamFragmentGraph - | undefined; - /** The mapping from the old columns to the new columns of the table. */ - tableColIndexMapping: ColIndexMapping | undefined; -} - -export interface ReplaceTablePlanResponse { - status: - | Status - | undefined; - /** The new global catalog version. */ - version: number; -} - -export interface GetTableRequest { - databaseName: string; - tableName: string; -} - -export interface GetTableResponse { - table: Table | undefined; -} - -export interface GetDdlProgressRequest { -} - -export interface DdlProgress { - id: number; - statement: string; - progress: string; -} - -export interface GetDdlProgressResponse { - ddlProgress: DdlProgress[]; -} - -function createBaseCreateDatabaseRequest(): CreateDatabaseRequest { - return { db: undefined }; -} - -export const CreateDatabaseRequest = { - fromJSON(object: any): CreateDatabaseRequest { - return { db: isSet(object.db) ? Database.fromJSON(object.db) : undefined }; - }, - - toJSON(message: CreateDatabaseRequest): unknown { - const obj: any = {}; - message.db !== undefined && (obj.db = message.db ? Database.toJSON(message.db) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CreateDatabaseRequest { - const message = createBaseCreateDatabaseRequest(); - message.db = (object.db !== undefined && object.db !== null) ? Database.fromPartial(object.db) : undefined; - return message; - }, -}; - -function createBaseCreateDatabaseResponse(): CreateDatabaseResponse { - return { status: undefined, databaseId: 0, version: 0 }; -} - -export const CreateDatabaseResponse = { - fromJSON(object: any): CreateDatabaseResponse { - return { - status: isSet(object.status) ? 
Status.fromJSON(object.status) : undefined, - databaseId: isSet(object.databaseId) ? Number(object.databaseId) : 0, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: CreateDatabaseResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.databaseId !== undefined && (obj.databaseId = Math.round(message.databaseId)); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): CreateDatabaseResponse { - const message = createBaseCreateDatabaseResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.databaseId = object.databaseId ?? 0; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseDropDatabaseRequest(): DropDatabaseRequest { - return { databaseId: 0 }; -} - -export const DropDatabaseRequest = { - fromJSON(object: any): DropDatabaseRequest { - return { databaseId: isSet(object.databaseId) ? Number(object.databaseId) : 0 }; - }, - - toJSON(message: DropDatabaseRequest): unknown { - const obj: any = {}; - message.databaseId !== undefined && (obj.databaseId = Math.round(message.databaseId)); - return obj; - }, - - fromPartial, I>>(object: I): DropDatabaseRequest { - const message = createBaseDropDatabaseRequest(); - message.databaseId = object.databaseId ?? 0; - return message; - }, -}; - -function createBaseDropDatabaseResponse(): DropDatabaseResponse { - return { status: undefined, version: 0 }; -} - -export const DropDatabaseResponse = { - fromJSON(object: any): DropDatabaseResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: DropDatabaseResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): DropDatabaseResponse { - const message = createBaseDropDatabaseResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseCreateSchemaRequest(): CreateSchemaRequest { - return { schema: undefined }; -} - -export const CreateSchemaRequest = { - fromJSON(object: any): CreateSchemaRequest { - return { schema: isSet(object.schema) ? Schema.fromJSON(object.schema) : undefined }; - }, - - toJSON(message: CreateSchemaRequest): unknown { - const obj: any = {}; - message.schema !== undefined && (obj.schema = message.schema ? Schema.toJSON(message.schema) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CreateSchemaRequest { - const message = createBaseCreateSchemaRequest(); - message.schema = (object.schema !== undefined && object.schema !== null) - ? Schema.fromPartial(object.schema) - : undefined; - return message; - }, -}; - -function createBaseCreateSchemaResponse(): CreateSchemaResponse { - return { status: undefined, schemaId: 0, version: 0 }; -} - -export const CreateSchemaResponse = { - fromJSON(object: any): CreateSchemaResponse { - return { - status: isSet(object.status) ? 
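// [Editor's sketch — illustrative, not part of the generated file] Every
// ddl_service response follows the same status/id/version shape; parsing one
// from wire JSON with the helpers above:
const createdDb = CreateDatabaseResponse.fromJSON({ databaseId: 3, version: 10 });
// createdDb.status === undefined; createdDb.databaseId === 3; createdDb.version === 10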
Status.fromJSON(object.status) : undefined, - schemaId: isSet(object.schemaId) ? Number(object.schemaId) : 0, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: CreateSchemaResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.schemaId !== undefined && (obj.schemaId = Math.round(message.schemaId)); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): CreateSchemaResponse { - const message = createBaseCreateSchemaResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.schemaId = object.schemaId ?? 0; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseDropSchemaRequest(): DropSchemaRequest { - return { schemaId: 0 }; -} - -export const DropSchemaRequest = { - fromJSON(object: any): DropSchemaRequest { - return { schemaId: isSet(object.schemaId) ? Number(object.schemaId) : 0 }; - }, - - toJSON(message: DropSchemaRequest): unknown { - const obj: any = {}; - message.schemaId !== undefined && (obj.schemaId = Math.round(message.schemaId)); - return obj; - }, - - fromPartial, I>>(object: I): DropSchemaRequest { - const message = createBaseDropSchemaRequest(); - message.schemaId = object.schemaId ?? 0; - return message; - }, -}; - -function createBaseDropSchemaResponse(): DropSchemaResponse { - return { status: undefined, version: 0 }; -} - -export const DropSchemaResponse = { - fromJSON(object: any): DropSchemaResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: DropSchemaResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): DropSchemaResponse { - const message = createBaseDropSchemaResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseCreateSourceRequest(): CreateSourceRequest { - return { source: undefined }; -} - -export const CreateSourceRequest = { - fromJSON(object: any): CreateSourceRequest { - return { source: isSet(object.source) ? Source.fromJSON(object.source) : undefined }; - }, - - toJSON(message: CreateSourceRequest): unknown { - const obj: any = {}; - message.source !== undefined && (obj.source = message.source ? Source.toJSON(message.source) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CreateSourceRequest { - const message = createBaseCreateSourceRequest(); - message.source = (object.source !== undefined && object.source !== null) - ? Source.fromPartial(object.source) - : undefined; - return message; - }, -}; - -function createBaseCreateSourceResponse(): CreateSourceResponse { - return { status: undefined, sourceId: 0, version: 0 }; -} - -export const CreateSourceResponse = { - fromJSON(object: any): CreateSourceResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - sourceId: isSet(object.sourceId) ? 
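// [Editor's note — illustrative, not part of the generated file] fromPartial
// guards message fields with an explicit `!== undefined && !== null` check,
// so omitted (or null) sub-messages normalize to undefined while scalar
// fields fall back to their proto defaults:
const createdSchema = CreateSchemaResponse.fromPartial({ schemaId: 1, version: 2 });
// createdSchema.status === undefined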
Number(object.sourceId) : 0, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: CreateSourceResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.sourceId !== undefined && (obj.sourceId = Math.round(message.sourceId)); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): CreateSourceResponse { - const message = createBaseCreateSourceResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.sourceId = object.sourceId ?? 0; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseDropSourceRequest(): DropSourceRequest { - return { sourceId: 0 }; -} - -export const DropSourceRequest = { - fromJSON(object: any): DropSourceRequest { - return { sourceId: isSet(object.sourceId) ? Number(object.sourceId) : 0 }; - }, - - toJSON(message: DropSourceRequest): unknown { - const obj: any = {}; - message.sourceId !== undefined && (obj.sourceId = Math.round(message.sourceId)); - return obj; - }, - - fromPartial, I>>(object: I): DropSourceRequest { - const message = createBaseDropSourceRequest(); - message.sourceId = object.sourceId ?? 0; - return message; - }, -}; - -function createBaseDropSourceResponse(): DropSourceResponse { - return { status: undefined, version: 0 }; -} - -export const DropSourceResponse = { - fromJSON(object: any): DropSourceResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: DropSourceResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): DropSourceResponse { - const message = createBaseDropSourceResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseCreateSinkRequest(): CreateSinkRequest { - return { sink: undefined, fragmentGraph: undefined }; -} - -export const CreateSinkRequest = { - fromJSON(object: any): CreateSinkRequest { - return { - sink: isSet(object.sink) ? Sink.fromJSON(object.sink) : undefined, - fragmentGraph: isSet(object.fragmentGraph) ? StreamFragmentGraph.fromJSON(object.fragmentGraph) : undefined, - }; - }, - - toJSON(message: CreateSinkRequest): unknown { - const obj: any = {}; - message.sink !== undefined && (obj.sink = message.sink ? Sink.toJSON(message.sink) : undefined); - message.fragmentGraph !== undefined && - (obj.fragmentGraph = message.fragmentGraph ? StreamFragmentGraph.toJSON(message.fragmentGraph) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CreateSinkRequest { - const message = createBaseCreateSinkRequest(); - message.sink = (object.sink !== undefined && object.sink !== null) ? Sink.fromPartial(object.sink) : undefined; - message.fragmentGraph = (object.fragmentGraph !== undefined && object.fragmentGraph !== null) - ? 
StreamFragmentGraph.fromPartial(object.fragmentGraph) - : undefined; - return message; - }, -}; - -function createBaseCreateSinkResponse(): CreateSinkResponse { - return { status: undefined, sinkId: 0, version: 0 }; -} - -export const CreateSinkResponse = { - fromJSON(object: any): CreateSinkResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - sinkId: isSet(object.sinkId) ? Number(object.sinkId) : 0, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: CreateSinkResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.sinkId !== undefined && (obj.sinkId = Math.round(message.sinkId)); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): CreateSinkResponse { - const message = createBaseCreateSinkResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.sinkId = object.sinkId ?? 0; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseDropSinkRequest(): DropSinkRequest { - return { sinkId: 0 }; -} - -export const DropSinkRequest = { - fromJSON(object: any): DropSinkRequest { - return { sinkId: isSet(object.sinkId) ? Number(object.sinkId) : 0 }; - }, - - toJSON(message: DropSinkRequest): unknown { - const obj: any = {}; - message.sinkId !== undefined && (obj.sinkId = Math.round(message.sinkId)); - return obj; - }, - - fromPartial, I>>(object: I): DropSinkRequest { - const message = createBaseDropSinkRequest(); - message.sinkId = object.sinkId ?? 0; - return message; - }, -}; - -function createBaseDropSinkResponse(): DropSinkResponse { - return { status: undefined, version: 0 }; -} - -export const DropSinkResponse = { - fromJSON(object: any): DropSinkResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: DropSinkResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): DropSinkResponse { - const message = createBaseDropSinkResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseCreateMaterializedViewRequest(): CreateMaterializedViewRequest { - return { materializedView: undefined, fragmentGraph: undefined }; -} - -export const CreateMaterializedViewRequest = { - fromJSON(object: any): CreateMaterializedViewRequest { - return { - materializedView: isSet(object.materializedView) ? Table.fromJSON(object.materializedView) : undefined, - fragmentGraph: isSet(object.fragmentGraph) ? StreamFragmentGraph.fromJSON(object.fragmentGraph) : undefined, - }; - }, - - toJSON(message: CreateMaterializedViewRequest): unknown { - const obj: any = {}; - message.materializedView !== undefined && - (obj.materializedView = message.materializedView ? Table.toJSON(message.materializedView) : undefined); - message.fragmentGraph !== undefined && - (obj.fragmentGraph = message.fragmentGraph ? 
StreamFragmentGraph.toJSON(message.fragmentGraph) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): CreateMaterializedViewRequest { - const message = createBaseCreateMaterializedViewRequest(); - message.materializedView = (object.materializedView !== undefined && object.materializedView !== null) - ? Table.fromPartial(object.materializedView) - : undefined; - message.fragmentGraph = (object.fragmentGraph !== undefined && object.fragmentGraph !== null) - ? StreamFragmentGraph.fromPartial(object.fragmentGraph) - : undefined; - return message; - }, -}; - -function createBaseCreateMaterializedViewResponse(): CreateMaterializedViewResponse { - return { status: undefined, tableId: 0, version: 0 }; -} - -export const CreateMaterializedViewResponse = { - fromJSON(object: any): CreateMaterializedViewResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: CreateMaterializedViewResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>( - object: I, - ): CreateMaterializedViewResponse { - const message = createBaseCreateMaterializedViewResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.tableId = object.tableId ?? 0; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseDropMaterializedViewRequest(): DropMaterializedViewRequest { - return { tableId: 0 }; -} - -export const DropMaterializedViewRequest = { - fromJSON(object: any): DropMaterializedViewRequest { - return { tableId: isSet(object.tableId) ? Number(object.tableId) : 0 }; - }, - - toJSON(message: DropMaterializedViewRequest): unknown { - const obj: any = {}; - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - return obj; - }, - - fromPartial, I>>(object: I): DropMaterializedViewRequest { - const message = createBaseDropMaterializedViewRequest(); - message.tableId = object.tableId ?? 0; - return message; - }, -}; - -function createBaseDropMaterializedViewResponse(): DropMaterializedViewResponse { - return { status: undefined, version: 0 }; -} - -export const DropMaterializedViewResponse = { - fromJSON(object: any): DropMaterializedViewResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: DropMaterializedViewResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): DropMaterializedViewResponse { - const message = createBaseDropMaterializedViewResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 
0; - return message; - }, -}; - -function createBaseCreateViewRequest(): CreateViewRequest { - return { view: undefined }; -} - -export const CreateViewRequest = { - fromJSON(object: any): CreateViewRequest { - return { view: isSet(object.view) ? View.fromJSON(object.view) : undefined }; - }, - - toJSON(message: CreateViewRequest): unknown { - const obj: any = {}; - message.view !== undefined && (obj.view = message.view ? View.toJSON(message.view) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CreateViewRequest { - const message = createBaseCreateViewRequest(); - message.view = (object.view !== undefined && object.view !== null) ? View.fromPartial(object.view) : undefined; - return message; - }, -}; - -function createBaseCreateViewResponse(): CreateViewResponse { - return { status: undefined, viewId: 0, version: 0 }; -} - -export const CreateViewResponse = { - fromJSON(object: any): CreateViewResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - viewId: isSet(object.viewId) ? Number(object.viewId) : 0, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: CreateViewResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.viewId !== undefined && (obj.viewId = Math.round(message.viewId)); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): CreateViewResponse { - const message = createBaseCreateViewResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.viewId = object.viewId ?? 0; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseDropViewRequest(): DropViewRequest { - return { viewId: 0 }; -} - -export const DropViewRequest = { - fromJSON(object: any): DropViewRequest { - return { viewId: isSet(object.viewId) ? Number(object.viewId) : 0 }; - }, - - toJSON(message: DropViewRequest): unknown { - const obj: any = {}; - message.viewId !== undefined && (obj.viewId = Math.round(message.viewId)); - return obj; - }, - - fromPartial, I>>(object: I): DropViewRequest { - const message = createBaseDropViewRequest(); - message.viewId = object.viewId ?? 0; - return message; - }, -}; - -function createBaseDropViewResponse(): DropViewResponse { - return { status: undefined, version: 0 }; -} - -export const DropViewResponse = { - fromJSON(object: any): DropViewResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: DropViewResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): DropViewResponse { - const message = createBaseDropViewResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 
0; - return message; - }, -}; - -function createBaseCreateTableRequest(): CreateTableRequest { - return { source: undefined, materializedView: undefined, fragmentGraph: undefined }; -} - -export const CreateTableRequest = { - fromJSON(object: any): CreateTableRequest { - return { - source: isSet(object.source) ? Source.fromJSON(object.source) : undefined, - materializedView: isSet(object.materializedView) ? Table.fromJSON(object.materializedView) : undefined, - fragmentGraph: isSet(object.fragmentGraph) ? StreamFragmentGraph.fromJSON(object.fragmentGraph) : undefined, - }; - }, - - toJSON(message: CreateTableRequest): unknown { - const obj: any = {}; - message.source !== undefined && (obj.source = message.source ? Source.toJSON(message.source) : undefined); - message.materializedView !== undefined && - (obj.materializedView = message.materializedView ? Table.toJSON(message.materializedView) : undefined); - message.fragmentGraph !== undefined && - (obj.fragmentGraph = message.fragmentGraph ? StreamFragmentGraph.toJSON(message.fragmentGraph) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CreateTableRequest { - const message = createBaseCreateTableRequest(); - message.source = (object.source !== undefined && object.source !== null) - ? Source.fromPartial(object.source) - : undefined; - message.materializedView = (object.materializedView !== undefined && object.materializedView !== null) - ? Table.fromPartial(object.materializedView) - : undefined; - message.fragmentGraph = (object.fragmentGraph !== undefined && object.fragmentGraph !== null) - ? StreamFragmentGraph.fromPartial(object.fragmentGraph) - : undefined; - return message; - }, -}; - -function createBaseCreateTableResponse(): CreateTableResponse { - return { status: undefined, tableId: 0, version: 0 }; -} - -export const CreateTableResponse = { - fromJSON(object: any): CreateTableResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: CreateTableResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): CreateTableResponse { - const message = createBaseCreateTableResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.tableId = object.tableId ?? 0; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseCreateFunctionRequest(): CreateFunctionRequest { - return { function: undefined }; -} - -export const CreateFunctionRequest = { - fromJSON(object: any): CreateFunctionRequest { - return { function: isSet(object.function) ? Function.fromJSON(object.function) : undefined }; - }, - - toJSON(message: CreateFunctionRequest): unknown { - const obj: any = {}; - message.function !== undefined && (obj.function = message.function ? Function.toJSON(message.function) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CreateFunctionRequest { - const message = createBaseCreateFunctionRequest(); - message.function = (object.function !== undefined && object.function !== null) - ? 
Function.fromPartial(object.function) - : undefined; - return message; - }, -}; - -function createBaseCreateFunctionResponse(): CreateFunctionResponse { - return { status: undefined, functionId: 0, version: 0 }; -} - -export const CreateFunctionResponse = { - fromJSON(object: any): CreateFunctionResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - functionId: isSet(object.functionId) ? Number(object.functionId) : 0, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: CreateFunctionResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.functionId !== undefined && (obj.functionId = Math.round(message.functionId)); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): CreateFunctionResponse { - const message = createBaseCreateFunctionResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.functionId = object.functionId ?? 0; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseDropFunctionRequest(): DropFunctionRequest { - return { functionId: 0 }; -} - -export const DropFunctionRequest = { - fromJSON(object: any): DropFunctionRequest { - return { functionId: isSet(object.functionId) ? Number(object.functionId) : 0 }; - }, - - toJSON(message: DropFunctionRequest): unknown { - const obj: any = {}; - message.functionId !== undefined && (obj.functionId = Math.round(message.functionId)); - return obj; - }, - - fromPartial, I>>(object: I): DropFunctionRequest { - const message = createBaseDropFunctionRequest(); - message.functionId = object.functionId ?? 0; - return message; - }, -}; - -function createBaseDropFunctionResponse(): DropFunctionResponse { - return { status: undefined, version: 0 }; -} - -export const DropFunctionResponse = { - fromJSON(object: any): DropFunctionResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: DropFunctionResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): DropFunctionResponse { - const message = createBaseDropFunctionResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseDropTableRequest(): DropTableRequest { - return { sourceId: undefined, tableId: 0 }; -} - -export const DropTableRequest = { - fromJSON(object: any): DropTableRequest { - return { - sourceId: isSet(object.id) ? { $case: "id", id: Number(object.id) } : undefined, - tableId: isSet(object.tableId) ? 
Number(object.tableId) : 0, - }; - }, - - toJSON(message: DropTableRequest): unknown { - const obj: any = {}; - message.sourceId?.$case === "id" && (obj.id = Math.round(message.sourceId?.id)); - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - return obj; - }, - - fromPartial, I>>(object: I): DropTableRequest { - const message = createBaseDropTableRequest(); - if (object.sourceId?.$case === "id" && object.sourceId?.id !== undefined && object.sourceId?.id !== null) { - message.sourceId = { $case: "id", id: object.sourceId.id }; - } - message.tableId = object.tableId ?? 0; - return message; - }, -}; - -function createBaseDropTableResponse(): DropTableResponse { - return { status: undefined, version: 0 }; -} - -export const DropTableResponse = { - fromJSON(object: any): DropTableResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: DropTableResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): DropTableResponse { - const message = createBaseDropTableResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseRisectlListStateTablesRequest(): RisectlListStateTablesRequest { - return {}; -} - -export const RisectlListStateTablesRequest = { - fromJSON(_: any): RisectlListStateTablesRequest { - return {}; - }, - - toJSON(_: RisectlListStateTablesRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): RisectlListStateTablesRequest { - const message = createBaseRisectlListStateTablesRequest(); - return message; - }, -}; - -function createBaseRisectlListStateTablesResponse(): RisectlListStateTablesResponse { - return { tables: [] }; -} - -export const RisectlListStateTablesResponse = { - fromJSON(object: any): RisectlListStateTablesResponse { - return { tables: Array.isArray(object?.tables) ? object.tables.map((e: any) => Table.fromJSON(e)) : [] }; - }, - - toJSON(message: RisectlListStateTablesResponse): unknown { - const obj: any = {}; - if (message.tables) { - obj.tables = message.tables.map((e) => e ? Table.toJSON(e) : undefined); - } else { - obj.tables = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): RisectlListStateTablesResponse { - const message = createBaseRisectlListStateTablesResponse(); - message.tables = object.tables?.map((e) => Table.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseCreateIndexRequest(): CreateIndexRequest { - return { index: undefined, indexTable: undefined, fragmentGraph: undefined }; -} - -export const CreateIndexRequest = { - fromJSON(object: any): CreateIndexRequest { - return { - index: isSet(object.index) ? Index.fromJSON(object.index) : undefined, - indexTable: isSet(object.indexTable) ? Table.fromJSON(object.indexTable) : undefined, - fragmentGraph: isSet(object.fragmentGraph) ? StreamFragmentGraph.fromJSON(object.fragmentGraph) : undefined, - }; - }, - - toJSON(message: CreateIndexRequest): unknown { - const obj: any = {}; - message.index !== undefined && (obj.index = message.index ? 
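DropTableRequest above shows how a proto oneof is modeled: not as optional sibling fields but as a discriminated union tagged by $case, which is what the ?.$case checks in its toJSON and fromPartial are narrowing on. A standalone sketch of the same shape:

    // The union type as generated for the oneof (undefined = no branch set).
    type SourceId = { $case: "id"; id: number } | undefined;

    const sourceId: SourceId = { $case: "id", id: 5 };

    // Narrowing on $case selects the active branch, exactly as toJSON does.
    if (sourceId?.$case === "id") {
      console.log(Math.round(sourceId.id)); // 5
    }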
Index.toJSON(message.index) : undefined); - message.indexTable !== undefined && - (obj.indexTable = message.indexTable ? Table.toJSON(message.indexTable) : undefined); - message.fragmentGraph !== undefined && - (obj.fragmentGraph = message.fragmentGraph ? StreamFragmentGraph.toJSON(message.fragmentGraph) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CreateIndexRequest { - const message = createBaseCreateIndexRequest(); - message.index = (object.index !== undefined && object.index !== null) ? Index.fromPartial(object.index) : undefined; - message.indexTable = (object.indexTable !== undefined && object.indexTable !== null) - ? Table.fromPartial(object.indexTable) - : undefined; - message.fragmentGraph = (object.fragmentGraph !== undefined && object.fragmentGraph !== null) - ? StreamFragmentGraph.fromPartial(object.fragmentGraph) - : undefined; - return message; - }, -}; - -function createBaseCreateIndexResponse(): CreateIndexResponse { - return { status: undefined, indexId: 0, version: 0 }; -} - -export const CreateIndexResponse = { - fromJSON(object: any): CreateIndexResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - indexId: isSet(object.indexId) ? Number(object.indexId) : 0, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: CreateIndexResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.indexId !== undefined && (obj.indexId = Math.round(message.indexId)); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): CreateIndexResponse { - const message = createBaseCreateIndexResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.indexId = object.indexId ?? 0; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseDropIndexRequest(): DropIndexRequest { - return { indexId: 0 }; -} - -export const DropIndexRequest = { - fromJSON(object: any): DropIndexRequest { - return { indexId: isSet(object.indexId) ? Number(object.indexId) : 0 }; - }, - - toJSON(message: DropIndexRequest): unknown { - const obj: any = {}; - message.indexId !== undefined && (obj.indexId = Math.round(message.indexId)); - return obj; - }, - - fromPartial, I>>(object: I): DropIndexRequest { - const message = createBaseDropIndexRequest(); - message.indexId = object.indexId ?? 0; - return message; - }, -}; - -function createBaseDropIndexResponse(): DropIndexResponse { - return { status: undefined, version: 0 }; -} - -export const DropIndexResponse = { - fromJSON(object: any): DropIndexResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: DropIndexResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): DropIndexResponse { - const message = createBaseDropIndexResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 
0; - return message; - }, -}; - -function createBaseReplaceTablePlanRequest(): ReplaceTablePlanRequest { - return { table: undefined, fragmentGraph: undefined, tableColIndexMapping: undefined }; -} - -export const ReplaceTablePlanRequest = { - fromJSON(object: any): ReplaceTablePlanRequest { - return { - table: isSet(object.table) ? Table.fromJSON(object.table) : undefined, - fragmentGraph: isSet(object.fragmentGraph) ? StreamFragmentGraph.fromJSON(object.fragmentGraph) : undefined, - tableColIndexMapping: isSet(object.tableColIndexMapping) - ? ColIndexMapping.fromJSON(object.tableColIndexMapping) - : undefined, - }; - }, - - toJSON(message: ReplaceTablePlanRequest): unknown { - const obj: any = {}; - message.table !== undefined && (obj.table = message.table ? Table.toJSON(message.table) : undefined); - message.fragmentGraph !== undefined && - (obj.fragmentGraph = message.fragmentGraph ? StreamFragmentGraph.toJSON(message.fragmentGraph) : undefined); - message.tableColIndexMapping !== undefined && (obj.tableColIndexMapping = message.tableColIndexMapping - ? ColIndexMapping.toJSON(message.tableColIndexMapping) - : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ReplaceTablePlanRequest { - const message = createBaseReplaceTablePlanRequest(); - message.table = (object.table !== undefined && object.table !== null) ? Table.fromPartial(object.table) : undefined; - message.fragmentGraph = (object.fragmentGraph !== undefined && object.fragmentGraph !== null) - ? StreamFragmentGraph.fromPartial(object.fragmentGraph) - : undefined; - message.tableColIndexMapping = (object.tableColIndexMapping !== undefined && object.tableColIndexMapping !== null) - ? ColIndexMapping.fromPartial(object.tableColIndexMapping) - : undefined; - return message; - }, -}; - -function createBaseReplaceTablePlanResponse(): ReplaceTablePlanResponse { - return { status: undefined, version: 0 }; -} - -export const ReplaceTablePlanResponse = { - fromJSON(object: any): ReplaceTablePlanResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: ReplaceTablePlanResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): ReplaceTablePlanResponse { - const message = createBaseReplaceTablePlanResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseGetTableRequest(): GetTableRequest { - return { databaseName: "", tableName: "" }; -} - -export const GetTableRequest = { - fromJSON(object: any): GetTableRequest { - return { - databaseName: isSet(object.databaseName) ? String(object.databaseName) : "", - tableName: isSet(object.tableName) ? String(object.tableName) : "", - }; - }, - - toJSON(message: GetTableRequest): unknown { - const obj: any = {}; - message.databaseName !== undefined && (obj.databaseName = message.databaseName); - message.tableName !== undefined && (obj.tableName = message.tableName); - return obj; - }, - - fromPartial, I>>(object: I): GetTableRequest { - const message = createBaseGetTableRequest(); - message.databaseName = object.databaseName ?? 
""; - message.tableName = object.tableName ?? ""; - return message; - }, -}; - -function createBaseGetTableResponse(): GetTableResponse { - return { table: undefined }; -} - -export const GetTableResponse = { - fromJSON(object: any): GetTableResponse { - return { table: isSet(object.table) ? Table.fromJSON(object.table) : undefined }; - }, - - toJSON(message: GetTableResponse): unknown { - const obj: any = {}; - message.table !== undefined && (obj.table = message.table ? Table.toJSON(message.table) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): GetTableResponse { - const message = createBaseGetTableResponse(); - message.table = (object.table !== undefined && object.table !== null) ? Table.fromPartial(object.table) : undefined; - return message; - }, -}; - -function createBaseGetDdlProgressRequest(): GetDdlProgressRequest { - return {}; -} - -export const GetDdlProgressRequest = { - fromJSON(_: any): GetDdlProgressRequest { - return {}; - }, - - toJSON(_: GetDdlProgressRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): GetDdlProgressRequest { - const message = createBaseGetDdlProgressRequest(); - return message; - }, -}; - -function createBaseDdlProgress(): DdlProgress { - return { id: 0, statement: "", progress: "" }; -} - -export const DdlProgress = { - fromJSON(object: any): DdlProgress { - return { - id: isSet(object.id) ? Number(object.id) : 0, - statement: isSet(object.statement) ? String(object.statement) : "", - progress: isSet(object.progress) ? String(object.progress) : "", - }; - }, - - toJSON(message: DdlProgress): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.statement !== undefined && (obj.statement = message.statement); - message.progress !== undefined && (obj.progress = message.progress); - return obj; - }, - - fromPartial, I>>(object: I): DdlProgress { - const message = createBaseDdlProgress(); - message.id = object.id ?? 0; - message.statement = object.statement ?? ""; - message.progress = object.progress ?? ""; - return message; - }, -}; - -function createBaseGetDdlProgressResponse(): GetDdlProgressResponse { - return { ddlProgress: [] }; -} - -export const GetDdlProgressResponse = { - fromJSON(object: any): GetDdlProgressResponse { - return { - ddlProgress: Array.isArray(object?.ddlProgress) - ? object.ddlProgress.map((e: any) => DdlProgress.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GetDdlProgressResponse): unknown { - const obj: any = {}; - if (message.ddlProgress) { - obj.ddlProgress = message.ddlProgress.map((e) => e ? DdlProgress.toJSON(e) : undefined); - } else { - obj.ddlProgress = []; - } - return obj; - }, - - fromPartial, I>>(object: I): GetDdlProgressResponse { - const message = createBaseGetDdlProgressResponse(); - message.ddlProgress = object.ddlProgress?.map((e) => DdlProgress.fromPartial(e)) || []; - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends { $case: string } ? { [K in keyof Omit]?: DeepPartial } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? 
P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/expr.ts b/dashboard/proto/gen/expr.ts deleted file mode 100644 index 08995062c54d2..0000000000000 --- a/dashboard/proto/gen/expr.ts +++ /dev/null @@ -1,1200 +0,0 @@ -/* eslint-disable */ -import { ColumnOrder } from "./common"; -import { DataType, Datum } from "./data"; - -export const protobufPackage = "expr"; - -export interface ExprNode { - exprType: ExprNode_Type; - returnType: DataType | undefined; - rexNode?: { $case: "inputRef"; inputRef: number } | { $case: "constant"; constant: Datum } | { - $case: "funcCall"; - funcCall: FunctionCall; - } | { $case: "udf"; udf: UserDefinedFunction }; -} - -/** - * a "pure function" will be defined as having `1 < expr_node as i32 <= 600`. - * Please modify this definition if adding a pure function that does not belong - * to this range. - */ -export const ExprNode_Type = { - UNSPECIFIED: "UNSPECIFIED", - INPUT_REF: "INPUT_REF", - CONSTANT_VALUE: "CONSTANT_VALUE", - /** ADD - arithmetics operators */ - ADD: "ADD", - SUBTRACT: "SUBTRACT", - MULTIPLY: "MULTIPLY", - DIVIDE: "DIVIDE", - MODULUS: "MODULUS", - /** EQUAL - comparison operators */ - EQUAL: "EQUAL", - NOT_EQUAL: "NOT_EQUAL", - LESS_THAN: "LESS_THAN", - LESS_THAN_OR_EQUAL: "LESS_THAN_OR_EQUAL", - GREATER_THAN: "GREATER_THAN", - GREATER_THAN_OR_EQUAL: "GREATER_THAN_OR_EQUAL", - /** AND - logical operators */ - AND: "AND", - OR: "OR", - NOT: "NOT", - IN: "IN", - SOME: "SOME", - ALL: "ALL", - /** BITWISE_AND - bitwise operators */ - BITWISE_AND: "BITWISE_AND", - BITWISE_OR: "BITWISE_OR", - BITWISE_XOR: "BITWISE_XOR", - BITWISE_NOT: "BITWISE_NOT", - BITWISE_SHIFT_LEFT: "BITWISE_SHIFT_LEFT", - BITWISE_SHIFT_RIGHT: "BITWISE_SHIFT_RIGHT", - /** EXTRACT - date functions */ - EXTRACT: "EXTRACT", - TUMBLE_START: "TUMBLE_START", - /** - * TO_TIMESTAMP - From f64 to timestamp. - * e.g. `select to_timestamp(1672044740.0)` - */ - TO_TIMESTAMP: "TO_TIMESTAMP", - AT_TIME_ZONE: "AT_TIME_ZONE", - DATE_TRUNC: "DATE_TRUNC", - /** - * TO_TIMESTAMP1 - Parse text to timestamp by format string. - * e.g. `select to_timestamp('2022 08 21', 'YYYY MM DD')` - */ - TO_TIMESTAMP1: "TO_TIMESTAMP1", - /** CAST_WITH_TIME_ZONE - Performs a cast with additional timezone information. */ - CAST_WITH_TIME_ZONE: "CAST_WITH_TIME_ZONE", - /** CAST - other functions */ - CAST: "CAST", - SUBSTR: "SUBSTR", - LENGTH: "LENGTH", - LIKE: "LIKE", - UPPER: "UPPER", - LOWER: "LOWER", - TRIM: "TRIM", - REPLACE: "REPLACE", - POSITION: "POSITION", - LTRIM: "LTRIM", - RTRIM: "RTRIM", - CASE: "CASE", - /** ROUND_DIGIT - ROUND(numeric, integer) -> numeric */ - ROUND_DIGIT: "ROUND_DIGIT", - /** - * ROUND - ROUND(numeric) -> numeric - * ROUND(double precision) -> double precision - */ - ROUND: "ROUND", - ASCII: "ASCII", - TRANSLATE: "TRANSLATE", - COALESCE: "COALESCE", - CONCAT_WS: "CONCAT_WS", - ABS: "ABS", - SPLIT_PART: "SPLIT_PART", - CEIL: "CEIL", - FLOOR: "FLOOR", - TO_CHAR: "TO_CHAR", - MD5: "MD5", - CHAR_LENGTH: "CHAR_LENGTH", - REPEAT: "REPEAT", - CONCAT_OP: "CONCAT_OP", - /** BOOL_OUT - BOOL_OUT is different from CAST-bool-to-varchar in PostgreSQL. 
*/ - BOOL_OUT: "BOOL_OUT", - OCTET_LENGTH: "OCTET_LENGTH", - BIT_LENGTH: "BIT_LENGTH", - OVERLAY: "OVERLAY", - REGEXP_MATCH: "REGEXP_MATCH", - POW: "POW", - EXP: "EXP", - /** IS_TRUE - Boolean comparison */ - IS_TRUE: "IS_TRUE", - IS_NOT_TRUE: "IS_NOT_TRUE", - IS_FALSE: "IS_FALSE", - IS_NOT_FALSE: "IS_NOT_FALSE", - IS_NULL: "IS_NULL", - IS_NOT_NULL: "IS_NOT_NULL", - IS_DISTINCT_FROM: "IS_DISTINCT_FROM", - IS_NOT_DISTINCT_FROM: "IS_NOT_DISTINCT_FROM", - /** NEG - Unary operators */ - NEG: "NEG", - /** FIELD - Nested selection operators */ - FIELD: "FIELD", - /** ARRAY - Array expression. */ - ARRAY: "ARRAY", - ARRAY_ACCESS: "ARRAY_ACCESS", - ROW: "ROW", - ARRAY_TO_STRING: "ARRAY_TO_STRING", - /** ARRAY_CAT - Array functions */ - ARRAY_CAT: "ARRAY_CAT", - ARRAY_APPEND: "ARRAY_APPEND", - ARRAY_PREPEND: "ARRAY_PREPEND", - FORMAT_TYPE: "FORMAT_TYPE", - ARRAY_DISTINCT: "ARRAY_DISTINCT", - /** JSONB_ACCESS_INNER - jsonb -> int, jsonb -> text, jsonb #> text[] that returns jsonb */ - JSONB_ACCESS_INNER: "JSONB_ACCESS_INNER", - /** JSONB_ACCESS_STR - jsonb ->> int, jsonb ->> text, jsonb #>> text[] that returns text */ - JSONB_ACCESS_STR: "JSONB_ACCESS_STR", - JSONB_TYPEOF: "JSONB_TYPEOF", - JSONB_ARRAY_LENGTH: "JSONB_ARRAY_LENGTH", - /** - * VNODE - Non-pure functions below (> 1000) - * ------------------------ - * Internal functions - */ - VNODE: "VNODE", - /** NOW - Non-deterministic functions */ - NOW: "NOW", - /** UDF - User defined functions */ - UDF: "UDF", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type ExprNode_Type = typeof ExprNode_Type[keyof typeof ExprNode_Type]; - -export function exprNode_TypeFromJSON(object: any): ExprNode_Type { - switch (object) { - case 0: - case "UNSPECIFIED": - return ExprNode_Type.UNSPECIFIED; - case 1: - case "INPUT_REF": - return ExprNode_Type.INPUT_REF; - case 2: - case "CONSTANT_VALUE": - return ExprNode_Type.CONSTANT_VALUE; - case 3: - case "ADD": - return ExprNode_Type.ADD; - case 4: - case "SUBTRACT": - return ExprNode_Type.SUBTRACT; - case 5: - case "MULTIPLY": - return ExprNode_Type.MULTIPLY; - case 6: - case "DIVIDE": - return ExprNode_Type.DIVIDE; - case 7: - case "MODULUS": - return ExprNode_Type.MODULUS; - case 8: - case "EQUAL": - return ExprNode_Type.EQUAL; - case 9: - case "NOT_EQUAL": - return ExprNode_Type.NOT_EQUAL; - case 10: - case "LESS_THAN": - return ExprNode_Type.LESS_THAN; - case 11: - case "LESS_THAN_OR_EQUAL": - return ExprNode_Type.LESS_THAN_OR_EQUAL; - case 12: - case "GREATER_THAN": - return ExprNode_Type.GREATER_THAN; - case 13: - case "GREATER_THAN_OR_EQUAL": - return ExprNode_Type.GREATER_THAN_OR_EQUAL; - case 21: - case "AND": - return ExprNode_Type.AND; - case 22: - case "OR": - return ExprNode_Type.OR; - case 23: - case "NOT": - return ExprNode_Type.NOT; - case 24: - case "IN": - return ExprNode_Type.IN; - case 25: - case "SOME": - return ExprNode_Type.SOME; - case 26: - case "ALL": - return ExprNode_Type.ALL; - case 31: - case "BITWISE_AND": - return ExprNode_Type.BITWISE_AND; - case 32: - case "BITWISE_OR": - return ExprNode_Type.BITWISE_OR; - case 33: - case "BITWISE_XOR": - return ExprNode_Type.BITWISE_XOR; - case 34: - case "BITWISE_NOT": - return ExprNode_Type.BITWISE_NOT; - case 35: - case "BITWISE_SHIFT_LEFT": - return ExprNode_Type.BITWISE_SHIFT_LEFT; - case 36: - case "BITWISE_SHIFT_RIGHT": - return ExprNode_Type.BITWISE_SHIFT_RIGHT; - case 101: - case "EXTRACT": - return ExprNode_Type.EXTRACT; - case 103: - case "TUMBLE_START": - return ExprNode_Type.TUMBLE_START; - case 104: - case "TO_TIMESTAMP": - 
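Enums are generated as a frozen object of string literals plus a derived union type, and the FromJSON converters accept either the numeric wire tag or the string name; the case numbers in the switch are sparse (13 jumps to 21, 36 to 101, and so on) because they mirror the tag numbers assigned in the .proto definition. The pattern in miniature, with a hypothetical Color enum:

    export const Color = {
      UNSPECIFIED: "UNSPECIFIED",
      RED: "RED",
      BLUE: "BLUE",
      UNRECOGNIZED: "UNRECOGNIZED",
    } as const;

    export type Color = typeof Color[keyof typeof Color];

    // Accepts the numeric tag or the name; unknown values map to
    // UNRECOGNIZED rather than throwing.
    export function colorFromJSON(object: any): Color {
      switch (object) {
        case 0:
        case "UNSPECIFIED":
          return Color.UNSPECIFIED;
        case 1:
        case "RED":
          return Color.RED;
        case 7: // tags may be sparse, mirroring the .proto file
        case "BLUE":
          return Color.BLUE;
        case -1:
        case "UNRECOGNIZED":
        default:
          return Color.UNRECOGNIZED;
      }
    }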
return ExprNode_Type.TO_TIMESTAMP; - case 105: - case "AT_TIME_ZONE": - return ExprNode_Type.AT_TIME_ZONE; - case 106: - case "DATE_TRUNC": - return ExprNode_Type.DATE_TRUNC; - case 107: - case "TO_TIMESTAMP1": - return ExprNode_Type.TO_TIMESTAMP1; - case 108: - case "CAST_WITH_TIME_ZONE": - return ExprNode_Type.CAST_WITH_TIME_ZONE; - case 201: - case "CAST": - return ExprNode_Type.CAST; - case 202: - case "SUBSTR": - return ExprNode_Type.SUBSTR; - case 203: - case "LENGTH": - return ExprNode_Type.LENGTH; - case 204: - case "LIKE": - return ExprNode_Type.LIKE; - case 205: - case "UPPER": - return ExprNode_Type.UPPER; - case 206: - case "LOWER": - return ExprNode_Type.LOWER; - case 207: - case "TRIM": - return ExprNode_Type.TRIM; - case 208: - case "REPLACE": - return ExprNode_Type.REPLACE; - case 209: - case "POSITION": - return ExprNode_Type.POSITION; - case 210: - case "LTRIM": - return ExprNode_Type.LTRIM; - case 211: - case "RTRIM": - return ExprNode_Type.RTRIM; - case 212: - case "CASE": - return ExprNode_Type.CASE; - case 213: - case "ROUND_DIGIT": - return ExprNode_Type.ROUND_DIGIT; - case 214: - case "ROUND": - return ExprNode_Type.ROUND; - case 215: - case "ASCII": - return ExprNode_Type.ASCII; - case 216: - case "TRANSLATE": - return ExprNode_Type.TRANSLATE; - case 217: - case "COALESCE": - return ExprNode_Type.COALESCE; - case 218: - case "CONCAT_WS": - return ExprNode_Type.CONCAT_WS; - case 219: - case "ABS": - return ExprNode_Type.ABS; - case 220: - case "SPLIT_PART": - return ExprNode_Type.SPLIT_PART; - case 221: - case "CEIL": - return ExprNode_Type.CEIL; - case 222: - case "FLOOR": - return ExprNode_Type.FLOOR; - case 223: - case "TO_CHAR": - return ExprNode_Type.TO_CHAR; - case 224: - case "MD5": - return ExprNode_Type.MD5; - case 225: - case "CHAR_LENGTH": - return ExprNode_Type.CHAR_LENGTH; - case 226: - case "REPEAT": - return ExprNode_Type.REPEAT; - case 227: - case "CONCAT_OP": - return ExprNode_Type.CONCAT_OP; - case 228: - case "BOOL_OUT": - return ExprNode_Type.BOOL_OUT; - case 229: - case "OCTET_LENGTH": - return ExprNode_Type.OCTET_LENGTH; - case 230: - case "BIT_LENGTH": - return ExprNode_Type.BIT_LENGTH; - case 231: - case "OVERLAY": - return ExprNode_Type.OVERLAY; - case 232: - case "REGEXP_MATCH": - return ExprNode_Type.REGEXP_MATCH; - case 233: - case "POW": - return ExprNode_Type.POW; - case 234: - case "EXP": - return ExprNode_Type.EXP; - case 301: - case "IS_TRUE": - return ExprNode_Type.IS_TRUE; - case 302: - case "IS_NOT_TRUE": - return ExprNode_Type.IS_NOT_TRUE; - case 303: - case "IS_FALSE": - return ExprNode_Type.IS_FALSE; - case 304: - case "IS_NOT_FALSE": - return ExprNode_Type.IS_NOT_FALSE; - case 305: - case "IS_NULL": - return ExprNode_Type.IS_NULL; - case 306: - case "IS_NOT_NULL": - return ExprNode_Type.IS_NOT_NULL; - case 307: - case "IS_DISTINCT_FROM": - return ExprNode_Type.IS_DISTINCT_FROM; - case 308: - case "IS_NOT_DISTINCT_FROM": - return ExprNode_Type.IS_NOT_DISTINCT_FROM; - case 401: - case "NEG": - return ExprNode_Type.NEG; - case 501: - case "FIELD": - return ExprNode_Type.FIELD; - case 521: - case "ARRAY": - return ExprNode_Type.ARRAY; - case 522: - case "ARRAY_ACCESS": - return ExprNode_Type.ARRAY_ACCESS; - case 523: - case "ROW": - return ExprNode_Type.ROW; - case 524: - case "ARRAY_TO_STRING": - return ExprNode_Type.ARRAY_TO_STRING; - case 531: - case "ARRAY_CAT": - return ExprNode_Type.ARRAY_CAT; - case 532: - case "ARRAY_APPEND": - return ExprNode_Type.ARRAY_APPEND; - case 533: - case "ARRAY_PREPEND": - return 
ExprNode_Type.ARRAY_PREPEND; - case 534: - case "FORMAT_TYPE": - return ExprNode_Type.FORMAT_TYPE; - case 535: - case "ARRAY_DISTINCT": - return ExprNode_Type.ARRAY_DISTINCT; - case 600: - case "JSONB_ACCESS_INNER": - return ExprNode_Type.JSONB_ACCESS_INNER; - case 601: - case "JSONB_ACCESS_STR": - return ExprNode_Type.JSONB_ACCESS_STR; - case 602: - case "JSONB_TYPEOF": - return ExprNode_Type.JSONB_TYPEOF; - case 603: - case "JSONB_ARRAY_LENGTH": - return ExprNode_Type.JSONB_ARRAY_LENGTH; - case 1101: - case "VNODE": - return ExprNode_Type.VNODE; - case 2022: - case "NOW": - return ExprNode_Type.NOW; - case 3000: - case "UDF": - return ExprNode_Type.UDF; - case -1: - case "UNRECOGNIZED": - default: - return ExprNode_Type.UNRECOGNIZED; - } -} - -export function exprNode_TypeToJSON(object: ExprNode_Type): string { - switch (object) { - case ExprNode_Type.UNSPECIFIED: - return "UNSPECIFIED"; - case ExprNode_Type.INPUT_REF: - return "INPUT_REF"; - case ExprNode_Type.CONSTANT_VALUE: - return "CONSTANT_VALUE"; - case ExprNode_Type.ADD: - return "ADD"; - case ExprNode_Type.SUBTRACT: - return "SUBTRACT"; - case ExprNode_Type.MULTIPLY: - return "MULTIPLY"; - case ExprNode_Type.DIVIDE: - return "DIVIDE"; - case ExprNode_Type.MODULUS: - return "MODULUS"; - case ExprNode_Type.EQUAL: - return "EQUAL"; - case ExprNode_Type.NOT_EQUAL: - return "NOT_EQUAL"; - case ExprNode_Type.LESS_THAN: - return "LESS_THAN"; - case ExprNode_Type.LESS_THAN_OR_EQUAL: - return "LESS_THAN_OR_EQUAL"; - case ExprNode_Type.GREATER_THAN: - return "GREATER_THAN"; - case ExprNode_Type.GREATER_THAN_OR_EQUAL: - return "GREATER_THAN_OR_EQUAL"; - case ExprNode_Type.AND: - return "AND"; - case ExprNode_Type.OR: - return "OR"; - case ExprNode_Type.NOT: - return "NOT"; - case ExprNode_Type.IN: - return "IN"; - case ExprNode_Type.SOME: - return "SOME"; - case ExprNode_Type.ALL: - return "ALL"; - case ExprNode_Type.BITWISE_AND: - return "BITWISE_AND"; - case ExprNode_Type.BITWISE_OR: - return "BITWISE_OR"; - case ExprNode_Type.BITWISE_XOR: - return "BITWISE_XOR"; - case ExprNode_Type.BITWISE_NOT: - return "BITWISE_NOT"; - case ExprNode_Type.BITWISE_SHIFT_LEFT: - return "BITWISE_SHIFT_LEFT"; - case ExprNode_Type.BITWISE_SHIFT_RIGHT: - return "BITWISE_SHIFT_RIGHT"; - case ExprNode_Type.EXTRACT: - return "EXTRACT"; - case ExprNode_Type.TUMBLE_START: - return "TUMBLE_START"; - case ExprNode_Type.TO_TIMESTAMP: - return "TO_TIMESTAMP"; - case ExprNode_Type.AT_TIME_ZONE: - return "AT_TIME_ZONE"; - case ExprNode_Type.DATE_TRUNC: - return "DATE_TRUNC"; - case ExprNode_Type.TO_TIMESTAMP1: - return "TO_TIMESTAMP1"; - case ExprNode_Type.CAST_WITH_TIME_ZONE: - return "CAST_WITH_TIME_ZONE"; - case ExprNode_Type.CAST: - return "CAST"; - case ExprNode_Type.SUBSTR: - return "SUBSTR"; - case ExprNode_Type.LENGTH: - return "LENGTH"; - case ExprNode_Type.LIKE: - return "LIKE"; - case ExprNode_Type.UPPER: - return "UPPER"; - case ExprNode_Type.LOWER: - return "LOWER"; - case ExprNode_Type.TRIM: - return "TRIM"; - case ExprNode_Type.REPLACE: - return "REPLACE"; - case ExprNode_Type.POSITION: - return "POSITION"; - case ExprNode_Type.LTRIM: - return "LTRIM"; - case ExprNode_Type.RTRIM: - return "RTRIM"; - case ExprNode_Type.CASE: - return "CASE"; - case ExprNode_Type.ROUND_DIGIT: - return "ROUND_DIGIT"; - case ExprNode_Type.ROUND: - return "ROUND"; - case ExprNode_Type.ASCII: - return "ASCII"; - case ExprNode_Type.TRANSLATE: - return "TRANSLATE"; - case ExprNode_Type.COALESCE: - return "COALESCE"; - case ExprNode_Type.CONCAT_WS: - return "CONCAT_WS"; - case 
ExprNode_Type.ABS: - return "ABS"; - case ExprNode_Type.SPLIT_PART: - return "SPLIT_PART"; - case ExprNode_Type.CEIL: - return "CEIL"; - case ExprNode_Type.FLOOR: - return "FLOOR"; - case ExprNode_Type.TO_CHAR: - return "TO_CHAR"; - case ExprNode_Type.MD5: - return "MD5"; - case ExprNode_Type.CHAR_LENGTH: - return "CHAR_LENGTH"; - case ExprNode_Type.REPEAT: - return "REPEAT"; - case ExprNode_Type.CONCAT_OP: - return "CONCAT_OP"; - case ExprNode_Type.BOOL_OUT: - return "BOOL_OUT"; - case ExprNode_Type.OCTET_LENGTH: - return "OCTET_LENGTH"; - case ExprNode_Type.BIT_LENGTH: - return "BIT_LENGTH"; - case ExprNode_Type.OVERLAY: - return "OVERLAY"; - case ExprNode_Type.REGEXP_MATCH: - return "REGEXP_MATCH"; - case ExprNode_Type.POW: - return "POW"; - case ExprNode_Type.EXP: - return "EXP"; - case ExprNode_Type.IS_TRUE: - return "IS_TRUE"; - case ExprNode_Type.IS_NOT_TRUE: - return "IS_NOT_TRUE"; - case ExprNode_Type.IS_FALSE: - return "IS_FALSE"; - case ExprNode_Type.IS_NOT_FALSE: - return "IS_NOT_FALSE"; - case ExprNode_Type.IS_NULL: - return "IS_NULL"; - case ExprNode_Type.IS_NOT_NULL: - return "IS_NOT_NULL"; - case ExprNode_Type.IS_DISTINCT_FROM: - return "IS_DISTINCT_FROM"; - case ExprNode_Type.IS_NOT_DISTINCT_FROM: - return "IS_NOT_DISTINCT_FROM"; - case ExprNode_Type.NEG: - return "NEG"; - case ExprNode_Type.FIELD: - return "FIELD"; - case ExprNode_Type.ARRAY: - return "ARRAY"; - case ExprNode_Type.ARRAY_ACCESS: - return "ARRAY_ACCESS"; - case ExprNode_Type.ROW: - return "ROW"; - case ExprNode_Type.ARRAY_TO_STRING: - return "ARRAY_TO_STRING"; - case ExprNode_Type.ARRAY_CAT: - return "ARRAY_CAT"; - case ExprNode_Type.ARRAY_APPEND: - return "ARRAY_APPEND"; - case ExprNode_Type.ARRAY_PREPEND: - return "ARRAY_PREPEND"; - case ExprNode_Type.FORMAT_TYPE: - return "FORMAT_TYPE"; - case ExprNode_Type.ARRAY_DISTINCT: - return "ARRAY_DISTINCT"; - case ExprNode_Type.JSONB_ACCESS_INNER: - return "JSONB_ACCESS_INNER"; - case ExprNode_Type.JSONB_ACCESS_STR: - return "JSONB_ACCESS_STR"; - case ExprNode_Type.JSONB_TYPEOF: - return "JSONB_TYPEOF"; - case ExprNode_Type.JSONB_ARRAY_LENGTH: - return "JSONB_ARRAY_LENGTH"; - case ExprNode_Type.VNODE: - return "VNODE"; - case ExprNode_Type.NOW: - return "NOW"; - case ExprNode_Type.UDF: - return "UDF"; - case ExprNode_Type.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface TableFunction { - functionType: TableFunction_Type; - args: ExprNode[]; - returnType: DataType | undefined; -} - -export const TableFunction_Type = { - UNSPECIFIED: "UNSPECIFIED", - GENERATE: "GENERATE", - UNNEST: "UNNEST", - REGEXP_MATCHES: "REGEXP_MATCHES", - RANGE: "RANGE", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type TableFunction_Type = typeof TableFunction_Type[keyof typeof TableFunction_Type]; - -export function tableFunction_TypeFromJSON(object: any): TableFunction_Type { - switch (object) { - case 0: - case "UNSPECIFIED": - return TableFunction_Type.UNSPECIFIED; - case 1: - case "GENERATE": - return TableFunction_Type.GENERATE; - case 2: - case "UNNEST": - return TableFunction_Type.UNNEST; - case 3: - case "REGEXP_MATCHES": - return TableFunction_Type.REGEXP_MATCHES; - case 4: - case "RANGE": - return TableFunction_Type.RANGE; - case -1: - case "UNRECOGNIZED": - default: - return TableFunction_Type.UNRECOGNIZED; - } -} - -export function tableFunction_TypeToJSON(object: TableFunction_Type): string { - switch (object) { - case TableFunction_Type.UNSPECIFIED: - return "UNSPECIFIED"; - case TableFunction_Type.GENERATE: - return "GENERATE"; - case 
TableFunction_Type.UNNEST: - return "UNNEST"; - case TableFunction_Type.REGEXP_MATCHES: - return "REGEXP_MATCHES"; - case TableFunction_Type.RANGE: - return "RANGE"; - case TableFunction_Type.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** Reference to an upstream column, containing its index and data type. */ -export interface InputRef { - index: number; - type: DataType | undefined; -} - -/** - * The items which can occur in the select list of `ProjectSet` operator. - * - * When there are table functions in the SQL query `SELECT ...`, it will be planned as `ProjectSet`. - * Otherwise it will be planned as `Project`. - * - * # Examples - * - * ```sql - * # Project - * select 1; - * - * # ProjectSet - * select unnest(array[1,2,3]); - * - * # ProjectSet (table function & usual expression) - * select unnest(array[1,2,3]), 1; - * - * # ProjectSet (multiple table functions) - * select unnest(array[1,2,3]), unnest(array[4,5]); - * - * # ProjectSet over ProjectSet (table function as parameters of table function) - * select unnest(regexp_matches(v1, 'a(\d)c(\d)', 'g')) from t; - * - * # Project over ProjectSet (table function as parameters of usual function) - * select unnest(regexp_matches(v1, 'a(\d)c(\d)', 'g')) from t; - * ``` - */ -export interface ProjectSetSelectItem { - selectItem?: { $case: "expr"; expr: ExprNode } | { $case: "tableFunction"; tableFunction: TableFunction }; -} - -export interface FunctionCall { - children: ExprNode[]; -} - -/** Aggregate Function Calls for Aggregation */ -export interface AggCall { - type: AggCall_Type; - args: InputRef[]; - returnType: DataType | undefined; - distinct: boolean; - orderBy: ColumnOrder[]; - filter: ExprNode | undefined; -} - -export const AggCall_Type = { - UNSPECIFIED: "UNSPECIFIED", - SUM: "SUM", - MIN: "MIN", - MAX: "MAX", - COUNT: "COUNT", - AVG: "AVG", - STRING_AGG: "STRING_AGG", - APPROX_COUNT_DISTINCT: "APPROX_COUNT_DISTINCT", - ARRAY_AGG: "ARRAY_AGG", - FIRST_VALUE: "FIRST_VALUE", - SUM0: "SUM0", - VAR_POP: "VAR_POP", - VAR_SAMP: "VAR_SAMP", - STDDEV_POP: "STDDEV_POP", - STDDEV_SAMP: "STDDEV_SAMP", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type AggCall_Type = typeof AggCall_Type[keyof typeof AggCall_Type]; - -export function aggCall_TypeFromJSON(object: any): AggCall_Type { - switch (object) { - case 0: - case "UNSPECIFIED": - return AggCall_Type.UNSPECIFIED; - case 1: - case "SUM": - return AggCall_Type.SUM; - case 2: - case "MIN": - return AggCall_Type.MIN; - case 3: - case "MAX": - return AggCall_Type.MAX; - case 4: - case "COUNT": - return AggCall_Type.COUNT; - case 5: - case "AVG": - return AggCall_Type.AVG; - case 6: - case "STRING_AGG": - return AggCall_Type.STRING_AGG; - case 7: - case "APPROX_COUNT_DISTINCT": - return AggCall_Type.APPROX_COUNT_DISTINCT; - case 8: - case "ARRAY_AGG": - return AggCall_Type.ARRAY_AGG; - case 9: - case "FIRST_VALUE": - return AggCall_Type.FIRST_VALUE; - case 10: - case "SUM0": - return AggCall_Type.SUM0; - case 11: - case "VAR_POP": - return AggCall_Type.VAR_POP; - case 12: - case "VAR_SAMP": - return AggCall_Type.VAR_SAMP; - case 13: - case "STDDEV_POP": - return AggCall_Type.STDDEV_POP; - case 14: - case "STDDEV_SAMP": - return AggCall_Type.STDDEV_SAMP; - case -1: - case "UNRECOGNIZED": - default: - return AggCall_Type.UNRECOGNIZED; - } -} - -export function aggCall_TypeToJSON(object: AggCall_Type): string { - switch (object) { - case AggCall_Type.UNSPECIFIED: - return "UNSPECIFIED"; - case AggCall_Type.SUM: - return "SUM"; - case AggCall_Type.MIN: - return 
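AggCall's distinct, orderBy, and filter fields carry the SQL aggregate modifiers, so an aggregate like COUNT(DISTINCT x) FILTER (WHERE ...) arrives as flags and sub-expressions on one message rather than as separate node types. A hedged construction sketch (import path illustrative):

    import { AggCall, AggCall_Type } from "./expr";

    // COUNT(DISTINCT <arg>): args and orderBy default to [], filter to undefined.
    const call = AggCall.fromPartial({ type: AggCall_Type.COUNT, distinct: true });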
"MIN"; - case AggCall_Type.MAX: - return "MAX"; - case AggCall_Type.COUNT: - return "COUNT"; - case AggCall_Type.AVG: - return "AVG"; - case AggCall_Type.STRING_AGG: - return "STRING_AGG"; - case AggCall_Type.APPROX_COUNT_DISTINCT: - return "APPROX_COUNT_DISTINCT"; - case AggCall_Type.ARRAY_AGG: - return "ARRAY_AGG"; - case AggCall_Type.FIRST_VALUE: - return "FIRST_VALUE"; - case AggCall_Type.SUM0: - return "SUM0"; - case AggCall_Type.VAR_POP: - return "VAR_POP"; - case AggCall_Type.VAR_SAMP: - return "VAR_SAMP"; - case AggCall_Type.STDDEV_POP: - return "STDDEV_POP"; - case AggCall_Type.STDDEV_SAMP: - return "STDDEV_SAMP"; - case AggCall_Type.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface UserDefinedFunction { - children: ExprNode[]; - name: string; - argTypes: DataType[]; - language: string; - link: string; - identifier: string; -} - -function createBaseExprNode(): ExprNode { - return { exprType: ExprNode_Type.UNSPECIFIED, returnType: undefined, rexNode: undefined }; -} - -export const ExprNode = { - fromJSON(object: any): ExprNode { - return { - exprType: isSet(object.exprType) ? exprNode_TypeFromJSON(object.exprType) : ExprNode_Type.UNSPECIFIED, - returnType: isSet(object.returnType) ? DataType.fromJSON(object.returnType) : undefined, - rexNode: isSet(object.inputRef) - ? { $case: "inputRef", inputRef: Number(object.inputRef) } - : isSet(object.constant) - ? { $case: "constant", constant: Datum.fromJSON(object.constant) } - : isSet(object.funcCall) - ? { $case: "funcCall", funcCall: FunctionCall.fromJSON(object.funcCall) } - : isSet(object.udf) - ? { $case: "udf", udf: UserDefinedFunction.fromJSON(object.udf) } - : undefined, - }; - }, - - toJSON(message: ExprNode): unknown { - const obj: any = {}; - message.exprType !== undefined && (obj.exprType = exprNode_TypeToJSON(message.exprType)); - message.returnType !== undefined && - (obj.returnType = message.returnType ? DataType.toJSON(message.returnType) : undefined); - message.rexNode?.$case === "inputRef" && (obj.inputRef = Math.round(message.rexNode?.inputRef)); - message.rexNode?.$case === "constant" && - (obj.constant = message.rexNode?.constant ? Datum.toJSON(message.rexNode?.constant) : undefined); - message.rexNode?.$case === "funcCall" && - (obj.funcCall = message.rexNode?.funcCall ? FunctionCall.toJSON(message.rexNode?.funcCall) : undefined); - message.rexNode?.$case === "udf" && - (obj.udf = message.rexNode?.udf ? UserDefinedFunction.toJSON(message.rexNode?.udf) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ExprNode { - const message = createBaseExprNode(); - message.exprType = object.exprType ?? ExprNode_Type.UNSPECIFIED; - message.returnType = (object.returnType !== undefined && object.returnType !== null) - ? 
DataType.fromPartial(object.returnType) - : undefined; - if ( - object.rexNode?.$case === "inputRef" && - object.rexNode?.inputRef !== undefined && - object.rexNode?.inputRef !== null - ) { - message.rexNode = { $case: "inputRef", inputRef: object.rexNode.inputRef }; - } - if ( - object.rexNode?.$case === "constant" && - object.rexNode?.constant !== undefined && - object.rexNode?.constant !== null - ) { - message.rexNode = { $case: "constant", constant: Datum.fromPartial(object.rexNode.constant) }; - } - if ( - object.rexNode?.$case === "funcCall" && - object.rexNode?.funcCall !== undefined && - object.rexNode?.funcCall !== null - ) { - message.rexNode = { $case: "funcCall", funcCall: FunctionCall.fromPartial(object.rexNode.funcCall) }; - } - if (object.rexNode?.$case === "udf" && object.rexNode?.udf !== undefined && object.rexNode?.udf !== null) { - message.rexNode = { $case: "udf", udf: UserDefinedFunction.fromPartial(object.rexNode.udf) }; - } - return message; - }, -}; - -function createBaseTableFunction(): TableFunction { - return { functionType: TableFunction_Type.UNSPECIFIED, args: [], returnType: undefined }; -} - -export const TableFunction = { - fromJSON(object: any): TableFunction { - return { - functionType: isSet(object.functionType) - ? tableFunction_TypeFromJSON(object.functionType) - : TableFunction_Type.UNSPECIFIED, - args: Array.isArray(object?.args) - ? object.args.map((e: any) => ExprNode.fromJSON(e)) - : [], - returnType: isSet(object.returnType) ? DataType.fromJSON(object.returnType) : undefined, - }; - }, - - toJSON(message: TableFunction): unknown { - const obj: any = {}; - message.functionType !== undefined && (obj.functionType = tableFunction_TypeToJSON(message.functionType)); - if (message.args) { - obj.args = message.args.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.args = []; - } - message.returnType !== undefined && - (obj.returnType = message.returnType ? DataType.toJSON(message.returnType) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): TableFunction { - const message = createBaseTableFunction(); - message.functionType = object.functionType ?? TableFunction_Type.UNSPECIFIED; - message.args = object.args?.map((e) => ExprNode.fromPartial(e)) || []; - message.returnType = (object.returnType !== undefined && object.returnType !== null) - ? DataType.fromPartial(object.returnType) - : undefined; - return message; - }, -}; - -function createBaseInputRef(): InputRef { - return { index: 0, type: undefined }; -} - -export const InputRef = { - fromJSON(object: any): InputRef { - return { - index: isSet(object.index) ? Number(object.index) : 0, - type: isSet(object.type) ? DataType.fromJSON(object.type) : undefined, - }; - }, - - toJSON(message: InputRef): unknown { - const obj: any = {}; - message.index !== undefined && (obj.index = Math.round(message.index)); - message.type !== undefined && (obj.type = message.type ? DataType.toJSON(message.type) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): InputRef { - const message = createBaseInputRef(); - message.index = object.index ?? 0; - message.type = (object.type !== undefined && object.type !== null) ? DataType.fromPartial(object.type) : undefined; - return message; - }, -}; - -function createBaseProjectSetSelectItem(): ProjectSetSelectItem { - return { selectItem: undefined }; -} - -export const ProjectSetSelectItem = { - fromJSON(object: any): ProjectSetSelectItem { - return { - selectItem: isSet(object.expr) - ? 
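ExprNode's decoder picks the active rexNode branch by which key is present in the JSON (inputRef, then constant, then funcCall, then udf). Decoding a small `a + b` tree, with returnType omitted for brevity (real plans carry it):

    import { ExprNode } from "./expr";

    const node = ExprNode.fromJSON({
      exprType: "ADD",
      funcCall: {
        children: [
          { exprType: "INPUT_REF", inputRef: 0 },
          { exprType: "INPUT_REF", inputRef: 1 },
        ],
      },
    });
    // node.rexNode?.$case === "funcCall"; each child's rexNode is an inputRef.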
{ $case: "expr", expr: ExprNode.fromJSON(object.expr) } - : isSet(object.tableFunction) - ? { $case: "tableFunction", tableFunction: TableFunction.fromJSON(object.tableFunction) } - : undefined, - }; - }, - - toJSON(message: ProjectSetSelectItem): unknown { - const obj: any = {}; - message.selectItem?.$case === "expr" && - (obj.expr = message.selectItem?.expr ? ExprNode.toJSON(message.selectItem?.expr) : undefined); - message.selectItem?.$case === "tableFunction" && (obj.tableFunction = message.selectItem?.tableFunction - ? TableFunction.toJSON(message.selectItem?.tableFunction) - : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ProjectSetSelectItem { - const message = createBaseProjectSetSelectItem(); - if ( - object.selectItem?.$case === "expr" && object.selectItem?.expr !== undefined && object.selectItem?.expr !== null - ) { - message.selectItem = { $case: "expr", expr: ExprNode.fromPartial(object.selectItem.expr) }; - } - if ( - object.selectItem?.$case === "tableFunction" && - object.selectItem?.tableFunction !== undefined && - object.selectItem?.tableFunction !== null - ) { - message.selectItem = { - $case: "tableFunction", - tableFunction: TableFunction.fromPartial(object.selectItem.tableFunction), - }; - } - return message; - }, -}; - -function createBaseFunctionCall(): FunctionCall { - return { children: [] }; -} - -export const FunctionCall = { - fromJSON(object: any): FunctionCall { - return { children: Array.isArray(object?.children) ? object.children.map((e: any) => ExprNode.fromJSON(e)) : [] }; - }, - - toJSON(message: FunctionCall): unknown { - const obj: any = {}; - if (message.children) { - obj.children = message.children.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.children = []; - } - return obj; - }, - - fromPartial, I>>(object: I): FunctionCall { - const message = createBaseFunctionCall(); - message.children = object.children?.map((e) => ExprNode.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseAggCall(): AggCall { - return { - type: AggCall_Type.UNSPECIFIED, - args: [], - returnType: undefined, - distinct: false, - orderBy: [], - filter: undefined, - }; -} - -export const AggCall = { - fromJSON(object: any): AggCall { - return { - type: isSet(object.type) ? aggCall_TypeFromJSON(object.type) : AggCall_Type.UNSPECIFIED, - args: Array.isArray(object?.args) ? object.args.map((e: any) => InputRef.fromJSON(e)) : [], - returnType: isSet(object.returnType) ? DataType.fromJSON(object.returnType) : undefined, - distinct: isSet(object.distinct) ? Boolean(object.distinct) : false, - orderBy: Array.isArray(object?.orderBy) ? object.orderBy.map((e: any) => ColumnOrder.fromJSON(e)) : [], - filter: isSet(object.filter) ? ExprNode.fromJSON(object.filter) : undefined, - }; - }, - - toJSON(message: AggCall): unknown { - const obj: any = {}; - message.type !== undefined && (obj.type = aggCall_TypeToJSON(message.type)); - if (message.args) { - obj.args = message.args.map((e) => e ? InputRef.toJSON(e) : undefined); - } else { - obj.args = []; - } - message.returnType !== undefined && - (obj.returnType = message.returnType ? DataType.toJSON(message.returnType) : undefined); - message.distinct !== undefined && (obj.distinct = message.distinct); - if (message.orderBy) { - obj.orderBy = message.orderBy.map((e) => e ? ColumnOrder.toJSON(e) : undefined); - } else { - obj.orderBy = []; - } - message.filter !== undefined && (obj.filter = message.filter ? 
ExprNode.toJSON(message.filter) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): AggCall { - const message = createBaseAggCall(); - message.type = object.type ?? AggCall_Type.UNSPECIFIED; - message.args = object.args?.map((e) => InputRef.fromPartial(e)) || []; - message.returnType = (object.returnType !== undefined && object.returnType !== null) - ? DataType.fromPartial(object.returnType) - : undefined; - message.distinct = object.distinct ?? false; - message.orderBy = object.orderBy?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.filter = (object.filter !== undefined && object.filter !== null) - ? ExprNode.fromPartial(object.filter) - : undefined; - return message; - }, -}; - -function createBaseUserDefinedFunction(): UserDefinedFunction { - return { children: [], name: "", argTypes: [], language: "", link: "", identifier: "" }; -} - -export const UserDefinedFunction = { - fromJSON(object: any): UserDefinedFunction { - return { - children: Array.isArray(object?.children) ? object.children.map((e: any) => ExprNode.fromJSON(e)) : [], - name: isSet(object.name) ? String(object.name) : "", - argTypes: Array.isArray(object?.argTypes) ? object.argTypes.map((e: any) => DataType.fromJSON(e)) : [], - language: isSet(object.language) ? String(object.language) : "", - link: isSet(object.link) ? String(object.link) : "", - identifier: isSet(object.identifier) ? String(object.identifier) : "", - }; - }, - - toJSON(message: UserDefinedFunction): unknown { - const obj: any = {}; - if (message.children) { - obj.children = message.children.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.children = []; - } - message.name !== undefined && (obj.name = message.name); - if (message.argTypes) { - obj.argTypes = message.argTypes.map((e) => e ? DataType.toJSON(e) : undefined); - } else { - obj.argTypes = []; - } - message.language !== undefined && (obj.language = message.language); - message.link !== undefined && (obj.link = message.link); - message.identifier !== undefined && (obj.identifier = message.identifier); - return obj; - }, - - fromPartial, I>>(object: I): UserDefinedFunction { - const message = createBaseUserDefinedFunction(); - message.children = object.children?.map((e) => ExprNode.fromPartial(e)) || []; - message.name = object.name ?? ""; - message.argTypes = object.argTypes?.map((e) => DataType.fromPartial(e)) || []; - message.language = object.language ?? ""; - message.link = object.link ?? ""; - message.identifier = object.identifier ?? ""; - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends { $case: string } ? { [K in keyof Omit]?: DeepPartial } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? 
P
-  : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never };
-
-function isSet(value: any): boolean {
-  return value !== null && value !== undefined;
-}
diff --git a/dashboard/proto/gen/health.ts b/dashboard/proto/gen/health.ts
deleted file mode 100644
index c77e2b8bebe9e..0000000000000
--- a/dashboard/proto/gen/health.ts
+++ /dev/null
@@ -1,117 +0,0 @@
-/* eslint-disable */
-
-export const protobufPackage = "health";
-
-export interface HealthCheckRequest {
-  service: string;
-}
-
-export interface HealthCheckResponse {
-  status: HealthCheckResponse_ServingStatus;
-}
-
-export const HealthCheckResponse_ServingStatus = {
-  UNKNOWN: "UNKNOWN",
-  SERVING: "SERVING",
-  NOT_SERVING: "NOT_SERVING",
-  UNRECOGNIZED: "UNRECOGNIZED",
-} as const;
-
-export type HealthCheckResponse_ServingStatus =
-  typeof HealthCheckResponse_ServingStatus[keyof typeof HealthCheckResponse_ServingStatus];
-
-export function healthCheckResponse_ServingStatusFromJSON(object: any): HealthCheckResponse_ServingStatus {
-  switch (object) {
-    case 0:
-    case "UNKNOWN":
-      return HealthCheckResponse_ServingStatus.UNKNOWN;
-    case 1:
-    case "SERVING":
-      return HealthCheckResponse_ServingStatus.SERVING;
-    case 2:
-    case "NOT_SERVING":
-      return HealthCheckResponse_ServingStatus.NOT_SERVING;
-    case -1:
-    case "UNRECOGNIZED":
-    default:
-      return HealthCheckResponse_ServingStatus.UNRECOGNIZED;
-  }
-}
-
-export function healthCheckResponse_ServingStatusToJSON(object: HealthCheckResponse_ServingStatus): string {
-  switch (object) {
-    case HealthCheckResponse_ServingStatus.UNKNOWN:
-      return "UNKNOWN";
-    case HealthCheckResponse_ServingStatus.SERVING:
-      return "SERVING";
-    case HealthCheckResponse_ServingStatus.NOT_SERVING:
-      return "NOT_SERVING";
-    case HealthCheckResponse_ServingStatus.UNRECOGNIZED:
-    default:
-      return "UNRECOGNIZED";
-  }
-}
-
-function createBaseHealthCheckRequest(): HealthCheckRequest {
-  return { service: "" };
-}
-
-export const HealthCheckRequest = {
-  fromJSON(object: any): HealthCheckRequest {
-    return { service: isSet(object.service) ? String(object.service) : "" };
-  },
-
-  toJSON(message: HealthCheckRequest): unknown {
-    const obj: any = {};
-    message.service !== undefined && (obj.service = message.service);
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<HealthCheckRequest>, I>>(object: I): HealthCheckRequest {
-    const message = createBaseHealthCheckRequest();
-    message.service = object.service ?? "";
-    return message;
-  },
-};
-
-function createBaseHealthCheckResponse(): HealthCheckResponse {
-  return { status: HealthCheckResponse_ServingStatus.UNKNOWN };
-}
-
-export const HealthCheckResponse = {
-  fromJSON(object: any): HealthCheckResponse {
-    return {
-      status: isSet(object.status)
-        ? healthCheckResponse_ServingStatusFromJSON(object.status)
-        : HealthCheckResponse_ServingStatus.UNKNOWN,
-    };
-  },
-
-  toJSON(message: HealthCheckResponse): unknown {
-    const obj: any = {};
-    message.status !== undefined && (obj.status = healthCheckResponse_ServingStatusToJSON(message.status));
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<HealthCheckResponse>, I>>(object: I): HealthCheckResponse {
-    const message = createBaseHealthCheckResponse();
-    message.status = object.status ?? HealthCheckResponse_ServingStatus.UNKNOWN;
-    return message;
-  },
-};
-
-type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
-
-export type DeepPartial<T> = T extends Builtin ? T
-  : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>>
-  : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] }
-  : T extends {} ?
{ [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/hummock.ts b/dashboard/proto/gen/hummock.ts deleted file mode 100644 index b747e56ef1c1e..0000000000000 --- a/dashboard/proto/gen/hummock.ts +++ /dev/null @@ -1,4564 +0,0 @@ -/* eslint-disable */ -import { Table } from "./catalog"; -import { Status, WorkerNode } from "./common"; -import { CompactorRuntimeConfig } from "./compactor"; - -export const protobufPackage = "hummock"; - -export const LevelType = { - UNSPECIFIED: "UNSPECIFIED", - NONOVERLAPPING: "NONOVERLAPPING", - OVERLAPPING: "OVERLAPPING", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type LevelType = typeof LevelType[keyof typeof LevelType]; - -export function levelTypeFromJSON(object: any): LevelType { - switch (object) { - case 0: - case "UNSPECIFIED": - return LevelType.UNSPECIFIED; - case 1: - case "NONOVERLAPPING": - return LevelType.NONOVERLAPPING; - case 2: - case "OVERLAPPING": - return LevelType.OVERLAPPING; - case -1: - case "UNRECOGNIZED": - default: - return LevelType.UNRECOGNIZED; - } -} - -export function levelTypeToJSON(object: LevelType): string { - switch (object) { - case LevelType.UNSPECIFIED: - return "UNSPECIFIED"; - case LevelType.NONOVERLAPPING: - return "NONOVERLAPPING"; - case LevelType.OVERLAPPING: - return "OVERLAPPING"; - case LevelType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface SstableInfo { - id: number; - keyRange: KeyRange | undefined; - fileSize: number; - tableIds: number[]; - metaOffset: number; - staleKeyCount: number; - totalKeyCount: number; - /** When a SST is divided, its divide_version will increase one. */ - divideVersion: number; - minEpoch: number; - maxEpoch: number; - uncompressedFileSize: number; -} - -export interface OverlappingLevel { - subLevels: Level[]; - totalFileSize: number; - uncompressedFileSize: number; -} - -export interface Level { - levelIdx: number; - levelType: LevelType; - tableInfos: SstableInfo[]; - totalFileSize: number; - subLevelId: number; - uncompressedFileSize: number; -} - -export interface InputLevel { - levelIdx: number; - levelType: LevelType; - tableInfos: SstableInfo[]; -} - -export interface IntraLevelDelta { - levelIdx: number; - l0SubLevelId: number; - removedTableIds: number[]; - insertedTableInfos: SstableInfo[]; -} - -export interface GroupConstruct { - groupConfig: - | CompactionConfig - | undefined; - /** If `parent_group_id` is not 0, it means `parent_group_id` splits into `parent_group_id` and this group, so this group is not empty initially. 
 */
-  parentGroupId: number;
-  tableIds: number[];
-  groupId: number;
-}
-
-export interface GroupMetaChange {
-  tableIdsAdd: number[];
-  tableIdsRemove: number[];
-}
-
-export interface GroupDestroy {
-}
-
-export interface GroupDelta {
-  deltaType?:
-    | { $case: "intraLevel"; intraLevel: IntraLevelDelta }
-    | { $case: "groupConstruct"; groupConstruct: GroupConstruct }
-    | { $case: "groupDestroy"; groupDestroy: GroupDestroy }
-    | { $case: "groupMetaChange"; groupMetaChange: GroupMetaChange };
-}
-
-export interface UncommittedEpoch {
-  epoch: number;
-  tables: SstableInfo[];
-}
-
-export interface HummockVersion {
-  id: number;
-  /** Levels of each compaction group */
-  levels: { [key: number]: HummockVersion_Levels };
-  maxCommittedEpoch: number;
-  /**
-   * Snapshots with epoch less than the safe epoch have been GCed.
-   * Reads against such an epoch will fail.
-   */
-  safeEpoch: number;
-}
-
-export interface HummockVersion_Levels {
-  levels: Level[];
-  l0: OverlappingLevel | undefined;
-  groupId: number;
-  parentGroupId: number;
-  memberTableIds: number[];
-}
-
-export interface HummockVersion_LevelsEntry {
-  key: number;
-  value: HummockVersion_Levels | undefined;
-}
-
-export interface HummockVersionDelta {
-  id: number;
-  prevId: number;
-  /** Levels of each compaction group */
-  groupDeltas: { [key: number]: HummockVersionDelta_GroupDeltas };
-  maxCommittedEpoch: number;
-  /**
-   * Snapshots with epoch less than the safe epoch have been GCed.
-   * Reads against such an epoch will fail.
-   */
-  safeEpoch: number;
-  trivialMove: boolean;
-  gcSstIds: number[];
-}
-
-export interface HummockVersionDelta_GroupDeltas {
-  groupDeltas: GroupDelta[];
-}
-
-export interface HummockVersionDelta_GroupDeltasEntry {
-  key: number;
-  value: HummockVersionDelta_GroupDeltas | undefined;
-}
-
-export interface HummockVersionDeltas {
-  versionDeltas: HummockVersionDelta[];
-}
-
-/** We will have two epochs after decoupling. */
-export interface HummockSnapshot {
-  /** Epoch with checkpoint; we will read durable data with it. */
-  committedEpoch: number;
-  /**
-   * Epoch without checkpoint; we will read real-time data with it, but it may
-   * be rolled back.
-   */
-  currentEpoch: number;
-}
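// Editor's illustration (not part of the generated file): picking a read
// epoch per the HummockSnapshot comments above. `chooseReadEpoch` and
// `requireDurable` are hypothetical names used only for this sketch; reads
// at `currentEpoch` may observe data that is later rolled back.
function chooseReadEpoch(snapshot: HummockSnapshot, requireDurable: boolean): number {
  return requireDurable ? snapshot.committedEpoch : snapshot.currentEpoch;
}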
-
-export interface VersionUpdatePayload {
-  payload?: { $case: "versionDeltas"; versionDeltas: HummockVersionDeltas } | {
-    $case: "pinnedVersion";
-    pinnedVersion: HummockVersion;
-  };
-}
-
-export interface UnpinVersionBeforeRequest {
-  contextId: number;
-  unpinVersionBefore: number;
-}
-
-export interface UnpinVersionBeforeResponse {
-  status: Status | undefined;
-}
-
-export interface GetCurrentVersionRequest {
-}
-
-export interface GetCurrentVersionResponse {
-  status: Status | undefined;
-  currentVersion: HummockVersion | undefined;
-}
-
-export interface UnpinVersionRequest {
-  contextId: number;
-}
-
-export interface UnpinVersionResponse {
-  status: Status | undefined;
-}
-
-export interface PinSnapshotRequest {
-  contextId: number;
-}
-
-export interface PinSpecificSnapshotRequest {
-  contextId: number;
-  epoch: number;
-}
-
-export interface GetAssignedCompactTaskNumRequest {
-}
-
-export interface GetAssignedCompactTaskNumResponse {
-  numTasks: number;
-}
-
-export interface PinSnapshotResponse {
-  status: Status | undefined;
-  snapshot: HummockSnapshot | undefined;
-}
-
-export interface GetEpochRequest {
-}
-
-export interface GetEpochResponse {
-  status: Status | undefined;
-  snapshot: HummockSnapshot | undefined;
-}
-
-export interface UnpinSnapshotRequest {
-  contextId: number;
-}
-
-export interface UnpinSnapshotResponse {
-  status: Status | undefined;
-}
-
-export interface UnpinSnapshotBeforeRequest {
-  contextId: number;
-  minSnapshot: HummockSnapshot | undefined;
-}
-
-export interface UnpinSnapshotBeforeResponse {
-  status: Status | undefined;
-}
-
-/**
- * When `right_exclusive=false`, it represents [left, right], of which both boundaries are inclusive. When `right_exclusive=true`,
- * it represents [left, right), of which the right boundary is exclusive.
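 * Editor's illustration (assuming bytewise lexicographic comparison, with
 * `bytesCompare` a hypothetical comparator over Uint8Array): a key `k` lies
 * in the range iff
 *   bytesCompare(k, left) >= 0 &&
 *     (rightExclusive ? bytesCompare(k, right) < 0 : bytesCompare(k, right) <= 0)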
- */ -export interface KeyRange { - left: Uint8Array; - right: Uint8Array; - rightExclusive: boolean; -} - -export interface TableOption { - retentionSeconds: number; -} - -export interface CompactTask { - /** SSTs to be compacted, which will be removed from LSM after compaction */ - inputSsts: InputLevel[]; - /** - * In ideal case, the compaction will generate `splits.len()` tables which have key range - * corresponding to that in [`splits`], respectively - */ - splits: KeyRange[]; - /** low watermark in 'ts-aware compaction' */ - watermark: number; - /** compaction output, which will be added to [`target_level`] of LSM after compaction */ - sortedOutputSsts: SstableInfo[]; - /** task id assigned by hummock storage service */ - taskId: number; - /** compaction output will be added to [`target_level`] of LSM after compaction */ - targetLevel: number; - gcDeleteKeys: boolean; - taskStatus: CompactTask_TaskStatus; - /** compaction group the task belongs to */ - compactionGroupId: number; - /** existing_table_ids for compaction drop key */ - existingTableIds: number[]; - compressionAlgorithm: number; - targetFileSize: number; - compactionFilterMask: number; - tableOptions: { [key: number]: TableOption }; - currentEpochTime: number; - targetSubLevelId: number; - /** Identifies whether the task is space_reclaim, if the compact_task_type increases, it will be refactored to enum */ - taskType: CompactTask_TaskType; - splitByStateTable: boolean; -} - -export const CompactTask_TaskStatus = { - UNSPECIFIED: "UNSPECIFIED", - PENDING: "PENDING", - SUCCESS: "SUCCESS", - HEARTBEAT_CANCELED: "HEARTBEAT_CANCELED", - NO_AVAIL_CANCELED: "NO_AVAIL_CANCELED", - ASSIGN_FAIL_CANCELED: "ASSIGN_FAIL_CANCELED", - SEND_FAIL_CANCELED: "SEND_FAIL_CANCELED", - MANUAL_CANCELED: "MANUAL_CANCELED", - INVALID_GROUP_CANCELED: "INVALID_GROUP_CANCELED", - EXECUTE_FAILED: "EXECUTE_FAILED", - JOIN_HANDLE_FAILED: "JOIN_HANDLE_FAILED", - TRACK_SST_ID_FAILED: "TRACK_SST_ID_FAILED", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type CompactTask_TaskStatus = typeof CompactTask_TaskStatus[keyof typeof CompactTask_TaskStatus]; - -export function compactTask_TaskStatusFromJSON(object: any): CompactTask_TaskStatus { - switch (object) { - case 0: - case "UNSPECIFIED": - return CompactTask_TaskStatus.UNSPECIFIED; - case 1: - case "PENDING": - return CompactTask_TaskStatus.PENDING; - case 2: - case "SUCCESS": - return CompactTask_TaskStatus.SUCCESS; - case 3: - case "HEARTBEAT_CANCELED": - return CompactTask_TaskStatus.HEARTBEAT_CANCELED; - case 4: - case "NO_AVAIL_CANCELED": - return CompactTask_TaskStatus.NO_AVAIL_CANCELED; - case 5: - case "ASSIGN_FAIL_CANCELED": - return CompactTask_TaskStatus.ASSIGN_FAIL_CANCELED; - case 6: - case "SEND_FAIL_CANCELED": - return CompactTask_TaskStatus.SEND_FAIL_CANCELED; - case 7: - case "MANUAL_CANCELED": - return CompactTask_TaskStatus.MANUAL_CANCELED; - case 8: - case "INVALID_GROUP_CANCELED": - return CompactTask_TaskStatus.INVALID_GROUP_CANCELED; - case 9: - case "EXECUTE_FAILED": - return CompactTask_TaskStatus.EXECUTE_FAILED; - case 10: - case "JOIN_HANDLE_FAILED": - return CompactTask_TaskStatus.JOIN_HANDLE_FAILED; - case 11: - case "TRACK_SST_ID_FAILED": - return CompactTask_TaskStatus.TRACK_SST_ID_FAILED; - case -1: - case "UNRECOGNIZED": - default: - return CompactTask_TaskStatus.UNRECOGNIZED; - } -} - -export function compactTask_TaskStatusToJSON(object: CompactTask_TaskStatus): string { - switch (object) { - case CompactTask_TaskStatus.UNSPECIFIED: - return "UNSPECIFIED"; - case 
CompactTask_TaskStatus.PENDING: - return "PENDING"; - case CompactTask_TaskStatus.SUCCESS: - return "SUCCESS"; - case CompactTask_TaskStatus.HEARTBEAT_CANCELED: - return "HEARTBEAT_CANCELED"; - case CompactTask_TaskStatus.NO_AVAIL_CANCELED: - return "NO_AVAIL_CANCELED"; - case CompactTask_TaskStatus.ASSIGN_FAIL_CANCELED: - return "ASSIGN_FAIL_CANCELED"; - case CompactTask_TaskStatus.SEND_FAIL_CANCELED: - return "SEND_FAIL_CANCELED"; - case CompactTask_TaskStatus.MANUAL_CANCELED: - return "MANUAL_CANCELED"; - case CompactTask_TaskStatus.INVALID_GROUP_CANCELED: - return "INVALID_GROUP_CANCELED"; - case CompactTask_TaskStatus.EXECUTE_FAILED: - return "EXECUTE_FAILED"; - case CompactTask_TaskStatus.JOIN_HANDLE_FAILED: - return "JOIN_HANDLE_FAILED"; - case CompactTask_TaskStatus.TRACK_SST_ID_FAILED: - return "TRACK_SST_ID_FAILED"; - case CompactTask_TaskStatus.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export const CompactTask_TaskType = { - TYPE_UNSPECIFIED: "TYPE_UNSPECIFIED", - DYNAMIC: "DYNAMIC", - SPACE_RECLAIM: "SPACE_RECLAIM", - MANUAL: "MANUAL", - SHARED_BUFFER: "SHARED_BUFFER", - TTL: "TTL", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type CompactTask_TaskType = typeof CompactTask_TaskType[keyof typeof CompactTask_TaskType]; - -export function compactTask_TaskTypeFromJSON(object: any): CompactTask_TaskType { - switch (object) { - case 0: - case "TYPE_UNSPECIFIED": - return CompactTask_TaskType.TYPE_UNSPECIFIED; - case 1: - case "DYNAMIC": - return CompactTask_TaskType.DYNAMIC; - case 2: - case "SPACE_RECLAIM": - return CompactTask_TaskType.SPACE_RECLAIM; - case 3: - case "MANUAL": - return CompactTask_TaskType.MANUAL; - case 4: - case "SHARED_BUFFER": - return CompactTask_TaskType.SHARED_BUFFER; - case 5: - case "TTL": - return CompactTask_TaskType.TTL; - case -1: - case "UNRECOGNIZED": - default: - return CompactTask_TaskType.UNRECOGNIZED; - } -} - -export function compactTask_TaskTypeToJSON(object: CompactTask_TaskType): string { - switch (object) { - case CompactTask_TaskType.TYPE_UNSPECIFIED: - return "TYPE_UNSPECIFIED"; - case CompactTask_TaskType.DYNAMIC: - return "DYNAMIC"; - case CompactTask_TaskType.SPACE_RECLAIM: - return "SPACE_RECLAIM"; - case CompactTask_TaskType.MANUAL: - return "MANUAL"; - case CompactTask_TaskType.SHARED_BUFFER: - return "SHARED_BUFFER"; - case CompactTask_TaskType.TTL: - return "TTL"; - case CompactTask_TaskType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface CompactTask_TableOptionsEntry { - key: number; - value: TableOption | undefined; -} - -export interface LevelHandler { - level: number; - tasks: LevelHandler_RunningCompactTask[]; -} - -export interface LevelHandler_RunningCompactTask { - taskId: number; - ssts: number[]; - totalFileSize: number; - targetLevel: number; -} - -export interface CompactStatus { - compactionGroupId: number; - levelHandlers: LevelHandler[]; -} - -/** Config info of compaction group. */ -export interface CompactionGroup { - id: number; - compactionConfig: CompactionConfig | undefined; -} - -/** - * Complete info of compaction group. 
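// Editor's note (illustration, not part of the generated file): the enum
// codecs above accept either the proto field number or the string name, so
// both of these decode to the same value:
//   compactTask_TaskStatusFromJSON(2) === CompactTask_TaskStatus.SUCCESS
//   compactTask_TaskStatusFromJSON("SUCCESS") === CompactTask_TaskStatus.SUCCESS
// Unknown inputs fall through to UNRECOGNIZED rather than throwing.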
- * The info is the aggregate of `HummockVersion` and `CompactionGroupConfig` - */ -export interface CompactionGroupInfo { - id: number; - parentId: number; - memberTableIds: number[]; - compactionConfig: CompactionConfig | undefined; -} - -export interface CompactTaskAssignment { - compactTask: CompactTask | undefined; - contextId: number; -} - -export interface GetCompactionTasksRequest { -} - -export interface GetCompactionTasksResponse { - status: Status | undefined; - compactTask: CompactTask | undefined; -} - -export interface ReportCompactionTasksRequest { - contextId: number; - compactTask: CompactTask | undefined; - tableStatsChange: { [key: number]: TableStats }; -} - -export interface ReportCompactionTasksRequest_TableStatsChangeEntry { - key: number; - value: TableStats | undefined; -} - -export interface ReportCompactionTasksResponse { - status: Status | undefined; -} - -export interface HummockPinnedVersion { - contextId: number; - minPinnedId: number; -} - -export interface HummockPinnedSnapshot { - contextId: number; - minimalPinnedSnapshot: number; -} - -export interface GetNewSstIdsRequest { - number: number; -} - -export interface GetNewSstIdsResponse { - status: - | Status - | undefined; - /** inclusive */ - startId: number; - /** exclusive */ - endId: number; -} - -/** - * This is a heartbeat message. Task will be considered dead if - * `CompactTaskProgress` is not received for a timeout - * or `num_ssts_sealed`/`num_ssts_uploaded` do not increase for a timeout. - */ -export interface CompactTaskProgress { - taskId: number; - numSstsSealed: number; - numSstsUploaded: number; -} - -export interface ReportCompactionTaskProgressRequest { - contextId: number; - progress: CompactTaskProgress[]; -} - -export interface ReportCompactionTaskProgressResponse { - status: Status | undefined; -} - -export interface SubscribeCompactTasksRequest { - contextId: number; - maxConcurrentTaskNumber: number; -} - -export interface ValidationTask { - sstInfos: SstableInfo[]; - sstIdToWorkerId: { [key: number]: number }; - epoch: number; -} - -export interface ValidationTask_SstIdToWorkerIdEntry { - key: number; - value: number; -} - -export interface SubscribeCompactTasksResponse { - task?: - | { $case: "compactTask"; compactTask: CompactTask } - | { $case: "vacuumTask"; vacuumTask: VacuumTask } - | { $case: "fullScanTask"; fullScanTask: FullScanTask } - | { $case: "validationTask"; validationTask: ValidationTask } - | { $case: "cancelCompactTask"; cancelCompactTask: CancelCompactTask }; -} - -/** Delete SSTs in object store */ -export interface VacuumTask { - sstableIds: number[]; -} - -/** Scan object store to get candidate orphan SSTs. 
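 * Editor's note (assumed flow, not part of the original file): the candidate
 * ids a worker collects during the scan would be reported back with something
 * like
 *   ReportFullScanTaskRequest.fromPartial({ sstIds: candidateIds })
 * so that SSTs no longer tracked by meta can later be vacuumed.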
*/ -export interface FullScanTask { - sstRetentionTimeSec: number; -} - -/** Cancel compact task */ -export interface CancelCompactTask { - contextId: number; - taskId: number; -} - -export interface ReportVacuumTaskRequest { - vacuumTask: VacuumTask | undefined; -} - -export interface ReportVacuumTaskResponse { - status: Status | undefined; -} - -export interface TriggerManualCompactionRequest { - compactionGroupId: number; - keyRange: KeyRange | undefined; - tableId: number; - level: number; - sstIds: number[]; -} - -export interface TriggerManualCompactionResponse { - status: Status | undefined; -} - -export interface ReportFullScanTaskRequest { - sstIds: number[]; -} - -export interface ReportFullScanTaskResponse { - status: Status | undefined; -} - -export interface TriggerFullGCRequest { - sstRetentionTimeSec: number; -} - -export interface TriggerFullGCResponse { - status: Status | undefined; -} - -export interface ListVersionDeltasRequest { - startId: number; - numLimit: number; - committedEpochLimit: number; -} - -export interface ListVersionDeltasResponse { - versionDeltas: HummockVersionDeltas | undefined; -} - -export interface PinnedVersionsSummary { - pinnedVersions: HummockPinnedVersion[]; - workers: { [key: number]: WorkerNode }; -} - -export interface PinnedVersionsSummary_WorkersEntry { - key: number; - value: WorkerNode | undefined; -} - -export interface PinnedSnapshotsSummary { - pinnedSnapshots: HummockPinnedSnapshot[]; - workers: { [key: number]: WorkerNode }; -} - -export interface PinnedSnapshotsSummary_WorkersEntry { - key: number; - value: WorkerNode | undefined; -} - -export interface RiseCtlGetPinnedVersionsSummaryRequest { -} - -export interface RiseCtlGetPinnedVersionsSummaryResponse { - summary: PinnedVersionsSummary | undefined; -} - -export interface RiseCtlGetPinnedSnapshotsSummaryRequest { -} - -export interface RiseCtlGetPinnedSnapshotsSummaryResponse { - summary: PinnedSnapshotsSummary | undefined; -} - -export interface InitMetadataForReplayRequest { - tables: Table[]; - compactionGroups: CompactionGroupInfo[]; -} - -export interface InitMetadataForReplayResponse { -} - -export interface ReplayVersionDeltaRequest { - versionDelta: HummockVersionDelta | undefined; -} - -export interface ReplayVersionDeltaResponse { - version: HummockVersion | undefined; - modifiedCompactionGroups: number[]; -} - -export interface TriggerCompactionDeterministicRequest { - versionId: number; - compactionGroups: number[]; -} - -export interface TriggerCompactionDeterministicResponse { -} - -export interface DisableCommitEpochRequest { -} - -export interface DisableCommitEpochResponse { - currentVersion: HummockVersion | undefined; -} - -export interface RiseCtlListCompactionGroupRequest { -} - -export interface RiseCtlListCompactionGroupResponse { - status: Status | undefined; - compactionGroups: CompactionGroupInfo[]; -} - -export interface RiseCtlUpdateCompactionConfigRequest { - compactionGroupIds: number[]; - configs: RiseCtlUpdateCompactionConfigRequest_MutableConfig[]; -} - -export interface RiseCtlUpdateCompactionConfigRequest_MutableConfig { - mutableConfig?: - | { $case: "maxBytesForLevelBase"; maxBytesForLevelBase: number } - | { $case: "maxBytesForLevelMultiplier"; maxBytesForLevelMultiplier: number } - | { $case: "maxCompactionBytes"; maxCompactionBytes: number } - | { $case: "subLevelMaxCompactionBytes"; subLevelMaxCompactionBytes: number } - | { $case: "level0TierCompactFileNumber"; level0TierCompactFileNumber: number } - | { $case: "targetFileSizeBase"; 
targetFileSizeBase: number } - | { $case: "compactionFilterMask"; compactionFilterMask: number } - | { $case: "maxSubCompaction"; maxSubCompaction: number }; -} - -export interface RiseCtlUpdateCompactionConfigResponse { - status: Status | undefined; -} - -export interface SetCompactorRuntimeConfigRequest { - contextId: number; - config: CompactorRuntimeConfig | undefined; -} - -export interface SetCompactorRuntimeConfigResponse { -} - -export interface PinVersionRequest { - contextId: number; -} - -export interface PinVersionResponse { - pinnedVersion: HummockVersion | undefined; -} - -export interface SplitCompactionGroupRequest { - groupId: number; - tableIds: number[]; -} - -export interface SplitCompactionGroupResponse { - newGroupId: number; -} - -export interface CompactionConfig { - maxBytesForLevelBase: number; - maxLevel: number; - maxBytesForLevelMultiplier: number; - maxCompactionBytes: number; - subLevelMaxCompactionBytes: number; - level0TierCompactFileNumber: number; - compactionMode: CompactionConfig_CompactionMode; - compressionAlgorithm: string[]; - targetFileSizeBase: number; - compactionFilterMask: number; - maxSubCompaction: number; - maxSpaceReclaimBytes: number; - splitByStateTable: boolean; -} - -export const CompactionConfig_CompactionMode = { - UNSPECIFIED: "UNSPECIFIED", - RANGE: "RANGE", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type CompactionConfig_CompactionMode = - typeof CompactionConfig_CompactionMode[keyof typeof CompactionConfig_CompactionMode]; - -export function compactionConfig_CompactionModeFromJSON(object: any): CompactionConfig_CompactionMode { - switch (object) { - case 0: - case "UNSPECIFIED": - return CompactionConfig_CompactionMode.UNSPECIFIED; - case 1: - case "RANGE": - return CompactionConfig_CompactionMode.RANGE; - case -1: - case "UNRECOGNIZED": - default: - return CompactionConfig_CompactionMode.UNRECOGNIZED; - } -} - -export function compactionConfig_CompactionModeToJSON(object: CompactionConfig_CompactionMode): string { - switch (object) { - case CompactionConfig_CompactionMode.UNSPECIFIED: - return "UNSPECIFIED"; - case CompactionConfig_CompactionMode.RANGE: - return "RANGE"; - case CompactionConfig_CompactionMode.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface TableStats { - totalKeySize: number; - totalValueSize: number; - totalKeyCount: number; -} - -export interface HummockVersionStats { - hummockVersionId: number; - tableStats: { [key: number]: TableStats }; -} - -export interface HummockVersionStats_TableStatsEntry { - key: number; - value: TableStats | undefined; -} - -function createBaseSstableInfo(): SstableInfo { - return { - id: 0, - keyRange: undefined, - fileSize: 0, - tableIds: [], - metaOffset: 0, - staleKeyCount: 0, - totalKeyCount: 0, - divideVersion: 0, - minEpoch: 0, - maxEpoch: 0, - uncompressedFileSize: 0, - }; -} - -export const SstableInfo = { - fromJSON(object: any): SstableInfo { - return { - id: isSet(object.id) ? Number(object.id) : 0, - keyRange: isSet(object.keyRange) ? KeyRange.fromJSON(object.keyRange) : undefined, - fileSize: isSet(object.fileSize) ? Number(object.fileSize) : 0, - tableIds: Array.isArray(object?.tableIds) ? object.tableIds.map((e: any) => Number(e)) : [], - metaOffset: isSet(object.metaOffset) ? Number(object.metaOffset) : 0, - staleKeyCount: isSet(object.staleKeyCount) ? Number(object.staleKeyCount) : 0, - totalKeyCount: isSet(object.totalKeyCount) ? Number(object.totalKeyCount) : 0, - divideVersion: isSet(object.divideVersion) ? 
Number(object.divideVersion) : 0, - minEpoch: isSet(object.minEpoch) ? Number(object.minEpoch) : 0, - maxEpoch: isSet(object.maxEpoch) ? Number(object.maxEpoch) : 0, - uncompressedFileSize: isSet(object.uncompressedFileSize) ? Number(object.uncompressedFileSize) : 0, - }; - }, - - toJSON(message: SstableInfo): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.keyRange !== undefined && (obj.keyRange = message.keyRange ? KeyRange.toJSON(message.keyRange) : undefined); - message.fileSize !== undefined && (obj.fileSize = Math.round(message.fileSize)); - if (message.tableIds) { - obj.tableIds = message.tableIds.map((e) => Math.round(e)); - } else { - obj.tableIds = []; - } - message.metaOffset !== undefined && (obj.metaOffset = Math.round(message.metaOffset)); - message.staleKeyCount !== undefined && (obj.staleKeyCount = Math.round(message.staleKeyCount)); - message.totalKeyCount !== undefined && (obj.totalKeyCount = Math.round(message.totalKeyCount)); - message.divideVersion !== undefined && (obj.divideVersion = Math.round(message.divideVersion)); - message.minEpoch !== undefined && (obj.minEpoch = Math.round(message.minEpoch)); - message.maxEpoch !== undefined && (obj.maxEpoch = Math.round(message.maxEpoch)); - message.uncompressedFileSize !== undefined && (obj.uncompressedFileSize = Math.round(message.uncompressedFileSize)); - return obj; - }, - - fromPartial, I>>(object: I): SstableInfo { - const message = createBaseSstableInfo(); - message.id = object.id ?? 0; - message.keyRange = (object.keyRange !== undefined && object.keyRange !== null) - ? KeyRange.fromPartial(object.keyRange) - : undefined; - message.fileSize = object.fileSize ?? 0; - message.tableIds = object.tableIds?.map((e) => e) || []; - message.metaOffset = object.metaOffset ?? 0; - message.staleKeyCount = object.staleKeyCount ?? 0; - message.totalKeyCount = object.totalKeyCount ?? 0; - message.divideVersion = object.divideVersion ?? 0; - message.minEpoch = object.minEpoch ?? 0; - message.maxEpoch = object.maxEpoch ?? 0; - message.uncompressedFileSize = object.uncompressedFileSize ?? 0; - return message; - }, -}; - -function createBaseOverlappingLevel(): OverlappingLevel { - return { subLevels: [], totalFileSize: 0, uncompressedFileSize: 0 }; -} - -export const OverlappingLevel = { - fromJSON(object: any): OverlappingLevel { - return { - subLevels: Array.isArray(object?.subLevels) ? object.subLevels.map((e: any) => Level.fromJSON(e)) : [], - totalFileSize: isSet(object.totalFileSize) ? Number(object.totalFileSize) : 0, - uncompressedFileSize: isSet(object.uncompressedFileSize) ? Number(object.uncompressedFileSize) : 0, - }; - }, - - toJSON(message: OverlappingLevel): unknown { - const obj: any = {}; - if (message.subLevels) { - obj.subLevels = message.subLevels.map((e) => e ? Level.toJSON(e) : undefined); - } else { - obj.subLevels = []; - } - message.totalFileSize !== undefined && (obj.totalFileSize = Math.round(message.totalFileSize)); - message.uncompressedFileSize !== undefined && (obj.uncompressedFileSize = Math.round(message.uncompressedFileSize)); - return obj; - }, - - fromPartial, I>>(object: I): OverlappingLevel { - const message = createBaseOverlappingLevel(); - message.subLevels = object.subLevels?.map((e) => Level.fromPartial(e)) || []; - message.totalFileSize = object.totalFileSize ?? 0; - message.uncompressedFileSize = object.uncompressedFileSize ?? 
0; - return message; - }, -}; - -function createBaseLevel(): Level { - return { - levelIdx: 0, - levelType: LevelType.UNSPECIFIED, - tableInfos: [], - totalFileSize: 0, - subLevelId: 0, - uncompressedFileSize: 0, - }; -} - -export const Level = { - fromJSON(object: any): Level { - return { - levelIdx: isSet(object.levelIdx) ? Number(object.levelIdx) : 0, - levelType: isSet(object.levelType) ? levelTypeFromJSON(object.levelType) : LevelType.UNSPECIFIED, - tableInfos: Array.isArray(object?.tableInfos) ? object.tableInfos.map((e: any) => SstableInfo.fromJSON(e)) : [], - totalFileSize: isSet(object.totalFileSize) ? Number(object.totalFileSize) : 0, - subLevelId: isSet(object.subLevelId) ? Number(object.subLevelId) : 0, - uncompressedFileSize: isSet(object.uncompressedFileSize) ? Number(object.uncompressedFileSize) : 0, - }; - }, - - toJSON(message: Level): unknown { - const obj: any = {}; - message.levelIdx !== undefined && (obj.levelIdx = Math.round(message.levelIdx)); - message.levelType !== undefined && (obj.levelType = levelTypeToJSON(message.levelType)); - if (message.tableInfos) { - obj.tableInfos = message.tableInfos.map((e) => e ? SstableInfo.toJSON(e) : undefined); - } else { - obj.tableInfos = []; - } - message.totalFileSize !== undefined && (obj.totalFileSize = Math.round(message.totalFileSize)); - message.subLevelId !== undefined && (obj.subLevelId = Math.round(message.subLevelId)); - message.uncompressedFileSize !== undefined && (obj.uncompressedFileSize = Math.round(message.uncompressedFileSize)); - return obj; - }, - - fromPartial, I>>(object: I): Level { - const message = createBaseLevel(); - message.levelIdx = object.levelIdx ?? 0; - message.levelType = object.levelType ?? LevelType.UNSPECIFIED; - message.tableInfos = object.tableInfos?.map((e) => SstableInfo.fromPartial(e)) || []; - message.totalFileSize = object.totalFileSize ?? 0; - message.subLevelId = object.subLevelId ?? 0; - message.uncompressedFileSize = object.uncompressedFileSize ?? 0; - return message; - }, -}; - -function createBaseInputLevel(): InputLevel { - return { levelIdx: 0, levelType: LevelType.UNSPECIFIED, tableInfos: [] }; -} - -export const InputLevel = { - fromJSON(object: any): InputLevel { - return { - levelIdx: isSet(object.levelIdx) ? Number(object.levelIdx) : 0, - levelType: isSet(object.levelType) ? levelTypeFromJSON(object.levelType) : LevelType.UNSPECIFIED, - tableInfos: Array.isArray(object?.tableInfos) ? object.tableInfos.map((e: any) => SstableInfo.fromJSON(e)) : [], - }; - }, - - toJSON(message: InputLevel): unknown { - const obj: any = {}; - message.levelIdx !== undefined && (obj.levelIdx = Math.round(message.levelIdx)); - message.levelType !== undefined && (obj.levelType = levelTypeToJSON(message.levelType)); - if (message.tableInfos) { - obj.tableInfos = message.tableInfos.map((e) => e ? SstableInfo.toJSON(e) : undefined); - } else { - obj.tableInfos = []; - } - return obj; - }, - - fromPartial, I>>(object: I): InputLevel { - const message = createBaseInputLevel(); - message.levelIdx = object.levelIdx ?? 0; - message.levelType = object.levelType ?? LevelType.UNSPECIFIED; - message.tableInfos = object.tableInfos?.map((e) => SstableInfo.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseIntraLevelDelta(): IntraLevelDelta { - return { levelIdx: 0, l0SubLevelId: 0, removedTableIds: [], insertedTableInfos: [] }; -} - -export const IntraLevelDelta = { - fromJSON(object: any): IntraLevelDelta { - return { - levelIdx: isSet(object.levelIdx) ? 
Number(object.levelIdx) : 0, - l0SubLevelId: isSet(object.l0SubLevelId) ? Number(object.l0SubLevelId) : 0, - removedTableIds: Array.isArray(object?.removedTableIds) ? object.removedTableIds.map((e: any) => Number(e)) : [], - insertedTableInfos: Array.isArray(object?.insertedTableInfos) - ? object.insertedTableInfos.map((e: any) => SstableInfo.fromJSON(e)) - : [], - }; - }, - - toJSON(message: IntraLevelDelta): unknown { - const obj: any = {}; - message.levelIdx !== undefined && (obj.levelIdx = Math.round(message.levelIdx)); - message.l0SubLevelId !== undefined && (obj.l0SubLevelId = Math.round(message.l0SubLevelId)); - if (message.removedTableIds) { - obj.removedTableIds = message.removedTableIds.map((e) => Math.round(e)); - } else { - obj.removedTableIds = []; - } - if (message.insertedTableInfos) { - obj.insertedTableInfos = message.insertedTableInfos.map((e) => e ? SstableInfo.toJSON(e) : undefined); - } else { - obj.insertedTableInfos = []; - } - return obj; - }, - - fromPartial, I>>(object: I): IntraLevelDelta { - const message = createBaseIntraLevelDelta(); - message.levelIdx = object.levelIdx ?? 0; - message.l0SubLevelId = object.l0SubLevelId ?? 0; - message.removedTableIds = object.removedTableIds?.map((e) => e) || []; - message.insertedTableInfos = object.insertedTableInfos?.map((e) => SstableInfo.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseGroupConstruct(): GroupConstruct { - return { groupConfig: undefined, parentGroupId: 0, tableIds: [], groupId: 0 }; -} - -export const GroupConstruct = { - fromJSON(object: any): GroupConstruct { - return { - groupConfig: isSet(object.groupConfig) ? CompactionConfig.fromJSON(object.groupConfig) : undefined, - parentGroupId: isSet(object.parentGroupId) ? Number(object.parentGroupId) : 0, - tableIds: Array.isArray(object?.tableIds) ? object.tableIds.map((e: any) => Number(e)) : [], - groupId: isSet(object.groupId) ? Number(object.groupId) : 0, - }; - }, - - toJSON(message: GroupConstruct): unknown { - const obj: any = {}; - message.groupConfig !== undefined && - (obj.groupConfig = message.groupConfig ? CompactionConfig.toJSON(message.groupConfig) : undefined); - message.parentGroupId !== undefined && (obj.parentGroupId = Math.round(message.parentGroupId)); - if (message.tableIds) { - obj.tableIds = message.tableIds.map((e) => Math.round(e)); - } else { - obj.tableIds = []; - } - message.groupId !== undefined && (obj.groupId = Math.round(message.groupId)); - return obj; - }, - - fromPartial, I>>(object: I): GroupConstruct { - const message = createBaseGroupConstruct(); - message.groupConfig = (object.groupConfig !== undefined && object.groupConfig !== null) - ? CompactionConfig.fromPartial(object.groupConfig) - : undefined; - message.parentGroupId = object.parentGroupId ?? 0; - message.tableIds = object.tableIds?.map((e) => e) || []; - message.groupId = object.groupId ?? 0; - return message; - }, -}; - -function createBaseGroupMetaChange(): GroupMetaChange { - return { tableIdsAdd: [], tableIdsRemove: [] }; -} - -export const GroupMetaChange = { - fromJSON(object: any): GroupMetaChange { - return { - tableIdsAdd: Array.isArray(object?.tableIdsAdd) ? object.tableIdsAdd.map((e: any) => Number(e)) : [], - tableIdsRemove: Array.isArray(object?.tableIdsRemove) ? 
object.tableIdsRemove.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: GroupMetaChange): unknown { - const obj: any = {}; - if (message.tableIdsAdd) { - obj.tableIdsAdd = message.tableIdsAdd.map((e) => Math.round(e)); - } else { - obj.tableIdsAdd = []; - } - if (message.tableIdsRemove) { - obj.tableIdsRemove = message.tableIdsRemove.map((e) => Math.round(e)); - } else { - obj.tableIdsRemove = []; - } - return obj; - }, - - fromPartial, I>>(object: I): GroupMetaChange { - const message = createBaseGroupMetaChange(); - message.tableIdsAdd = object.tableIdsAdd?.map((e) => e) || []; - message.tableIdsRemove = object.tableIdsRemove?.map((e) => e) || []; - return message; - }, -}; - -function createBaseGroupDestroy(): GroupDestroy { - return {}; -} - -export const GroupDestroy = { - fromJSON(_: any): GroupDestroy { - return {}; - }, - - toJSON(_: GroupDestroy): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): GroupDestroy { - const message = createBaseGroupDestroy(); - return message; - }, -}; - -function createBaseGroupDelta(): GroupDelta { - return { deltaType: undefined }; -} - -export const GroupDelta = { - fromJSON(object: any): GroupDelta { - return { - deltaType: isSet(object.intraLevel) - ? { $case: "intraLevel", intraLevel: IntraLevelDelta.fromJSON(object.intraLevel) } - : isSet(object.groupConstruct) - ? { $case: "groupConstruct", groupConstruct: GroupConstruct.fromJSON(object.groupConstruct) } - : isSet(object.groupDestroy) - ? { $case: "groupDestroy", groupDestroy: GroupDestroy.fromJSON(object.groupDestroy) } - : isSet(object.groupMetaChange) - ? { $case: "groupMetaChange", groupMetaChange: GroupMetaChange.fromJSON(object.groupMetaChange) } - : undefined, - }; - }, - - toJSON(message: GroupDelta): unknown { - const obj: any = {}; - message.deltaType?.$case === "intraLevel" && (obj.intraLevel = message.deltaType?.intraLevel - ? IntraLevelDelta.toJSON(message.deltaType?.intraLevel) - : undefined); - message.deltaType?.$case === "groupConstruct" && (obj.groupConstruct = message.deltaType?.groupConstruct - ? GroupConstruct.toJSON(message.deltaType?.groupConstruct) - : undefined); - message.deltaType?.$case === "groupDestroy" && (obj.groupDestroy = message.deltaType?.groupDestroy - ? GroupDestroy.toJSON(message.deltaType?.groupDestroy) - : undefined); - message.deltaType?.$case === "groupMetaChange" && (obj.groupMetaChange = message.deltaType?.groupMetaChange - ? 
GroupMetaChange.toJSON(message.deltaType?.groupMetaChange) - : undefined); - return obj; - }, - - fromPartial, I>>(object: I): GroupDelta { - const message = createBaseGroupDelta(); - if ( - object.deltaType?.$case === "intraLevel" && - object.deltaType?.intraLevel !== undefined && - object.deltaType?.intraLevel !== null - ) { - message.deltaType = { $case: "intraLevel", intraLevel: IntraLevelDelta.fromPartial(object.deltaType.intraLevel) }; - } - if ( - object.deltaType?.$case === "groupConstruct" && - object.deltaType?.groupConstruct !== undefined && - object.deltaType?.groupConstruct !== null - ) { - message.deltaType = { - $case: "groupConstruct", - groupConstruct: GroupConstruct.fromPartial(object.deltaType.groupConstruct), - }; - } - if ( - object.deltaType?.$case === "groupDestroy" && - object.deltaType?.groupDestroy !== undefined && - object.deltaType?.groupDestroy !== null - ) { - message.deltaType = { - $case: "groupDestroy", - groupDestroy: GroupDestroy.fromPartial(object.deltaType.groupDestroy), - }; - } - if ( - object.deltaType?.$case === "groupMetaChange" && - object.deltaType?.groupMetaChange !== undefined && - object.deltaType?.groupMetaChange !== null - ) { - message.deltaType = { - $case: "groupMetaChange", - groupMetaChange: GroupMetaChange.fromPartial(object.deltaType.groupMetaChange), - }; - } - return message; - }, -}; - -function createBaseUncommittedEpoch(): UncommittedEpoch { - return { epoch: 0, tables: [] }; -} - -export const UncommittedEpoch = { - fromJSON(object: any): UncommittedEpoch { - return { - epoch: isSet(object.epoch) ? Number(object.epoch) : 0, - tables: Array.isArray(object?.tables) ? object.tables.map((e: any) => SstableInfo.fromJSON(e)) : [], - }; - }, - - toJSON(message: UncommittedEpoch): unknown { - const obj: any = {}; - message.epoch !== undefined && (obj.epoch = Math.round(message.epoch)); - if (message.tables) { - obj.tables = message.tables.map((e) => e ? SstableInfo.toJSON(e) : undefined); - } else { - obj.tables = []; - } - return obj; - }, - - fromPartial, I>>(object: I): UncommittedEpoch { - const message = createBaseUncommittedEpoch(); - message.epoch = object.epoch ?? 0; - message.tables = object.tables?.map((e) => SstableInfo.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseHummockVersion(): HummockVersion { - return { id: 0, levels: {}, maxCommittedEpoch: 0, safeEpoch: 0 }; -} - -export const HummockVersion = { - fromJSON(object: any): HummockVersion { - return { - id: isSet(object.id) ? Number(object.id) : 0, - levels: isObject(object.levels) - ? Object.entries(object.levels).reduce<{ [key: number]: HummockVersion_Levels }>((acc, [key, value]) => { - acc[Number(key)] = HummockVersion_Levels.fromJSON(value); - return acc; - }, {}) - : {}, - maxCommittedEpoch: isSet(object.maxCommittedEpoch) ? Number(object.maxCommittedEpoch) : 0, - safeEpoch: isSet(object.safeEpoch) ? 
Number(object.safeEpoch) : 0, - }; - }, - - toJSON(message: HummockVersion): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - obj.levels = {}; - if (message.levels) { - Object.entries(message.levels).forEach(([k, v]) => { - obj.levels[k] = HummockVersion_Levels.toJSON(v); - }); - } - message.maxCommittedEpoch !== undefined && (obj.maxCommittedEpoch = Math.round(message.maxCommittedEpoch)); - message.safeEpoch !== undefined && (obj.safeEpoch = Math.round(message.safeEpoch)); - return obj; - }, - - fromPartial, I>>(object: I): HummockVersion { - const message = createBaseHummockVersion(); - message.id = object.id ?? 0; - message.levels = Object.entries(object.levels ?? {}).reduce<{ [key: number]: HummockVersion_Levels }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = HummockVersion_Levels.fromPartial(value); - } - return acc; - }, - {}, - ); - message.maxCommittedEpoch = object.maxCommittedEpoch ?? 0; - message.safeEpoch = object.safeEpoch ?? 0; - return message; - }, -}; - -function createBaseHummockVersion_Levels(): HummockVersion_Levels { - return { levels: [], l0: undefined, groupId: 0, parentGroupId: 0, memberTableIds: [] }; -} - -export const HummockVersion_Levels = { - fromJSON(object: any): HummockVersion_Levels { - return { - levels: Array.isArray(object?.levels) ? object.levels.map((e: any) => Level.fromJSON(e)) : [], - l0: isSet(object.l0) ? OverlappingLevel.fromJSON(object.l0) : undefined, - groupId: isSet(object.groupId) ? Number(object.groupId) : 0, - parentGroupId: isSet(object.parentGroupId) ? Number(object.parentGroupId) : 0, - memberTableIds: Array.isArray(object?.memberTableIds) ? object.memberTableIds.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: HummockVersion_Levels): unknown { - const obj: any = {}; - if (message.levels) { - obj.levels = message.levels.map((e) => e ? Level.toJSON(e) : undefined); - } else { - obj.levels = []; - } - message.l0 !== undefined && (obj.l0 = message.l0 ? OverlappingLevel.toJSON(message.l0) : undefined); - message.groupId !== undefined && (obj.groupId = Math.round(message.groupId)); - message.parentGroupId !== undefined && (obj.parentGroupId = Math.round(message.parentGroupId)); - if (message.memberTableIds) { - obj.memberTableIds = message.memberTableIds.map((e) => Math.round(e)); - } else { - obj.memberTableIds = []; - } - return obj; - }, - - fromPartial, I>>(object: I): HummockVersion_Levels { - const message = createBaseHummockVersion_Levels(); - message.levels = object.levels?.map((e) => Level.fromPartial(e)) || []; - message.l0 = (object.l0 !== undefined && object.l0 !== null) ? OverlappingLevel.fromPartial(object.l0) : undefined; - message.groupId = object.groupId ?? 0; - message.parentGroupId = object.parentGroupId ?? 0; - message.memberTableIds = object.memberTableIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseHummockVersion_LevelsEntry(): HummockVersion_LevelsEntry { - return { key: 0, value: undefined }; -} - -export const HummockVersion_LevelsEntry = { - fromJSON(object: any): HummockVersion_LevelsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? HummockVersion_Levels.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: HummockVersion_LevelsEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && - (obj.value = message.value ? 
HummockVersion_Levels.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): HummockVersion_LevelsEntry { - const message = createBaseHummockVersion_LevelsEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? HummockVersion_Levels.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseHummockVersionDelta(): HummockVersionDelta { - return { id: 0, prevId: 0, groupDeltas: {}, maxCommittedEpoch: 0, safeEpoch: 0, trivialMove: false, gcSstIds: [] }; -} - -export const HummockVersionDelta = { - fromJSON(object: any): HummockVersionDelta { - return { - id: isSet(object.id) ? Number(object.id) : 0, - prevId: isSet(object.prevId) ? Number(object.prevId) : 0, - groupDeltas: isObject(object.groupDeltas) - ? Object.entries(object.groupDeltas).reduce<{ [key: number]: HummockVersionDelta_GroupDeltas }>( - (acc, [key, value]) => { - acc[Number(key)] = HummockVersionDelta_GroupDeltas.fromJSON(value); - return acc; - }, - {}, - ) - : {}, - maxCommittedEpoch: isSet(object.maxCommittedEpoch) ? Number(object.maxCommittedEpoch) : 0, - safeEpoch: isSet(object.safeEpoch) ? Number(object.safeEpoch) : 0, - trivialMove: isSet(object.trivialMove) ? Boolean(object.trivialMove) : false, - gcSstIds: Array.isArray(object?.gcSstIds) - ? object.gcSstIds.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: HummockVersionDelta): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.prevId !== undefined && (obj.prevId = Math.round(message.prevId)); - obj.groupDeltas = {}; - if (message.groupDeltas) { - Object.entries(message.groupDeltas).forEach(([k, v]) => { - obj.groupDeltas[k] = HummockVersionDelta_GroupDeltas.toJSON(v); - }); - } - message.maxCommittedEpoch !== undefined && (obj.maxCommittedEpoch = Math.round(message.maxCommittedEpoch)); - message.safeEpoch !== undefined && (obj.safeEpoch = Math.round(message.safeEpoch)); - message.trivialMove !== undefined && (obj.trivialMove = message.trivialMove); - if (message.gcSstIds) { - obj.gcSstIds = message.gcSstIds.map((e) => Math.round(e)); - } else { - obj.gcSstIds = []; - } - return obj; - }, - - fromPartial, I>>(object: I): HummockVersionDelta { - const message = createBaseHummockVersionDelta(); - message.id = object.id ?? 0; - message.prevId = object.prevId ?? 0; - message.groupDeltas = Object.entries(object.groupDeltas ?? {}).reduce< - { [key: number]: HummockVersionDelta_GroupDeltas } - >((acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = HummockVersionDelta_GroupDeltas.fromPartial(value); - } - return acc; - }, {}); - message.maxCommittedEpoch = object.maxCommittedEpoch ?? 0; - message.safeEpoch = object.safeEpoch ?? 0; - message.trivialMove = object.trivialMove ?? false; - message.gcSstIds = object.gcSstIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseHummockVersionDelta_GroupDeltas(): HummockVersionDelta_GroupDeltas { - return { groupDeltas: [] }; -} - -export const HummockVersionDelta_GroupDeltas = { - fromJSON(object: any): HummockVersionDelta_GroupDeltas { - return { - groupDeltas: Array.isArray(object?.groupDeltas) ? object.groupDeltas.map((e: any) => GroupDelta.fromJSON(e)) : [], - }; - }, - - toJSON(message: HummockVersionDelta_GroupDeltas): unknown { - const obj: any = {}; - if (message.groupDeltas) { - obj.groupDeltas = message.groupDeltas.map((e) => e ? 
GroupDelta.toJSON(e) : undefined); - } else { - obj.groupDeltas = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): HummockVersionDelta_GroupDeltas { - const message = createBaseHummockVersionDelta_GroupDeltas(); - message.groupDeltas = object.groupDeltas?.map((e) => GroupDelta.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseHummockVersionDelta_GroupDeltasEntry(): HummockVersionDelta_GroupDeltasEntry { - return { key: 0, value: undefined }; -} - -export const HummockVersionDelta_GroupDeltasEntry = { - fromJSON(object: any): HummockVersionDelta_GroupDeltasEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? HummockVersionDelta_GroupDeltas.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: HummockVersionDelta_GroupDeltasEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && - (obj.value = message.value ? HummockVersionDelta_GroupDeltas.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): HummockVersionDelta_GroupDeltasEntry { - const message = createBaseHummockVersionDelta_GroupDeltasEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? HummockVersionDelta_GroupDeltas.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseHummockVersionDeltas(): HummockVersionDeltas { - return { versionDeltas: [] }; -} - -export const HummockVersionDeltas = { - fromJSON(object: any): HummockVersionDeltas { - return { - versionDeltas: Array.isArray(object?.versionDeltas) - ? object.versionDeltas.map((e: any) => HummockVersionDelta.fromJSON(e)) - : [], - }; - }, - - toJSON(message: HummockVersionDeltas): unknown { - const obj: any = {}; - if (message.versionDeltas) { - obj.versionDeltas = message.versionDeltas.map((e) => e ? HummockVersionDelta.toJSON(e) : undefined); - } else { - obj.versionDeltas = []; - } - return obj; - }, - - fromPartial, I>>(object: I): HummockVersionDeltas { - const message = createBaseHummockVersionDeltas(); - message.versionDeltas = object.versionDeltas?.map((e) => HummockVersionDelta.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseHummockSnapshot(): HummockSnapshot { - return { committedEpoch: 0, currentEpoch: 0 }; -} - -export const HummockSnapshot = { - fromJSON(object: any): HummockSnapshot { - return { - committedEpoch: isSet(object.committedEpoch) ? Number(object.committedEpoch) : 0, - currentEpoch: isSet(object.currentEpoch) ? Number(object.currentEpoch) : 0, - }; - }, - - toJSON(message: HummockSnapshot): unknown { - const obj: any = {}; - message.committedEpoch !== undefined && (obj.committedEpoch = Math.round(message.committedEpoch)); - message.currentEpoch !== undefined && (obj.currentEpoch = Math.round(message.currentEpoch)); - return obj; - }, - - fromPartial, I>>(object: I): HummockSnapshot { - const message = createBaseHummockSnapshot(); - message.committedEpoch = object.committedEpoch ?? 0; - message.currentEpoch = object.currentEpoch ?? 0; - return message; - }, -}; - -function createBaseVersionUpdatePayload(): VersionUpdatePayload { - return { payload: undefined }; -} - -export const VersionUpdatePayload = { - fromJSON(object: any): VersionUpdatePayload { - return { - payload: isSet(object.versionDeltas) - ? 
{ $case: "versionDeltas", versionDeltas: HummockVersionDeltas.fromJSON(object.versionDeltas) } - : isSet(object.pinnedVersion) - ? { $case: "pinnedVersion", pinnedVersion: HummockVersion.fromJSON(object.pinnedVersion) } - : undefined, - }; - }, - - toJSON(message: VersionUpdatePayload): unknown { - const obj: any = {}; - message.payload?.$case === "versionDeltas" && (obj.versionDeltas = message.payload?.versionDeltas - ? HummockVersionDeltas.toJSON(message.payload?.versionDeltas) - : undefined); - message.payload?.$case === "pinnedVersion" && (obj.pinnedVersion = message.payload?.pinnedVersion - ? HummockVersion.toJSON(message.payload?.pinnedVersion) - : undefined); - return obj; - }, - - fromPartial, I>>(object: I): VersionUpdatePayload { - const message = createBaseVersionUpdatePayload(); - if ( - object.payload?.$case === "versionDeltas" && - object.payload?.versionDeltas !== undefined && - object.payload?.versionDeltas !== null - ) { - message.payload = { - $case: "versionDeltas", - versionDeltas: HummockVersionDeltas.fromPartial(object.payload.versionDeltas), - }; - } - if ( - object.payload?.$case === "pinnedVersion" && - object.payload?.pinnedVersion !== undefined && - object.payload?.pinnedVersion !== null - ) { - message.payload = { - $case: "pinnedVersion", - pinnedVersion: HummockVersion.fromPartial(object.payload.pinnedVersion), - }; - } - return message; - }, -}; - -function createBaseUnpinVersionBeforeRequest(): UnpinVersionBeforeRequest { - return { contextId: 0, unpinVersionBefore: 0 }; -} - -export const UnpinVersionBeforeRequest = { - fromJSON(object: any): UnpinVersionBeforeRequest { - return { - contextId: isSet(object.contextId) ? Number(object.contextId) : 0, - unpinVersionBefore: isSet(object.unpinVersionBefore) ? Number(object.unpinVersionBefore) : 0, - }; - }, - - toJSON(message: UnpinVersionBeforeRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - message.unpinVersionBefore !== undefined && (obj.unpinVersionBefore = Math.round(message.unpinVersionBefore)); - return obj; - }, - - fromPartial, I>>(object: I): UnpinVersionBeforeRequest { - const message = createBaseUnpinVersionBeforeRequest(); - message.contextId = object.contextId ?? 0; - message.unpinVersionBefore = object.unpinVersionBefore ?? 0; - return message; - }, -}; - -function createBaseUnpinVersionBeforeResponse(): UnpinVersionBeforeResponse { - return { status: undefined }; -} - -export const UnpinVersionBeforeResponse = { - fromJSON(object: any): UnpinVersionBeforeResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: UnpinVersionBeforeResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): UnpinVersionBeforeResponse { - const message = createBaseUnpinVersionBeforeResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? 
Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseGetCurrentVersionRequest(): GetCurrentVersionRequest { - return {}; -} - -export const GetCurrentVersionRequest = { - fromJSON(_: any): GetCurrentVersionRequest { - return {}; - }, - - toJSON(_: GetCurrentVersionRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): GetCurrentVersionRequest { - const message = createBaseGetCurrentVersionRequest(); - return message; - }, -}; - -function createBaseGetCurrentVersionResponse(): GetCurrentVersionResponse { - return { status: undefined, currentVersion: undefined }; -} - -export const GetCurrentVersionResponse = { - fromJSON(object: any): GetCurrentVersionResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - currentVersion: isSet(object.currentVersion) ? HummockVersion.fromJSON(object.currentVersion) : undefined, - }; - }, - - toJSON(message: GetCurrentVersionResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.currentVersion !== undefined && - (obj.currentVersion = message.currentVersion ? HummockVersion.toJSON(message.currentVersion) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): GetCurrentVersionResponse { - const message = createBaseGetCurrentVersionResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.currentVersion = (object.currentVersion !== undefined && object.currentVersion !== null) - ? HummockVersion.fromPartial(object.currentVersion) - : undefined; - return message; - }, -}; - -function createBaseUnpinVersionRequest(): UnpinVersionRequest { - return { contextId: 0 }; -} - -export const UnpinVersionRequest = { - fromJSON(object: any): UnpinVersionRequest { - return { contextId: isSet(object.contextId) ? Number(object.contextId) : 0 }; - }, - - toJSON(message: UnpinVersionRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - return obj; - }, - - fromPartial, I>>(object: I): UnpinVersionRequest { - const message = createBaseUnpinVersionRequest(); - message.contextId = object.contextId ?? 0; - return message; - }, -}; - -function createBaseUnpinVersionResponse(): UnpinVersionResponse { - return { status: undefined }; -} - -export const UnpinVersionResponse = { - fromJSON(object: any): UnpinVersionResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: UnpinVersionResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): UnpinVersionResponse { - const message = createBaseUnpinVersionResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBasePinSnapshotRequest(): PinSnapshotRequest { - return { contextId: 0 }; -} - -export const PinSnapshotRequest = { - fromJSON(object: any): PinSnapshotRequest { - return { contextId: isSet(object.contextId) ? 
Number(object.contextId) : 0 }; - }, - - toJSON(message: PinSnapshotRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - return obj; - }, - - fromPartial, I>>(object: I): PinSnapshotRequest { - const message = createBasePinSnapshotRequest(); - message.contextId = object.contextId ?? 0; - return message; - }, -}; - -function createBasePinSpecificSnapshotRequest(): PinSpecificSnapshotRequest { - return { contextId: 0, epoch: 0 }; -} - -export const PinSpecificSnapshotRequest = { - fromJSON(object: any): PinSpecificSnapshotRequest { - return { - contextId: isSet(object.contextId) ? Number(object.contextId) : 0, - epoch: isSet(object.epoch) ? Number(object.epoch) : 0, - }; - }, - - toJSON(message: PinSpecificSnapshotRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - message.epoch !== undefined && (obj.epoch = Math.round(message.epoch)); - return obj; - }, - - fromPartial, I>>(object: I): PinSpecificSnapshotRequest { - const message = createBasePinSpecificSnapshotRequest(); - message.contextId = object.contextId ?? 0; - message.epoch = object.epoch ?? 0; - return message; - }, -}; - -function createBaseGetAssignedCompactTaskNumRequest(): GetAssignedCompactTaskNumRequest { - return {}; -} - -export const GetAssignedCompactTaskNumRequest = { - fromJSON(_: any): GetAssignedCompactTaskNumRequest { - return {}; - }, - - toJSON(_: GetAssignedCompactTaskNumRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): GetAssignedCompactTaskNumRequest { - const message = createBaseGetAssignedCompactTaskNumRequest(); - return message; - }, -}; - -function createBaseGetAssignedCompactTaskNumResponse(): GetAssignedCompactTaskNumResponse { - return { numTasks: 0 }; -} - -export const GetAssignedCompactTaskNumResponse = { - fromJSON(object: any): GetAssignedCompactTaskNumResponse { - return { numTasks: isSet(object.numTasks) ? Number(object.numTasks) : 0 }; - }, - - toJSON(message: GetAssignedCompactTaskNumResponse): unknown { - const obj: any = {}; - message.numTasks !== undefined && (obj.numTasks = Math.round(message.numTasks)); - return obj; - }, - - fromPartial, I>>( - object: I, - ): GetAssignedCompactTaskNumResponse { - const message = createBaseGetAssignedCompactTaskNumResponse(); - message.numTasks = object.numTasks ?? 0; - return message; - }, -}; - -function createBasePinSnapshotResponse(): PinSnapshotResponse { - return { status: undefined, snapshot: undefined }; -} - -export const PinSnapshotResponse = { - fromJSON(object: any): PinSnapshotResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - snapshot: isSet(object.snapshot) ? HummockSnapshot.fromJSON(object.snapshot) : undefined, - }; - }, - - toJSON(message: PinSnapshotResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.snapshot !== undefined && - (obj.snapshot = message.snapshot ? HummockSnapshot.toJSON(message.snapshot) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): PinSnapshotResponse { - const message = createBasePinSnapshotResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.snapshot = (object.snapshot !== undefined && object.snapshot !== null) - ? 
HummockSnapshot.fromPartial(object.snapshot) - : undefined; - return message; - }, -}; - -function createBaseGetEpochRequest(): GetEpochRequest { - return {}; -} - -export const GetEpochRequest = { - fromJSON(_: any): GetEpochRequest { - return {}; - }, - - toJSON(_: GetEpochRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): GetEpochRequest { - const message = createBaseGetEpochRequest(); - return message; - }, -}; - -function createBaseGetEpochResponse(): GetEpochResponse { - return { status: undefined, snapshot: undefined }; -} - -export const GetEpochResponse = { - fromJSON(object: any): GetEpochResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - snapshot: isSet(object.snapshot) ? HummockSnapshot.fromJSON(object.snapshot) : undefined, - }; - }, - - toJSON(message: GetEpochResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.snapshot !== undefined && - (obj.snapshot = message.snapshot ? HummockSnapshot.toJSON(message.snapshot) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): GetEpochResponse { - const message = createBaseGetEpochResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.snapshot = (object.snapshot !== undefined && object.snapshot !== null) - ? HummockSnapshot.fromPartial(object.snapshot) - : undefined; - return message; - }, -}; - -function createBaseUnpinSnapshotRequest(): UnpinSnapshotRequest { - return { contextId: 0 }; -} - -export const UnpinSnapshotRequest = { - fromJSON(object: any): UnpinSnapshotRequest { - return { contextId: isSet(object.contextId) ? Number(object.contextId) : 0 }; - }, - - toJSON(message: UnpinSnapshotRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - return obj; - }, - - fromPartial, I>>(object: I): UnpinSnapshotRequest { - const message = createBaseUnpinSnapshotRequest(); - message.contextId = object.contextId ?? 0; - return message; - }, -}; - -function createBaseUnpinSnapshotResponse(): UnpinSnapshotResponse { - return { status: undefined }; -} - -export const UnpinSnapshotResponse = { - fromJSON(object: any): UnpinSnapshotResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: UnpinSnapshotResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): UnpinSnapshotResponse { - const message = createBaseUnpinSnapshotResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseUnpinSnapshotBeforeRequest(): UnpinSnapshotBeforeRequest { - return { contextId: 0, minSnapshot: undefined }; -} - -export const UnpinSnapshotBeforeRequest = { - fromJSON(object: any): UnpinSnapshotBeforeRequest { - return { - contextId: isSet(object.contextId) ? Number(object.contextId) : 0, - minSnapshot: isSet(object.minSnapshot) ? 
HummockSnapshot.fromJSON(object.minSnapshot) : undefined, - }; - }, - - toJSON(message: UnpinSnapshotBeforeRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - message.minSnapshot !== undefined && - (obj.minSnapshot = message.minSnapshot ? HummockSnapshot.toJSON(message.minSnapshot) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): UnpinSnapshotBeforeRequest { - const message = createBaseUnpinSnapshotBeforeRequest(); - message.contextId = object.contextId ?? 0; - message.minSnapshot = (object.minSnapshot !== undefined && object.minSnapshot !== null) - ? HummockSnapshot.fromPartial(object.minSnapshot) - : undefined; - return message; - }, -}; - -function createBaseUnpinSnapshotBeforeResponse(): UnpinSnapshotBeforeResponse { - return { status: undefined }; -} - -export const UnpinSnapshotBeforeResponse = { - fromJSON(object: any): UnpinSnapshotBeforeResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: UnpinSnapshotBeforeResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): UnpinSnapshotBeforeResponse { - const message = createBaseUnpinSnapshotBeforeResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseKeyRange(): KeyRange { - return { left: new Uint8Array(), right: new Uint8Array(), rightExclusive: false }; -} - -export const KeyRange = { - fromJSON(object: any): KeyRange { - return { - left: isSet(object.left) ? bytesFromBase64(object.left) : new Uint8Array(), - right: isSet(object.right) ? bytesFromBase64(object.right) : new Uint8Array(), - rightExclusive: isSet(object.rightExclusive) ? Boolean(object.rightExclusive) : false, - }; - }, - - toJSON(message: KeyRange): unknown { - const obj: any = {}; - message.left !== undefined && - (obj.left = base64FromBytes(message.left !== undefined ? message.left : new Uint8Array())); - message.right !== undefined && - (obj.right = base64FromBytes(message.right !== undefined ? message.right : new Uint8Array())); - message.rightExclusive !== undefined && (obj.rightExclusive = message.rightExclusive); - return obj; - }, - - fromPartial, I>>(object: I): KeyRange { - const message = createBaseKeyRange(); - message.left = object.left ?? new Uint8Array(); - message.right = object.right ?? new Uint8Array(); - message.rightExclusive = object.rightExclusive ?? false; - return message; - }, -}; - -function createBaseTableOption(): TableOption { - return { retentionSeconds: 0 }; -} - -export const TableOption = { - fromJSON(object: any): TableOption { - return { retentionSeconds: isSet(object.retentionSeconds) ? Number(object.retentionSeconds) : 0 }; - }, - - toJSON(message: TableOption): unknown { - const obj: any = {}; - message.retentionSeconds !== undefined && (obj.retentionSeconds = Math.round(message.retentionSeconds)); - return obj; - }, - - fromPartial, I>>(object: I): TableOption { - const message = createBaseTableOption(); - message.retentionSeconds = object.retentionSeconds ?? 
0; - return message; - }, -}; - -function createBaseCompactTask(): CompactTask { - return { - inputSsts: [], - splits: [], - watermark: 0, - sortedOutputSsts: [], - taskId: 0, - targetLevel: 0, - gcDeleteKeys: false, - taskStatus: CompactTask_TaskStatus.UNSPECIFIED, - compactionGroupId: 0, - existingTableIds: [], - compressionAlgorithm: 0, - targetFileSize: 0, - compactionFilterMask: 0, - tableOptions: {}, - currentEpochTime: 0, - targetSubLevelId: 0, - taskType: CompactTask_TaskType.TYPE_UNSPECIFIED, - splitByStateTable: false, - }; -} - -export const CompactTask = { - fromJSON(object: any): CompactTask { - return { - inputSsts: Array.isArray(object?.inputSsts) ? object.inputSsts.map((e: any) => InputLevel.fromJSON(e)) : [], - splits: Array.isArray(object?.splits) ? object.splits.map((e: any) => KeyRange.fromJSON(e)) : [], - watermark: isSet(object.watermark) ? Number(object.watermark) : 0, - sortedOutputSsts: Array.isArray(object?.sortedOutputSsts) - ? object.sortedOutputSsts.map((e: any) => SstableInfo.fromJSON(e)) - : [], - taskId: isSet(object.taskId) ? Number(object.taskId) : 0, - targetLevel: isSet(object.targetLevel) ? Number(object.targetLevel) : 0, - gcDeleteKeys: isSet(object.gcDeleteKeys) ? Boolean(object.gcDeleteKeys) : false, - taskStatus: isSet(object.taskStatus) - ? compactTask_TaskStatusFromJSON(object.taskStatus) - : CompactTask_TaskStatus.UNSPECIFIED, - compactionGroupId: isSet(object.compactionGroupId) ? Number(object.compactionGroupId) : 0, - existingTableIds: Array.isArray(object?.existingTableIds) - ? object.existingTableIds.map((e: any) => Number(e)) - : [], - compressionAlgorithm: isSet(object.compressionAlgorithm) ? Number(object.compressionAlgorithm) : 0, - targetFileSize: isSet(object.targetFileSize) ? Number(object.targetFileSize) : 0, - compactionFilterMask: isSet(object.compactionFilterMask) ? Number(object.compactionFilterMask) : 0, - tableOptions: isObject(object.tableOptions) - ? Object.entries(object.tableOptions).reduce<{ [key: number]: TableOption }>((acc, [key, value]) => { - acc[Number(key)] = TableOption.fromJSON(value); - return acc; - }, {}) - : {}, - currentEpochTime: isSet(object.currentEpochTime) ? Number(object.currentEpochTime) : 0, - targetSubLevelId: isSet(object.targetSubLevelId) ? Number(object.targetSubLevelId) : 0, - taskType: isSet(object.taskType) - ? compactTask_TaskTypeFromJSON(object.taskType) - : CompactTask_TaskType.TYPE_UNSPECIFIED, - splitByStateTable: isSet(object.splitByStateTable) ? Boolean(object.splitByStateTable) : false, - }; - }, - - toJSON(message: CompactTask): unknown { - const obj: any = {}; - if (message.inputSsts) { - obj.inputSsts = message.inputSsts.map((e) => e ? InputLevel.toJSON(e) : undefined); - } else { - obj.inputSsts = []; - } - if (message.splits) { - obj.splits = message.splits.map((e) => e ? KeyRange.toJSON(e) : undefined); - } else { - obj.splits = []; - } - message.watermark !== undefined && (obj.watermark = Math.round(message.watermark)); - if (message.sortedOutputSsts) { - obj.sortedOutputSsts = message.sortedOutputSsts.map((e) => e ? 
SstableInfo.toJSON(e) : undefined); - } else { - obj.sortedOutputSsts = []; - } - message.taskId !== undefined && (obj.taskId = Math.round(message.taskId)); - message.targetLevel !== undefined && (obj.targetLevel = Math.round(message.targetLevel)); - message.gcDeleteKeys !== undefined && (obj.gcDeleteKeys = message.gcDeleteKeys); - message.taskStatus !== undefined && (obj.taskStatus = compactTask_TaskStatusToJSON(message.taskStatus)); - message.compactionGroupId !== undefined && (obj.compactionGroupId = Math.round(message.compactionGroupId)); - if (message.existingTableIds) { - obj.existingTableIds = message.existingTableIds.map((e) => Math.round(e)); - } else { - obj.existingTableIds = []; - } - message.compressionAlgorithm !== undefined && (obj.compressionAlgorithm = Math.round(message.compressionAlgorithm)); - message.targetFileSize !== undefined && (obj.targetFileSize = Math.round(message.targetFileSize)); - message.compactionFilterMask !== undefined && (obj.compactionFilterMask = Math.round(message.compactionFilterMask)); - obj.tableOptions = {}; - if (message.tableOptions) { - Object.entries(message.tableOptions).forEach(([k, v]) => { - obj.tableOptions[k] = TableOption.toJSON(v); - }); - } - message.currentEpochTime !== undefined && (obj.currentEpochTime = Math.round(message.currentEpochTime)); - message.targetSubLevelId !== undefined && (obj.targetSubLevelId = Math.round(message.targetSubLevelId)); - message.taskType !== undefined && (obj.taskType = compactTask_TaskTypeToJSON(message.taskType)); - message.splitByStateTable !== undefined && (obj.splitByStateTable = message.splitByStateTable); - return obj; - }, - - fromPartial, I>>(object: I): CompactTask { - const message = createBaseCompactTask(); - message.inputSsts = object.inputSsts?.map((e) => InputLevel.fromPartial(e)) || []; - message.splits = object.splits?.map((e) => KeyRange.fromPartial(e)) || []; - message.watermark = object.watermark ?? 0; - message.sortedOutputSsts = object.sortedOutputSsts?.map((e) => SstableInfo.fromPartial(e)) || []; - message.taskId = object.taskId ?? 0; - message.targetLevel = object.targetLevel ?? 0; - message.gcDeleteKeys = object.gcDeleteKeys ?? false; - message.taskStatus = object.taskStatus ?? CompactTask_TaskStatus.UNSPECIFIED; - message.compactionGroupId = object.compactionGroupId ?? 0; - message.existingTableIds = object.existingTableIds?.map((e) => e) || []; - message.compressionAlgorithm = object.compressionAlgorithm ?? 0; - message.targetFileSize = object.targetFileSize ?? 0; - message.compactionFilterMask = object.compactionFilterMask ?? 0; - message.tableOptions = Object.entries(object.tableOptions ?? {}).reduce<{ [key: number]: TableOption }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = TableOption.fromPartial(value); - } - return acc; - }, - {}, - ); - message.currentEpochTime = object.currentEpochTime ?? 0; - message.targetSubLevelId = object.targetSubLevelId ?? 0; - message.taskType = object.taskType ?? CompactTask_TaskType.TYPE_UNSPECIFIED; - message.splitByStateTable = object.splitByStateTable ?? false; - return message; - }, -}; - -function createBaseCompactTask_TableOptionsEntry(): CompactTask_TableOptionsEntry { - return { key: 0, value: undefined }; -} - -export const CompactTask_TableOptionsEntry = { - fromJSON(object: any): CompactTask_TableOptionsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? 
TableOption.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: CompactTask_TableOptionsEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? TableOption.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): CompactTask_TableOptionsEntry { - const message = createBaseCompactTask_TableOptionsEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? TableOption.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseLevelHandler(): LevelHandler { - return { level: 0, tasks: [] }; -} - -export const LevelHandler = { - fromJSON(object: any): LevelHandler { - return { - level: isSet(object.level) ? Number(object.level) : 0, - tasks: Array.isArray(object?.tasks) - ? object.tasks.map((e: any) => LevelHandler_RunningCompactTask.fromJSON(e)) - : [], - }; - }, - - toJSON(message: LevelHandler): unknown { - const obj: any = {}; - message.level !== undefined && (obj.level = Math.round(message.level)); - if (message.tasks) { - obj.tasks = message.tasks.map((e) => e ? LevelHandler_RunningCompactTask.toJSON(e) : undefined); - } else { - obj.tasks = []; - } - return obj; - }, - - fromPartial, I>>(object: I): LevelHandler { - const message = createBaseLevelHandler(); - message.level = object.level ?? 0; - message.tasks = object.tasks?.map((e) => LevelHandler_RunningCompactTask.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseLevelHandler_RunningCompactTask(): LevelHandler_RunningCompactTask { - return { taskId: 0, ssts: [], totalFileSize: 0, targetLevel: 0 }; -} - -export const LevelHandler_RunningCompactTask = { - fromJSON(object: any): LevelHandler_RunningCompactTask { - return { - taskId: isSet(object.taskId) ? Number(object.taskId) : 0, - ssts: Array.isArray(object?.ssts) ? object.ssts.map((e: any) => Number(e)) : [], - totalFileSize: isSet(object.totalFileSize) ? Number(object.totalFileSize) : 0, - targetLevel: isSet(object.targetLevel) ? Number(object.targetLevel) : 0, - }; - }, - - toJSON(message: LevelHandler_RunningCompactTask): unknown { - const obj: any = {}; - message.taskId !== undefined && (obj.taskId = Math.round(message.taskId)); - if (message.ssts) { - obj.ssts = message.ssts.map((e) => Math.round(e)); - } else { - obj.ssts = []; - } - message.totalFileSize !== undefined && (obj.totalFileSize = Math.round(message.totalFileSize)); - message.targetLevel !== undefined && (obj.targetLevel = Math.round(message.targetLevel)); - return obj; - }, - - fromPartial, I>>( - object: I, - ): LevelHandler_RunningCompactTask { - const message = createBaseLevelHandler_RunningCompactTask(); - message.taskId = object.taskId ?? 0; - message.ssts = object.ssts?.map((e) => e) || []; - message.totalFileSize = object.totalFileSize ?? 0; - message.targetLevel = object.targetLevel ?? 0; - return message; - }, -}; - -function createBaseCompactStatus(): CompactStatus { - return { compactionGroupId: 0, levelHandlers: [] }; -} - -export const CompactStatus = { - fromJSON(object: any): CompactStatus { - return { - compactionGroupId: isSet(object.compactionGroupId) ? Number(object.compactionGroupId) : 0, - levelHandlers: Array.isArray(object?.levelHandlers) - ? 
object.levelHandlers.map((e: any) => LevelHandler.fromJSON(e)) - : [], - }; - }, - - toJSON(message: CompactStatus): unknown { - const obj: any = {}; - message.compactionGroupId !== undefined && (obj.compactionGroupId = Math.round(message.compactionGroupId)); - if (message.levelHandlers) { - obj.levelHandlers = message.levelHandlers.map((e) => e ? LevelHandler.toJSON(e) : undefined); - } else { - obj.levelHandlers = []; - } - return obj; - }, - - fromPartial, I>>(object: I): CompactStatus { - const message = createBaseCompactStatus(); - message.compactionGroupId = object.compactionGroupId ?? 0; - message.levelHandlers = object.levelHandlers?.map((e) => LevelHandler.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseCompactionGroup(): CompactionGroup { - return { id: 0, compactionConfig: undefined }; -} - -export const CompactionGroup = { - fromJSON(object: any): CompactionGroup { - return { - id: isSet(object.id) ? Number(object.id) : 0, - compactionConfig: isSet(object.compactionConfig) ? CompactionConfig.fromJSON(object.compactionConfig) : undefined, - }; - }, - - toJSON(message: CompactionGroup): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.compactionConfig !== undefined && - (obj.compactionConfig = message.compactionConfig ? CompactionConfig.toJSON(message.compactionConfig) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CompactionGroup { - const message = createBaseCompactionGroup(); - message.id = object.id ?? 0; - message.compactionConfig = (object.compactionConfig !== undefined && object.compactionConfig !== null) - ? CompactionConfig.fromPartial(object.compactionConfig) - : undefined; - return message; - }, -}; - -function createBaseCompactionGroupInfo(): CompactionGroupInfo { - return { id: 0, parentId: 0, memberTableIds: [], compactionConfig: undefined }; -} - -export const CompactionGroupInfo = { - fromJSON(object: any): CompactionGroupInfo { - return { - id: isSet(object.id) ? Number(object.id) : 0, - parentId: isSet(object.parentId) ? Number(object.parentId) : 0, - memberTableIds: Array.isArray(object?.memberTableIds) ? object.memberTableIds.map((e: any) => Number(e)) : [], - compactionConfig: isSet(object.compactionConfig) ? CompactionConfig.fromJSON(object.compactionConfig) : undefined, - }; - }, - - toJSON(message: CompactionGroupInfo): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.parentId !== undefined && (obj.parentId = Math.round(message.parentId)); - if (message.memberTableIds) { - obj.memberTableIds = message.memberTableIds.map((e) => Math.round(e)); - } else { - obj.memberTableIds = []; - } - message.compactionConfig !== undefined && - (obj.compactionConfig = message.compactionConfig ? CompactionConfig.toJSON(message.compactionConfig) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CompactionGroupInfo { - const message = createBaseCompactionGroupInfo(); - message.id = object.id ?? 0; - message.parentId = object.parentId ?? 0; - message.memberTableIds = object.memberTableIds?.map((e) => e) || []; - message.compactionConfig = (object.compactionConfig !== undefined && object.compactionConfig !== null) - ? 
CompactionConfig.fromPartial(object.compactionConfig) - : undefined; - return message; - }, -}; - -function createBaseCompactTaskAssignment(): CompactTaskAssignment { - return { compactTask: undefined, contextId: 0 }; -} - -export const CompactTaskAssignment = { - fromJSON(object: any): CompactTaskAssignment { - return { - compactTask: isSet(object.compactTask) ? CompactTask.fromJSON(object.compactTask) : undefined, - contextId: isSet(object.contextId) ? Number(object.contextId) : 0, - }; - }, - - toJSON(message: CompactTaskAssignment): unknown { - const obj: any = {}; - message.compactTask !== undefined && - (obj.compactTask = message.compactTask ? CompactTask.toJSON(message.compactTask) : undefined); - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - return obj; - }, - - fromPartial, I>>(object: I): CompactTaskAssignment { - const message = createBaseCompactTaskAssignment(); - message.compactTask = (object.compactTask !== undefined && object.compactTask !== null) - ? CompactTask.fromPartial(object.compactTask) - : undefined; - message.contextId = object.contextId ?? 0; - return message; - }, -}; - -function createBaseGetCompactionTasksRequest(): GetCompactionTasksRequest { - return {}; -} - -export const GetCompactionTasksRequest = { - fromJSON(_: any): GetCompactionTasksRequest { - return {}; - }, - - toJSON(_: GetCompactionTasksRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): GetCompactionTasksRequest { - const message = createBaseGetCompactionTasksRequest(); - return message; - }, -}; - -function createBaseGetCompactionTasksResponse(): GetCompactionTasksResponse { - return { status: undefined, compactTask: undefined }; -} - -export const GetCompactionTasksResponse = { - fromJSON(object: any): GetCompactionTasksResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - compactTask: isSet(object.compactTask) ? CompactTask.fromJSON(object.compactTask) : undefined, - }; - }, - - toJSON(message: GetCompactionTasksResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.compactTask !== undefined && - (obj.compactTask = message.compactTask ? CompactTask.toJSON(message.compactTask) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): GetCompactionTasksResponse { - const message = createBaseGetCompactionTasksResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.compactTask = (object.compactTask !== undefined && object.compactTask !== null) - ? CompactTask.fromPartial(object.compactTask) - : undefined; - return message; - }, -}; - -function createBaseReportCompactionTasksRequest(): ReportCompactionTasksRequest { - return { contextId: 0, compactTask: undefined, tableStatsChange: {} }; -} - -export const ReportCompactionTasksRequest = { - fromJSON(object: any): ReportCompactionTasksRequest { - return { - contextId: isSet(object.contextId) ? Number(object.contextId) : 0, - compactTask: isSet(object.compactTask) ? CompactTask.fromJSON(object.compactTask) : undefined, - tableStatsChange: isObject(object.tableStatsChange) - ? 
Object.entries(object.tableStatsChange).reduce<{ [key: number]: TableStats }>((acc, [key, value]) => { - acc[Number(key)] = TableStats.fromJSON(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: ReportCompactionTasksRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - message.compactTask !== undefined && - (obj.compactTask = message.compactTask ? CompactTask.toJSON(message.compactTask) : undefined); - obj.tableStatsChange = {}; - if (message.tableStatsChange) { - Object.entries(message.tableStatsChange).forEach(([k, v]) => { - obj.tableStatsChange[k] = TableStats.toJSON(v); - }); - } - return obj; - }, - - fromPartial, I>>(object: I): ReportCompactionTasksRequest { - const message = createBaseReportCompactionTasksRequest(); - message.contextId = object.contextId ?? 0; - message.compactTask = (object.compactTask !== undefined && object.compactTask !== null) - ? CompactTask.fromPartial(object.compactTask) - : undefined; - message.tableStatsChange = Object.entries(object.tableStatsChange ?? {}).reduce<{ [key: number]: TableStats }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = TableStats.fromPartial(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBaseReportCompactionTasksRequest_TableStatsChangeEntry(): ReportCompactionTasksRequest_TableStatsChangeEntry { - return { key: 0, value: undefined }; -} - -export const ReportCompactionTasksRequest_TableStatsChangeEntry = { - fromJSON(object: any): ReportCompactionTasksRequest_TableStatsChangeEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? TableStats.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: ReportCompactionTasksRequest_TableStatsChangeEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? TableStats.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): ReportCompactionTasksRequest_TableStatsChangeEntry { - const message = createBaseReportCompactionTasksRequest_TableStatsChangeEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? TableStats.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseReportCompactionTasksResponse(): ReportCompactionTasksResponse { - return { status: undefined }; -} - -export const ReportCompactionTasksResponse = { - fromJSON(object: any): ReportCompactionTasksResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: ReportCompactionTasksResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): ReportCompactionTasksResponse { - const message = createBaseReportCompactionTasksResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseHummockPinnedVersion(): HummockPinnedVersion { - return { contextId: 0, minPinnedId: 0 }; -} - -export const HummockPinnedVersion = { - fromJSON(object: any): HummockPinnedVersion { - return { - contextId: isSet(object.contextId) ? 
Number(object.contextId) : 0, - minPinnedId: isSet(object.minPinnedId) ? Number(object.minPinnedId) : 0, - }; - }, - - toJSON(message: HummockPinnedVersion): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - message.minPinnedId !== undefined && (obj.minPinnedId = Math.round(message.minPinnedId)); - return obj; - }, - - fromPartial, I>>(object: I): HummockPinnedVersion { - const message = createBaseHummockPinnedVersion(); - message.contextId = object.contextId ?? 0; - message.minPinnedId = object.minPinnedId ?? 0; - return message; - }, -}; - -function createBaseHummockPinnedSnapshot(): HummockPinnedSnapshot { - return { contextId: 0, minimalPinnedSnapshot: 0 }; -} - -export const HummockPinnedSnapshot = { - fromJSON(object: any): HummockPinnedSnapshot { - return { - contextId: isSet(object.contextId) ? Number(object.contextId) : 0, - minimalPinnedSnapshot: isSet(object.minimalPinnedSnapshot) ? Number(object.minimalPinnedSnapshot) : 0, - }; - }, - - toJSON(message: HummockPinnedSnapshot): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - message.minimalPinnedSnapshot !== undefined && - (obj.minimalPinnedSnapshot = Math.round(message.minimalPinnedSnapshot)); - return obj; - }, - - fromPartial, I>>(object: I): HummockPinnedSnapshot { - const message = createBaseHummockPinnedSnapshot(); - message.contextId = object.contextId ?? 0; - message.minimalPinnedSnapshot = object.minimalPinnedSnapshot ?? 0; - return message; - }, -}; - -function createBaseGetNewSstIdsRequest(): GetNewSstIdsRequest { - return { number: 0 }; -} - -export const GetNewSstIdsRequest = { - fromJSON(object: any): GetNewSstIdsRequest { - return { number: isSet(object.number) ? Number(object.number) : 0 }; - }, - - toJSON(message: GetNewSstIdsRequest): unknown { - const obj: any = {}; - message.number !== undefined && (obj.number = Math.round(message.number)); - return obj; - }, - - fromPartial, I>>(object: I): GetNewSstIdsRequest { - const message = createBaseGetNewSstIdsRequest(); - message.number = object.number ?? 0; - return message; - }, -}; - -function createBaseGetNewSstIdsResponse(): GetNewSstIdsResponse { - return { status: undefined, startId: 0, endId: 0 }; -} - -export const GetNewSstIdsResponse = { - fromJSON(object: any): GetNewSstIdsResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - startId: isSet(object.startId) ? Number(object.startId) : 0, - endId: isSet(object.endId) ? Number(object.endId) : 0, - }; - }, - - toJSON(message: GetNewSstIdsResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.startId !== undefined && (obj.startId = Math.round(message.startId)); - message.endId !== undefined && (obj.endId = Math.round(message.endId)); - return obj; - }, - - fromPartial, I>>(object: I): GetNewSstIdsResponse { - const message = createBaseGetNewSstIdsResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.startId = object.startId ?? 0; - message.endId = object.endId ?? 
0; - return message; - }, -}; - -function createBaseCompactTaskProgress(): CompactTaskProgress { - return { taskId: 0, numSstsSealed: 0, numSstsUploaded: 0 }; -} - -export const CompactTaskProgress = { - fromJSON(object: any): CompactTaskProgress { - return { - taskId: isSet(object.taskId) ? Number(object.taskId) : 0, - numSstsSealed: isSet(object.numSstsSealed) ? Number(object.numSstsSealed) : 0, - numSstsUploaded: isSet(object.numSstsUploaded) ? Number(object.numSstsUploaded) : 0, - }; - }, - - toJSON(message: CompactTaskProgress): unknown { - const obj: any = {}; - message.taskId !== undefined && (obj.taskId = Math.round(message.taskId)); - message.numSstsSealed !== undefined && (obj.numSstsSealed = Math.round(message.numSstsSealed)); - message.numSstsUploaded !== undefined && (obj.numSstsUploaded = Math.round(message.numSstsUploaded)); - return obj; - }, - - fromPartial, I>>(object: I): CompactTaskProgress { - const message = createBaseCompactTaskProgress(); - message.taskId = object.taskId ?? 0; - message.numSstsSealed = object.numSstsSealed ?? 0; - message.numSstsUploaded = object.numSstsUploaded ?? 0; - return message; - }, -}; - -function createBaseReportCompactionTaskProgressRequest(): ReportCompactionTaskProgressRequest { - return { contextId: 0, progress: [] }; -} - -export const ReportCompactionTaskProgressRequest = { - fromJSON(object: any): ReportCompactionTaskProgressRequest { - return { - contextId: isSet(object.contextId) ? Number(object.contextId) : 0, - progress: Array.isArray(object?.progress) ? object.progress.map((e: any) => CompactTaskProgress.fromJSON(e)) : [], - }; - }, - - toJSON(message: ReportCompactionTaskProgressRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - if (message.progress) { - obj.progress = message.progress.map((e) => e ? CompactTaskProgress.toJSON(e) : undefined); - } else { - obj.progress = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): ReportCompactionTaskProgressRequest { - const message = createBaseReportCompactionTaskProgressRequest(); - message.contextId = object.contextId ?? 0; - message.progress = object.progress?.map((e) => CompactTaskProgress.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseReportCompactionTaskProgressResponse(): ReportCompactionTaskProgressResponse { - return { status: undefined }; -} - -export const ReportCompactionTaskProgressResponse = { - fromJSON(object: any): ReportCompactionTaskProgressResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: ReportCompactionTaskProgressResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): ReportCompactionTaskProgressResponse { - const message = createBaseReportCompactionTaskProgressResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseSubscribeCompactTasksRequest(): SubscribeCompactTasksRequest { - return { contextId: 0, maxConcurrentTaskNumber: 0 }; -} - -export const SubscribeCompactTasksRequest = { - fromJSON(object: any): SubscribeCompactTasksRequest { - return { - contextId: isSet(object.contextId) ? Number(object.contextId) : 0, - maxConcurrentTaskNumber: isSet(object.maxConcurrentTaskNumber) ? 
Number(object.maxConcurrentTaskNumber) : 0, - }; - }, - - toJSON(message: SubscribeCompactTasksRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - message.maxConcurrentTaskNumber !== undefined && - (obj.maxConcurrentTaskNumber = Math.round(message.maxConcurrentTaskNumber)); - return obj; - }, - - fromPartial, I>>(object: I): SubscribeCompactTasksRequest { - const message = createBaseSubscribeCompactTasksRequest(); - message.contextId = object.contextId ?? 0; - message.maxConcurrentTaskNumber = object.maxConcurrentTaskNumber ?? 0; - return message; - }, -}; - -function createBaseValidationTask(): ValidationTask { - return { sstInfos: [], sstIdToWorkerId: {}, epoch: 0 }; -} - -export const ValidationTask = { - fromJSON(object: any): ValidationTask { - return { - sstInfos: Array.isArray(object?.sstInfos) ? object.sstInfos.map((e: any) => SstableInfo.fromJSON(e)) : [], - sstIdToWorkerId: isObject(object.sstIdToWorkerId) - ? Object.entries(object.sstIdToWorkerId).reduce<{ [key: number]: number }>((acc, [key, value]) => { - acc[Number(key)] = Number(value); - return acc; - }, {}) - : {}, - epoch: isSet(object.epoch) ? Number(object.epoch) : 0, - }; - }, - - toJSON(message: ValidationTask): unknown { - const obj: any = {}; - if (message.sstInfos) { - obj.sstInfos = message.sstInfos.map((e) => e ? SstableInfo.toJSON(e) : undefined); - } else { - obj.sstInfos = []; - } - obj.sstIdToWorkerId = {}; - if (message.sstIdToWorkerId) { - Object.entries(message.sstIdToWorkerId).forEach(([k, v]) => { - obj.sstIdToWorkerId[k] = Math.round(v); - }); - } - message.epoch !== undefined && (obj.epoch = Math.round(message.epoch)); - return obj; - }, - - fromPartial, I>>(object: I): ValidationTask { - const message = createBaseValidationTask(); - message.sstInfos = object.sstInfos?.map((e) => SstableInfo.fromPartial(e)) || []; - message.sstIdToWorkerId = Object.entries(object.sstIdToWorkerId ?? {}).reduce<{ [key: number]: number }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = Number(value); - } - return acc; - }, - {}, - ); - message.epoch = object.epoch ?? 0; - return message; - }, -}; - -function createBaseValidationTask_SstIdToWorkerIdEntry(): ValidationTask_SstIdToWorkerIdEntry { - return { key: 0, value: 0 }; -} - -export const ValidationTask_SstIdToWorkerIdEntry = { - fromJSON(object: any): ValidationTask_SstIdToWorkerIdEntry { - return { key: isSet(object.key) ? Number(object.key) : 0, value: isSet(object.value) ? Number(object.value) : 0 }; - }, - - toJSON(message: ValidationTask_SstIdToWorkerIdEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = Math.round(message.value)); - return obj; - }, - - fromPartial, I>>( - object: I, - ): ValidationTask_SstIdToWorkerIdEntry { - const message = createBaseValidationTask_SstIdToWorkerIdEntry(); - message.key = object.key ?? 0; - message.value = object.value ?? 0; - return message; - }, -}; - -function createBaseSubscribeCompactTasksResponse(): SubscribeCompactTasksResponse { - return { task: undefined }; -} - -export const SubscribeCompactTasksResponse = { - fromJSON(object: any): SubscribeCompactTasksResponse { - return { - task: isSet(object.compactTask) - ? { $case: "compactTask", compactTask: CompactTask.fromJSON(object.compactTask) } - : isSet(object.vacuumTask) - ? 
{ $case: "vacuumTask", vacuumTask: VacuumTask.fromJSON(object.vacuumTask) } - : isSet(object.fullScanTask) - ? { $case: "fullScanTask", fullScanTask: FullScanTask.fromJSON(object.fullScanTask) } - : isSet(object.validationTask) - ? { $case: "validationTask", validationTask: ValidationTask.fromJSON(object.validationTask) } - : isSet(object.cancelCompactTask) - ? { $case: "cancelCompactTask", cancelCompactTask: CancelCompactTask.fromJSON(object.cancelCompactTask) } - : undefined, - }; - }, - - toJSON(message: SubscribeCompactTasksResponse): unknown { - const obj: any = {}; - message.task?.$case === "compactTask" && - (obj.compactTask = message.task?.compactTask ? CompactTask.toJSON(message.task?.compactTask) : undefined); - message.task?.$case === "vacuumTask" && - (obj.vacuumTask = message.task?.vacuumTask ? VacuumTask.toJSON(message.task?.vacuumTask) : undefined); - message.task?.$case === "fullScanTask" && - (obj.fullScanTask = message.task?.fullScanTask ? FullScanTask.toJSON(message.task?.fullScanTask) : undefined); - message.task?.$case === "validationTask" && (obj.validationTask = message.task?.validationTask - ? ValidationTask.toJSON(message.task?.validationTask) - : undefined); - message.task?.$case === "cancelCompactTask" && (obj.cancelCompactTask = message.task?.cancelCompactTask - ? CancelCompactTask.toJSON(message.task?.cancelCompactTask) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): SubscribeCompactTasksResponse { - const message = createBaseSubscribeCompactTasksResponse(); - if ( - object.task?.$case === "compactTask" && - object.task?.compactTask !== undefined && - object.task?.compactTask !== null - ) { - message.task = { $case: "compactTask", compactTask: CompactTask.fromPartial(object.task.compactTask) }; - } - if ( - object.task?.$case === "vacuumTask" && object.task?.vacuumTask !== undefined && object.task?.vacuumTask !== null - ) { - message.task = { $case: "vacuumTask", vacuumTask: VacuumTask.fromPartial(object.task.vacuumTask) }; - } - if ( - object.task?.$case === "fullScanTask" && - object.task?.fullScanTask !== undefined && - object.task?.fullScanTask !== null - ) { - message.task = { $case: "fullScanTask", fullScanTask: FullScanTask.fromPartial(object.task.fullScanTask) }; - } - if ( - object.task?.$case === "validationTask" && - object.task?.validationTask !== undefined && - object.task?.validationTask !== null - ) { - message.task = { - $case: "validationTask", - validationTask: ValidationTask.fromPartial(object.task.validationTask), - }; - } - if ( - object.task?.$case === "cancelCompactTask" && - object.task?.cancelCompactTask !== undefined && - object.task?.cancelCompactTask !== null - ) { - message.task = { - $case: "cancelCompactTask", - cancelCompactTask: CancelCompactTask.fromPartial(object.task.cancelCompactTask), - }; - } - return message; - }, -}; - -function createBaseVacuumTask(): VacuumTask { - return { sstableIds: [] }; -} - -export const VacuumTask = { - fromJSON(object: any): VacuumTask { - return { sstableIds: Array.isArray(object?.sstableIds) ? 
object.sstableIds.map((e: any) => Number(e)) : [] };
-  },
-
-  toJSON(message: VacuumTask): unknown {
-    const obj: any = {};
-    if (message.sstableIds) {
-      obj.sstableIds = message.sstableIds.map((e) => Math.round(e));
-    } else {
-      obj.sstableIds = [];
-    }
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<VacuumTask>, I>>(object: I): VacuumTask {
-    const message = createBaseVacuumTask();
-    message.sstableIds = object.sstableIds?.map((e) => e) || [];
-    return message;
-  },
-};
-
-function createBaseFullScanTask(): FullScanTask {
-  return { sstRetentionTimeSec: 0 };
-}
-
-export const FullScanTask = {
-  fromJSON(object: any): FullScanTask {
-    return { sstRetentionTimeSec: isSet(object.sstRetentionTimeSec) ? Number(object.sstRetentionTimeSec) : 0 };
-  },
-
-  toJSON(message: FullScanTask): unknown {
-    const obj: any = {};
-    message.sstRetentionTimeSec !== undefined && (obj.sstRetentionTimeSec = Math.round(message.sstRetentionTimeSec));
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<FullScanTask>, I>>(object: I): FullScanTask {
-    const message = createBaseFullScanTask();
-    message.sstRetentionTimeSec = object.sstRetentionTimeSec ?? 0;
-    return message;
-  },
-};
-
-function createBaseCancelCompactTask(): CancelCompactTask {
-  return { contextId: 0, taskId: 0 };
-}
-
-export const CancelCompactTask = {
-  fromJSON(object: any): CancelCompactTask {
-    return {
-      contextId: isSet(object.contextId) ? Number(object.contextId) : 0,
-      taskId: isSet(object.taskId) ? Number(object.taskId) : 0,
-    };
-  },
-
-  toJSON(message: CancelCompactTask): unknown {
-    const obj: any = {};
-    message.contextId !== undefined && (obj.contextId = Math.round(message.contextId));
-    message.taskId !== undefined && (obj.taskId = Math.round(message.taskId));
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<CancelCompactTask>, I>>(object: I): CancelCompactTask {
-    const message = createBaseCancelCompactTask();
-    message.contextId = object.contextId ?? 0;
-    message.taskId = object.taskId ?? 0;
-    return message;
-  },
-};
-
-function createBaseReportVacuumTaskRequest(): ReportVacuumTaskRequest {
-  return { vacuumTask: undefined };
-}
-
-export const ReportVacuumTaskRequest = {
-  fromJSON(object: any): ReportVacuumTaskRequest {
-    return { vacuumTask: isSet(object.vacuumTask) ? VacuumTask.fromJSON(object.vacuumTask) : undefined };
-  },
-
-  toJSON(message: ReportVacuumTaskRequest): unknown {
-    const obj: any = {};
-    message.vacuumTask !== undefined &&
-      (obj.vacuumTask = message.vacuumTask ? VacuumTask.toJSON(message.vacuumTask) : undefined);
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<ReportVacuumTaskRequest>, I>>(object: I): ReportVacuumTaskRequest {
-    const message = createBaseReportVacuumTaskRequest();
-    message.vacuumTask = (object.vacuumTask !== undefined && object.vacuumTask !== null)
-      ? VacuumTask.fromPartial(object.vacuumTask)
-      : undefined;
-    return message;
-  },
-};
-
-function createBaseReportVacuumTaskResponse(): ReportVacuumTaskResponse {
-  return { status: undefined };
-}
-
-export const ReportVacuumTaskResponse = {
-  fromJSON(object: any): ReportVacuumTaskResponse {
-    return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined };
-  },
-
-  toJSON(message: ReportVacuumTaskResponse): unknown {
-    const obj: any = {};
-    message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined);
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<ReportVacuumTaskResponse>, I>>(object: I): ReportVacuumTaskResponse {
-    const message = createBaseReportVacuumTaskResponse();
-    message.status = (object.status !== undefined && object.status !== null)
-      ?
Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseTriggerManualCompactionRequest(): TriggerManualCompactionRequest { - return { compactionGroupId: 0, keyRange: undefined, tableId: 0, level: 0, sstIds: [] }; -} - -export const TriggerManualCompactionRequest = { - fromJSON(object: any): TriggerManualCompactionRequest { - return { - compactionGroupId: isSet(object.compactionGroupId) ? Number(object.compactionGroupId) : 0, - keyRange: isSet(object.keyRange) ? KeyRange.fromJSON(object.keyRange) : undefined, - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - level: isSet(object.level) ? Number(object.level) : 0, - sstIds: Array.isArray(object?.sstIds) ? object.sstIds.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: TriggerManualCompactionRequest): unknown { - const obj: any = {}; - message.compactionGroupId !== undefined && (obj.compactionGroupId = Math.round(message.compactionGroupId)); - message.keyRange !== undefined && (obj.keyRange = message.keyRange ? KeyRange.toJSON(message.keyRange) : undefined); - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - message.level !== undefined && (obj.level = Math.round(message.level)); - if (message.sstIds) { - obj.sstIds = message.sstIds.map((e) => Math.round(e)); - } else { - obj.sstIds = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): TriggerManualCompactionRequest { - const message = createBaseTriggerManualCompactionRequest(); - message.compactionGroupId = object.compactionGroupId ?? 0; - message.keyRange = (object.keyRange !== undefined && object.keyRange !== null) - ? KeyRange.fromPartial(object.keyRange) - : undefined; - message.tableId = object.tableId ?? 0; - message.level = object.level ?? 0; - message.sstIds = object.sstIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseTriggerManualCompactionResponse(): TriggerManualCompactionResponse { - return { status: undefined }; -} - -export const TriggerManualCompactionResponse = { - fromJSON(object: any): TriggerManualCompactionResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: TriggerManualCompactionResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): TriggerManualCompactionResponse { - const message = createBaseTriggerManualCompactionResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseReportFullScanTaskRequest(): ReportFullScanTaskRequest { - return { sstIds: [] }; -} - -export const ReportFullScanTaskRequest = { - fromJSON(object: any): ReportFullScanTaskRequest { - return { sstIds: Array.isArray(object?.sstIds) ? 
object.sstIds.map((e: any) => Number(e)) : [] }; - }, - - toJSON(message: ReportFullScanTaskRequest): unknown { - const obj: any = {}; - if (message.sstIds) { - obj.sstIds = message.sstIds.map((e) => Math.round(e)); - } else { - obj.sstIds = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ReportFullScanTaskRequest { - const message = createBaseReportFullScanTaskRequest(); - message.sstIds = object.sstIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseReportFullScanTaskResponse(): ReportFullScanTaskResponse { - return { status: undefined }; -} - -export const ReportFullScanTaskResponse = { - fromJSON(object: any): ReportFullScanTaskResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: ReportFullScanTaskResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ReportFullScanTaskResponse { - const message = createBaseReportFullScanTaskResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseTriggerFullGCRequest(): TriggerFullGCRequest { - return { sstRetentionTimeSec: 0 }; -} - -export const TriggerFullGCRequest = { - fromJSON(object: any): TriggerFullGCRequest { - return { sstRetentionTimeSec: isSet(object.sstRetentionTimeSec) ? Number(object.sstRetentionTimeSec) : 0 }; - }, - - toJSON(message: TriggerFullGCRequest): unknown { - const obj: any = {}; - message.sstRetentionTimeSec !== undefined && (obj.sstRetentionTimeSec = Math.round(message.sstRetentionTimeSec)); - return obj; - }, - - fromPartial, I>>(object: I): TriggerFullGCRequest { - const message = createBaseTriggerFullGCRequest(); - message.sstRetentionTimeSec = object.sstRetentionTimeSec ?? 0; - return message; - }, -}; - -function createBaseTriggerFullGCResponse(): TriggerFullGCResponse { - return { status: undefined }; -} - -export const TriggerFullGCResponse = { - fromJSON(object: any): TriggerFullGCResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: TriggerFullGCResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): TriggerFullGCResponse { - const message = createBaseTriggerFullGCResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseListVersionDeltasRequest(): ListVersionDeltasRequest { - return { startId: 0, numLimit: 0, committedEpochLimit: 0 }; -} - -export const ListVersionDeltasRequest = { - fromJSON(object: any): ListVersionDeltasRequest { - return { - startId: isSet(object.startId) ? Number(object.startId) : 0, - numLimit: isSet(object.numLimit) ? Number(object.numLimit) : 0, - committedEpochLimit: isSet(object.committedEpochLimit) ? 
Number(object.committedEpochLimit) : 0, - }; - }, - - toJSON(message: ListVersionDeltasRequest): unknown { - const obj: any = {}; - message.startId !== undefined && (obj.startId = Math.round(message.startId)); - message.numLimit !== undefined && (obj.numLimit = Math.round(message.numLimit)); - message.committedEpochLimit !== undefined && (obj.committedEpochLimit = Math.round(message.committedEpochLimit)); - return obj; - }, - - fromPartial, I>>(object: I): ListVersionDeltasRequest { - const message = createBaseListVersionDeltasRequest(); - message.startId = object.startId ?? 0; - message.numLimit = object.numLimit ?? 0; - message.committedEpochLimit = object.committedEpochLimit ?? 0; - return message; - }, -}; - -function createBaseListVersionDeltasResponse(): ListVersionDeltasResponse { - return { versionDeltas: undefined }; -} - -export const ListVersionDeltasResponse = { - fromJSON(object: any): ListVersionDeltasResponse { - return { - versionDeltas: isSet(object.versionDeltas) ? HummockVersionDeltas.fromJSON(object.versionDeltas) : undefined, - }; - }, - - toJSON(message: ListVersionDeltasResponse): unknown { - const obj: any = {}; - message.versionDeltas !== undefined && - (obj.versionDeltas = message.versionDeltas ? HummockVersionDeltas.toJSON(message.versionDeltas) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ListVersionDeltasResponse { - const message = createBaseListVersionDeltasResponse(); - message.versionDeltas = (object.versionDeltas !== undefined && object.versionDeltas !== null) - ? HummockVersionDeltas.fromPartial(object.versionDeltas) - : undefined; - return message; - }, -}; - -function createBasePinnedVersionsSummary(): PinnedVersionsSummary { - return { pinnedVersions: [], workers: {} }; -} - -export const PinnedVersionsSummary = { - fromJSON(object: any): PinnedVersionsSummary { - return { - pinnedVersions: Array.isArray(object?.pinnedVersions) - ? object.pinnedVersions.map((e: any) => HummockPinnedVersion.fromJSON(e)) - : [], - workers: isObject(object.workers) - ? Object.entries(object.workers).reduce<{ [key: number]: WorkerNode }>((acc, [key, value]) => { - acc[Number(key)] = WorkerNode.fromJSON(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: PinnedVersionsSummary): unknown { - const obj: any = {}; - if (message.pinnedVersions) { - obj.pinnedVersions = message.pinnedVersions.map((e) => e ? HummockPinnedVersion.toJSON(e) : undefined); - } else { - obj.pinnedVersions = []; - } - obj.workers = {}; - if (message.workers) { - Object.entries(message.workers).forEach(([k, v]) => { - obj.workers[k] = WorkerNode.toJSON(v); - }); - } - return obj; - }, - - fromPartial, I>>(object: I): PinnedVersionsSummary { - const message = createBasePinnedVersionsSummary(); - message.pinnedVersions = object.pinnedVersions?.map((e) => HummockPinnedVersion.fromPartial(e)) || []; - message.workers = Object.entries(object.workers ?? {}).reduce<{ [key: number]: WorkerNode }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = WorkerNode.fromPartial(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBasePinnedVersionsSummary_WorkersEntry(): PinnedVersionsSummary_WorkersEntry { - return { key: 0, value: undefined }; -} - -export const PinnedVersionsSummary_WorkersEntry = { - fromJSON(object: any): PinnedVersionsSummary_WorkersEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? 
WorkerNode.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: PinnedVersionsSummary_WorkersEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? WorkerNode.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): PinnedVersionsSummary_WorkersEntry { - const message = createBasePinnedVersionsSummary_WorkersEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? WorkerNode.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBasePinnedSnapshotsSummary(): PinnedSnapshotsSummary { - return { pinnedSnapshots: [], workers: {} }; -} - -export const PinnedSnapshotsSummary = { - fromJSON(object: any): PinnedSnapshotsSummary { - return { - pinnedSnapshots: Array.isArray(object?.pinnedSnapshots) - ? object.pinnedSnapshots.map((e: any) => HummockPinnedSnapshot.fromJSON(e)) - : [], - workers: isObject(object.workers) - ? Object.entries(object.workers).reduce<{ [key: number]: WorkerNode }>((acc, [key, value]) => { - acc[Number(key)] = WorkerNode.fromJSON(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: PinnedSnapshotsSummary): unknown { - const obj: any = {}; - if (message.pinnedSnapshots) { - obj.pinnedSnapshots = message.pinnedSnapshots.map((e) => e ? HummockPinnedSnapshot.toJSON(e) : undefined); - } else { - obj.pinnedSnapshots = []; - } - obj.workers = {}; - if (message.workers) { - Object.entries(message.workers).forEach(([k, v]) => { - obj.workers[k] = WorkerNode.toJSON(v); - }); - } - return obj; - }, - - fromPartial, I>>(object: I): PinnedSnapshotsSummary { - const message = createBasePinnedSnapshotsSummary(); - message.pinnedSnapshots = object.pinnedSnapshots?.map((e) => HummockPinnedSnapshot.fromPartial(e)) || []; - message.workers = Object.entries(object.workers ?? {}).reduce<{ [key: number]: WorkerNode }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = WorkerNode.fromPartial(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBasePinnedSnapshotsSummary_WorkersEntry(): PinnedSnapshotsSummary_WorkersEntry { - return { key: 0, value: undefined }; -} - -export const PinnedSnapshotsSummary_WorkersEntry = { - fromJSON(object: any): PinnedSnapshotsSummary_WorkersEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? WorkerNode.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: PinnedSnapshotsSummary_WorkersEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? WorkerNode.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): PinnedSnapshotsSummary_WorkersEntry { - const message = createBasePinnedSnapshotsSummary_WorkersEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? 
WorkerNode.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseRiseCtlGetPinnedVersionsSummaryRequest(): RiseCtlGetPinnedVersionsSummaryRequest { - return {}; -} - -export const RiseCtlGetPinnedVersionsSummaryRequest = { - fromJSON(_: any): RiseCtlGetPinnedVersionsSummaryRequest { - return {}; - }, - - toJSON(_: RiseCtlGetPinnedVersionsSummaryRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): RiseCtlGetPinnedVersionsSummaryRequest { - const message = createBaseRiseCtlGetPinnedVersionsSummaryRequest(); - return message; - }, -}; - -function createBaseRiseCtlGetPinnedVersionsSummaryResponse(): RiseCtlGetPinnedVersionsSummaryResponse { - return { summary: undefined }; -} - -export const RiseCtlGetPinnedVersionsSummaryResponse = { - fromJSON(object: any): RiseCtlGetPinnedVersionsSummaryResponse { - return { summary: isSet(object.summary) ? PinnedVersionsSummary.fromJSON(object.summary) : undefined }; - }, - - toJSON(message: RiseCtlGetPinnedVersionsSummaryResponse): unknown { - const obj: any = {}; - message.summary !== undefined && - (obj.summary = message.summary ? PinnedVersionsSummary.toJSON(message.summary) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): RiseCtlGetPinnedVersionsSummaryResponse { - const message = createBaseRiseCtlGetPinnedVersionsSummaryResponse(); - message.summary = (object.summary !== undefined && object.summary !== null) - ? PinnedVersionsSummary.fromPartial(object.summary) - : undefined; - return message; - }, -}; - -function createBaseRiseCtlGetPinnedSnapshotsSummaryRequest(): RiseCtlGetPinnedSnapshotsSummaryRequest { - return {}; -} - -export const RiseCtlGetPinnedSnapshotsSummaryRequest = { - fromJSON(_: any): RiseCtlGetPinnedSnapshotsSummaryRequest { - return {}; - }, - - toJSON(_: RiseCtlGetPinnedSnapshotsSummaryRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): RiseCtlGetPinnedSnapshotsSummaryRequest { - const message = createBaseRiseCtlGetPinnedSnapshotsSummaryRequest(); - return message; - }, -}; - -function createBaseRiseCtlGetPinnedSnapshotsSummaryResponse(): RiseCtlGetPinnedSnapshotsSummaryResponse { - return { summary: undefined }; -} - -export const RiseCtlGetPinnedSnapshotsSummaryResponse = { - fromJSON(object: any): RiseCtlGetPinnedSnapshotsSummaryResponse { - return { summary: isSet(object.summary) ? PinnedSnapshotsSummary.fromJSON(object.summary) : undefined }; - }, - - toJSON(message: RiseCtlGetPinnedSnapshotsSummaryResponse): unknown { - const obj: any = {}; - message.summary !== undefined && - (obj.summary = message.summary ? PinnedSnapshotsSummary.toJSON(message.summary) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): RiseCtlGetPinnedSnapshotsSummaryResponse { - const message = createBaseRiseCtlGetPinnedSnapshotsSummaryResponse(); - message.summary = (object.summary !== undefined && object.summary !== null) - ? PinnedSnapshotsSummary.fromPartial(object.summary) - : undefined; - return message; - }, -}; - -function createBaseInitMetadataForReplayRequest(): InitMetadataForReplayRequest { - return { tables: [], compactionGroups: [] }; -} - -export const InitMetadataForReplayRequest = { - fromJSON(object: any): InitMetadataForReplayRequest { - return { - tables: Array.isArray(object?.tables) ? object.tables.map((e: any) => Table.fromJSON(e)) : [], - compactionGroups: Array.isArray(object?.compactionGroups) - ? 
object.compactionGroups.map((e: any) => CompactionGroupInfo.fromJSON(e)) - : [], - }; - }, - - toJSON(message: InitMetadataForReplayRequest): unknown { - const obj: any = {}; - if (message.tables) { - obj.tables = message.tables.map((e) => e ? Table.toJSON(e) : undefined); - } else { - obj.tables = []; - } - if (message.compactionGroups) { - obj.compactionGroups = message.compactionGroups.map((e) => e ? CompactionGroupInfo.toJSON(e) : undefined); - } else { - obj.compactionGroups = []; - } - return obj; - }, - - fromPartial, I>>(object: I): InitMetadataForReplayRequest { - const message = createBaseInitMetadataForReplayRequest(); - message.tables = object.tables?.map((e) => Table.fromPartial(e)) || []; - message.compactionGroups = object.compactionGroups?.map((e) => CompactionGroupInfo.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseInitMetadataForReplayResponse(): InitMetadataForReplayResponse { - return {}; -} - -export const InitMetadataForReplayResponse = { - fromJSON(_: any): InitMetadataForReplayResponse { - return {}; - }, - - toJSON(_: InitMetadataForReplayResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): InitMetadataForReplayResponse { - const message = createBaseInitMetadataForReplayResponse(); - return message; - }, -}; - -function createBaseReplayVersionDeltaRequest(): ReplayVersionDeltaRequest { - return { versionDelta: undefined }; -} - -export const ReplayVersionDeltaRequest = { - fromJSON(object: any): ReplayVersionDeltaRequest { - return { versionDelta: isSet(object.versionDelta) ? HummockVersionDelta.fromJSON(object.versionDelta) : undefined }; - }, - - toJSON(message: ReplayVersionDeltaRequest): unknown { - const obj: any = {}; - message.versionDelta !== undefined && - (obj.versionDelta = message.versionDelta ? HummockVersionDelta.toJSON(message.versionDelta) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ReplayVersionDeltaRequest { - const message = createBaseReplayVersionDeltaRequest(); - message.versionDelta = (object.versionDelta !== undefined && object.versionDelta !== null) - ? HummockVersionDelta.fromPartial(object.versionDelta) - : undefined; - return message; - }, -}; - -function createBaseReplayVersionDeltaResponse(): ReplayVersionDeltaResponse { - return { version: undefined, modifiedCompactionGroups: [] }; -} - -export const ReplayVersionDeltaResponse = { - fromJSON(object: any): ReplayVersionDeltaResponse { - return { - version: isSet(object.version) ? HummockVersion.fromJSON(object.version) : undefined, - modifiedCompactionGroups: Array.isArray(object?.modifiedCompactionGroups) - ? object.modifiedCompactionGroups.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: ReplayVersionDeltaResponse): unknown { - const obj: any = {}; - message.version !== undefined && - (obj.version = message.version ? HummockVersion.toJSON(message.version) : undefined); - if (message.modifiedCompactionGroups) { - obj.modifiedCompactionGroups = message.modifiedCompactionGroups.map((e) => Math.round(e)); - } else { - obj.modifiedCompactionGroups = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ReplayVersionDeltaResponse { - const message = createBaseReplayVersionDeltaResponse(); - message.version = (object.version !== undefined && object.version !== null) - ? 
HummockVersion.fromPartial(object.version) - : undefined; - message.modifiedCompactionGroups = object.modifiedCompactionGroups?.map((e) => e) || []; - return message; - }, -}; - -function createBaseTriggerCompactionDeterministicRequest(): TriggerCompactionDeterministicRequest { - return { versionId: 0, compactionGroups: [] }; -} - -export const TriggerCompactionDeterministicRequest = { - fromJSON(object: any): TriggerCompactionDeterministicRequest { - return { - versionId: isSet(object.versionId) ? Number(object.versionId) : 0, - compactionGroups: Array.isArray(object?.compactionGroups) - ? object.compactionGroups.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: TriggerCompactionDeterministicRequest): unknown { - const obj: any = {}; - message.versionId !== undefined && (obj.versionId = Math.round(message.versionId)); - if (message.compactionGroups) { - obj.compactionGroups = message.compactionGroups.map((e) => Math.round(e)); - } else { - obj.compactionGroups = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): TriggerCompactionDeterministicRequest { - const message = createBaseTriggerCompactionDeterministicRequest(); - message.versionId = object.versionId ?? 0; - message.compactionGroups = object.compactionGroups?.map((e) => e) || []; - return message; - }, -}; - -function createBaseTriggerCompactionDeterministicResponse(): TriggerCompactionDeterministicResponse { - return {}; -} - -export const TriggerCompactionDeterministicResponse = { - fromJSON(_: any): TriggerCompactionDeterministicResponse { - return {}; - }, - - toJSON(_: TriggerCompactionDeterministicResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): TriggerCompactionDeterministicResponse { - const message = createBaseTriggerCompactionDeterministicResponse(); - return message; - }, -}; - -function createBaseDisableCommitEpochRequest(): DisableCommitEpochRequest { - return {}; -} - -export const DisableCommitEpochRequest = { - fromJSON(_: any): DisableCommitEpochRequest { - return {}; - }, - - toJSON(_: DisableCommitEpochRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): DisableCommitEpochRequest { - const message = createBaseDisableCommitEpochRequest(); - return message; - }, -}; - -function createBaseDisableCommitEpochResponse(): DisableCommitEpochResponse { - return { currentVersion: undefined }; -} - -export const DisableCommitEpochResponse = { - fromJSON(object: any): DisableCommitEpochResponse { - return { - currentVersion: isSet(object.currentVersion) ? HummockVersion.fromJSON(object.currentVersion) : undefined, - }; - }, - - toJSON(message: DisableCommitEpochResponse): unknown { - const obj: any = {}; - message.currentVersion !== undefined && - (obj.currentVersion = message.currentVersion ? HummockVersion.toJSON(message.currentVersion) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): DisableCommitEpochResponse { - const message = createBaseDisableCommitEpochResponse(); - message.currentVersion = (object.currentVersion !== undefined && object.currentVersion !== null) - ? 
HummockVersion.fromPartial(object.currentVersion) - : undefined; - return message; - }, -}; - -function createBaseRiseCtlListCompactionGroupRequest(): RiseCtlListCompactionGroupRequest { - return {}; -} - -export const RiseCtlListCompactionGroupRequest = { - fromJSON(_: any): RiseCtlListCompactionGroupRequest { - return {}; - }, - - toJSON(_: RiseCtlListCompactionGroupRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): RiseCtlListCompactionGroupRequest { - const message = createBaseRiseCtlListCompactionGroupRequest(); - return message; - }, -}; - -function createBaseRiseCtlListCompactionGroupResponse(): RiseCtlListCompactionGroupResponse { - return { status: undefined, compactionGroups: [] }; -} - -export const RiseCtlListCompactionGroupResponse = { - fromJSON(object: any): RiseCtlListCompactionGroupResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - compactionGroups: Array.isArray(object?.compactionGroups) - ? object.compactionGroups.map((e: any) => CompactionGroupInfo.fromJSON(e)) - : [], - }; - }, - - toJSON(message: RiseCtlListCompactionGroupResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - if (message.compactionGroups) { - obj.compactionGroups = message.compactionGroups.map((e) => e ? CompactionGroupInfo.toJSON(e) : undefined); - } else { - obj.compactionGroups = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): RiseCtlListCompactionGroupResponse { - const message = createBaseRiseCtlListCompactionGroupResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.compactionGroups = object.compactionGroups?.map((e) => CompactionGroupInfo.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseRiseCtlUpdateCompactionConfigRequest(): RiseCtlUpdateCompactionConfigRequest { - return { compactionGroupIds: [], configs: [] }; -} - -export const RiseCtlUpdateCompactionConfigRequest = { - fromJSON(object: any): RiseCtlUpdateCompactionConfigRequest { - return { - compactionGroupIds: Array.isArray(object?.compactionGroupIds) - ? object.compactionGroupIds.map((e: any) => Number(e)) - : [], - configs: Array.isArray(object?.configs) - ? object.configs.map((e: any) => RiseCtlUpdateCompactionConfigRequest_MutableConfig.fromJSON(e)) - : [], - }; - }, - - toJSON(message: RiseCtlUpdateCompactionConfigRequest): unknown { - const obj: any = {}; - if (message.compactionGroupIds) { - obj.compactionGroupIds = message.compactionGroupIds.map((e) => Math.round(e)); - } else { - obj.compactionGroupIds = []; - } - if (message.configs) { - obj.configs = message.configs.map((e) => - e ? 
RiseCtlUpdateCompactionConfigRequest_MutableConfig.toJSON(e) : undefined - ); - } else { - obj.configs = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): RiseCtlUpdateCompactionConfigRequest { - const message = createBaseRiseCtlUpdateCompactionConfigRequest(); - message.compactionGroupIds = object.compactionGroupIds?.map((e) => e) || []; - message.configs = object.configs?.map((e) => RiseCtlUpdateCompactionConfigRequest_MutableConfig.fromPartial(e)) || - []; - return message; - }, -}; - -function createBaseRiseCtlUpdateCompactionConfigRequest_MutableConfig(): RiseCtlUpdateCompactionConfigRequest_MutableConfig { - return { mutableConfig: undefined }; -} - -export const RiseCtlUpdateCompactionConfigRequest_MutableConfig = { - fromJSON(object: any): RiseCtlUpdateCompactionConfigRequest_MutableConfig { - return { - mutableConfig: isSet(object.maxBytesForLevelBase) - ? { $case: "maxBytesForLevelBase", maxBytesForLevelBase: Number(object.maxBytesForLevelBase) } - : isSet(object.maxBytesForLevelMultiplier) - ? { $case: "maxBytesForLevelMultiplier", maxBytesForLevelMultiplier: Number(object.maxBytesForLevelMultiplier) } - : isSet(object.maxCompactionBytes) - ? { $case: "maxCompactionBytes", maxCompactionBytes: Number(object.maxCompactionBytes) } - : isSet(object.subLevelMaxCompactionBytes) - ? { $case: "subLevelMaxCompactionBytes", subLevelMaxCompactionBytes: Number(object.subLevelMaxCompactionBytes) } - : isSet(object.level0TierCompactFileNumber) - ? { - $case: "level0TierCompactFileNumber", - level0TierCompactFileNumber: Number(object.level0TierCompactFileNumber), - } - : isSet(object.targetFileSizeBase) - ? { $case: "targetFileSizeBase", targetFileSizeBase: Number(object.targetFileSizeBase) } - : isSet(object.compactionFilterMask) - ? { $case: "compactionFilterMask", compactionFilterMask: Number(object.compactionFilterMask) } - : isSet(object.maxSubCompaction) - ? 
{ $case: "maxSubCompaction", maxSubCompaction: Number(object.maxSubCompaction) } - : undefined, - }; - }, - - toJSON(message: RiseCtlUpdateCompactionConfigRequest_MutableConfig): unknown { - const obj: any = {}; - message.mutableConfig?.$case === "maxBytesForLevelBase" && - (obj.maxBytesForLevelBase = Math.round(message.mutableConfig?.maxBytesForLevelBase)); - message.mutableConfig?.$case === "maxBytesForLevelMultiplier" && - (obj.maxBytesForLevelMultiplier = Math.round(message.mutableConfig?.maxBytesForLevelMultiplier)); - message.mutableConfig?.$case === "maxCompactionBytes" && - (obj.maxCompactionBytes = Math.round(message.mutableConfig?.maxCompactionBytes)); - message.mutableConfig?.$case === "subLevelMaxCompactionBytes" && - (obj.subLevelMaxCompactionBytes = Math.round(message.mutableConfig?.subLevelMaxCompactionBytes)); - message.mutableConfig?.$case === "level0TierCompactFileNumber" && - (obj.level0TierCompactFileNumber = Math.round(message.mutableConfig?.level0TierCompactFileNumber)); - message.mutableConfig?.$case === "targetFileSizeBase" && - (obj.targetFileSizeBase = Math.round(message.mutableConfig?.targetFileSizeBase)); - message.mutableConfig?.$case === "compactionFilterMask" && - (obj.compactionFilterMask = Math.round(message.mutableConfig?.compactionFilterMask)); - message.mutableConfig?.$case === "maxSubCompaction" && - (obj.maxSubCompaction = Math.round(message.mutableConfig?.maxSubCompaction)); - return obj; - }, - - fromPartial, I>>( - object: I, - ): RiseCtlUpdateCompactionConfigRequest_MutableConfig { - const message = createBaseRiseCtlUpdateCompactionConfigRequest_MutableConfig(); - if ( - object.mutableConfig?.$case === "maxBytesForLevelBase" && - object.mutableConfig?.maxBytesForLevelBase !== undefined && - object.mutableConfig?.maxBytesForLevelBase !== null - ) { - message.mutableConfig = { - $case: "maxBytesForLevelBase", - maxBytesForLevelBase: object.mutableConfig.maxBytesForLevelBase, - }; - } - if ( - object.mutableConfig?.$case === "maxBytesForLevelMultiplier" && - object.mutableConfig?.maxBytesForLevelMultiplier !== undefined && - object.mutableConfig?.maxBytesForLevelMultiplier !== null - ) { - message.mutableConfig = { - $case: "maxBytesForLevelMultiplier", - maxBytesForLevelMultiplier: object.mutableConfig.maxBytesForLevelMultiplier, - }; - } - if ( - object.mutableConfig?.$case === "maxCompactionBytes" && - object.mutableConfig?.maxCompactionBytes !== undefined && - object.mutableConfig?.maxCompactionBytes !== null - ) { - message.mutableConfig = { - $case: "maxCompactionBytes", - maxCompactionBytes: object.mutableConfig.maxCompactionBytes, - }; - } - if ( - object.mutableConfig?.$case === "subLevelMaxCompactionBytes" && - object.mutableConfig?.subLevelMaxCompactionBytes !== undefined && - object.mutableConfig?.subLevelMaxCompactionBytes !== null - ) { - message.mutableConfig = { - $case: "subLevelMaxCompactionBytes", - subLevelMaxCompactionBytes: object.mutableConfig.subLevelMaxCompactionBytes, - }; - } - if ( - object.mutableConfig?.$case === "level0TierCompactFileNumber" && - object.mutableConfig?.level0TierCompactFileNumber !== undefined && - object.mutableConfig?.level0TierCompactFileNumber !== null - ) { - message.mutableConfig = { - $case: "level0TierCompactFileNumber", - level0TierCompactFileNumber: object.mutableConfig.level0TierCompactFileNumber, - }; - } - if ( - object.mutableConfig?.$case === "targetFileSizeBase" && - object.mutableConfig?.targetFileSizeBase !== undefined && - object.mutableConfig?.targetFileSizeBase !== null - ) { - 
message.mutableConfig = { - $case: "targetFileSizeBase", - targetFileSizeBase: object.mutableConfig.targetFileSizeBase, - }; - } - if ( - object.mutableConfig?.$case === "compactionFilterMask" && - object.mutableConfig?.compactionFilterMask !== undefined && - object.mutableConfig?.compactionFilterMask !== null - ) { - message.mutableConfig = { - $case: "compactionFilterMask", - compactionFilterMask: object.mutableConfig.compactionFilterMask, - }; - } - if ( - object.mutableConfig?.$case === "maxSubCompaction" && - object.mutableConfig?.maxSubCompaction !== undefined && - object.mutableConfig?.maxSubCompaction !== null - ) { - message.mutableConfig = { $case: "maxSubCompaction", maxSubCompaction: object.mutableConfig.maxSubCompaction }; - } - return message; - }, -}; - -function createBaseRiseCtlUpdateCompactionConfigResponse(): RiseCtlUpdateCompactionConfigResponse { - return { status: undefined }; -} - -export const RiseCtlUpdateCompactionConfigResponse = { - fromJSON(object: any): RiseCtlUpdateCompactionConfigResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: RiseCtlUpdateCompactionConfigResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): RiseCtlUpdateCompactionConfigResponse { - const message = createBaseRiseCtlUpdateCompactionConfigResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseSetCompactorRuntimeConfigRequest(): SetCompactorRuntimeConfigRequest { - return { contextId: 0, config: undefined }; -} - -export const SetCompactorRuntimeConfigRequest = { - fromJSON(object: any): SetCompactorRuntimeConfigRequest { - return { - contextId: isSet(object.contextId) ? Number(object.contextId) : 0, - config: isSet(object.config) ? CompactorRuntimeConfig.fromJSON(object.config) : undefined, - }; - }, - - toJSON(message: SetCompactorRuntimeConfigRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - message.config !== undefined && - (obj.config = message.config ? CompactorRuntimeConfig.toJSON(message.config) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): SetCompactorRuntimeConfigRequest { - const message = createBaseSetCompactorRuntimeConfigRequest(); - message.contextId = object.contextId ?? 0; - message.config = (object.config !== undefined && object.config !== null) - ? CompactorRuntimeConfig.fromPartial(object.config) - : undefined; - return message; - }, -}; - -function createBaseSetCompactorRuntimeConfigResponse(): SetCompactorRuntimeConfigResponse { - return {}; -} - -export const SetCompactorRuntimeConfigResponse = { - fromJSON(_: any): SetCompactorRuntimeConfigResponse { - return {}; - }, - - toJSON(_: SetCompactorRuntimeConfigResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): SetCompactorRuntimeConfigResponse { - const message = createBaseSetCompactorRuntimeConfigResponse(); - return message; - }, -}; - -function createBasePinVersionRequest(): PinVersionRequest { - return { contextId: 0 }; -} - -export const PinVersionRequest = { - fromJSON(object: any): PinVersionRequest { - return { contextId: isSet(object.contextId) ? 
Number(object.contextId) : 0 }; - }, - - toJSON(message: PinVersionRequest): unknown { - const obj: any = {}; - message.contextId !== undefined && (obj.contextId = Math.round(message.contextId)); - return obj; - }, - - fromPartial, I>>(object: I): PinVersionRequest { - const message = createBasePinVersionRequest(); - message.contextId = object.contextId ?? 0; - return message; - }, -}; - -function createBasePinVersionResponse(): PinVersionResponse { - return { pinnedVersion: undefined }; -} - -export const PinVersionResponse = { - fromJSON(object: any): PinVersionResponse { - return { pinnedVersion: isSet(object.pinnedVersion) ? HummockVersion.fromJSON(object.pinnedVersion) : undefined }; - }, - - toJSON(message: PinVersionResponse): unknown { - const obj: any = {}; - message.pinnedVersion !== undefined && - (obj.pinnedVersion = message.pinnedVersion ? HummockVersion.toJSON(message.pinnedVersion) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): PinVersionResponse { - const message = createBasePinVersionResponse(); - message.pinnedVersion = (object.pinnedVersion !== undefined && object.pinnedVersion !== null) - ? HummockVersion.fromPartial(object.pinnedVersion) - : undefined; - return message; - }, -}; - -function createBaseSplitCompactionGroupRequest(): SplitCompactionGroupRequest { - return { groupId: 0, tableIds: [] }; -} - -export const SplitCompactionGroupRequest = { - fromJSON(object: any): SplitCompactionGroupRequest { - return { - groupId: isSet(object.groupId) ? Number(object.groupId) : 0, - tableIds: Array.isArray(object?.tableIds) ? object.tableIds.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: SplitCompactionGroupRequest): unknown { - const obj: any = {}; - message.groupId !== undefined && (obj.groupId = Math.round(message.groupId)); - if (message.tableIds) { - obj.tableIds = message.tableIds.map((e) => Math.round(e)); - } else { - obj.tableIds = []; - } - return obj; - }, - - fromPartial, I>>(object: I): SplitCompactionGroupRequest { - const message = createBaseSplitCompactionGroupRequest(); - message.groupId = object.groupId ?? 0; - message.tableIds = object.tableIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseSplitCompactionGroupResponse(): SplitCompactionGroupResponse { - return { newGroupId: 0 }; -} - -export const SplitCompactionGroupResponse = { - fromJSON(object: any): SplitCompactionGroupResponse { - return { newGroupId: isSet(object.newGroupId) ? Number(object.newGroupId) : 0 }; - }, - - toJSON(message: SplitCompactionGroupResponse): unknown { - const obj: any = {}; - message.newGroupId !== undefined && (obj.newGroupId = Math.round(message.newGroupId)); - return obj; - }, - - fromPartial, I>>(object: I): SplitCompactionGroupResponse { - const message = createBaseSplitCompactionGroupResponse(); - message.newGroupId = object.newGroupId ?? 0; - return message; - }, -}; - -function createBaseCompactionConfig(): CompactionConfig { - return { - maxBytesForLevelBase: 0, - maxLevel: 0, - maxBytesForLevelMultiplier: 0, - maxCompactionBytes: 0, - subLevelMaxCompactionBytes: 0, - level0TierCompactFileNumber: 0, - compactionMode: CompactionConfig_CompactionMode.UNSPECIFIED, - compressionAlgorithm: [], - targetFileSizeBase: 0, - compactionFilterMask: 0, - maxSubCompaction: 0, - maxSpaceReclaimBytes: 0, - splitByStateTable: false, - }; -} - -export const CompactionConfig = { - fromJSON(object: any): CompactionConfig { - return { - maxBytesForLevelBase: isSet(object.maxBytesForLevelBase) ? 
Number(object.maxBytesForLevelBase) : 0, - maxLevel: isSet(object.maxLevel) ? Number(object.maxLevel) : 0, - maxBytesForLevelMultiplier: isSet(object.maxBytesForLevelMultiplier) - ? Number(object.maxBytesForLevelMultiplier) - : 0, - maxCompactionBytes: isSet(object.maxCompactionBytes) ? Number(object.maxCompactionBytes) : 0, - subLevelMaxCompactionBytes: isSet(object.subLevelMaxCompactionBytes) - ? Number(object.subLevelMaxCompactionBytes) - : 0, - level0TierCompactFileNumber: isSet(object.level0TierCompactFileNumber) - ? Number(object.level0TierCompactFileNumber) - : 0, - compactionMode: isSet(object.compactionMode) - ? compactionConfig_CompactionModeFromJSON(object.compactionMode) - : CompactionConfig_CompactionMode.UNSPECIFIED, - compressionAlgorithm: Array.isArray(object?.compressionAlgorithm) - ? object.compressionAlgorithm.map((e: any) => String(e)) - : [], - targetFileSizeBase: isSet(object.targetFileSizeBase) ? Number(object.targetFileSizeBase) : 0, - compactionFilterMask: isSet(object.compactionFilterMask) ? Number(object.compactionFilterMask) : 0, - maxSubCompaction: isSet(object.maxSubCompaction) ? Number(object.maxSubCompaction) : 0, - maxSpaceReclaimBytes: isSet(object.maxSpaceReclaimBytes) ? Number(object.maxSpaceReclaimBytes) : 0, - splitByStateTable: isSet(object.splitByStateTable) ? Boolean(object.splitByStateTable) : false, - }; - }, - - toJSON(message: CompactionConfig): unknown { - const obj: any = {}; - message.maxBytesForLevelBase !== undefined && (obj.maxBytesForLevelBase = Math.round(message.maxBytesForLevelBase)); - message.maxLevel !== undefined && (obj.maxLevel = Math.round(message.maxLevel)); - message.maxBytesForLevelMultiplier !== undefined && - (obj.maxBytesForLevelMultiplier = Math.round(message.maxBytesForLevelMultiplier)); - message.maxCompactionBytes !== undefined && (obj.maxCompactionBytes = Math.round(message.maxCompactionBytes)); - message.subLevelMaxCompactionBytes !== undefined && - (obj.subLevelMaxCompactionBytes = Math.round(message.subLevelMaxCompactionBytes)); - message.level0TierCompactFileNumber !== undefined && - (obj.level0TierCompactFileNumber = Math.round(message.level0TierCompactFileNumber)); - message.compactionMode !== undefined && - (obj.compactionMode = compactionConfig_CompactionModeToJSON(message.compactionMode)); - if (message.compressionAlgorithm) { - obj.compressionAlgorithm = message.compressionAlgorithm.map((e) => e); - } else { - obj.compressionAlgorithm = []; - } - message.targetFileSizeBase !== undefined && (obj.targetFileSizeBase = Math.round(message.targetFileSizeBase)); - message.compactionFilterMask !== undefined && (obj.compactionFilterMask = Math.round(message.compactionFilterMask)); - message.maxSubCompaction !== undefined && (obj.maxSubCompaction = Math.round(message.maxSubCompaction)); - message.maxSpaceReclaimBytes !== undefined && (obj.maxSpaceReclaimBytes = Math.round(message.maxSpaceReclaimBytes)); - message.splitByStateTable !== undefined && (obj.splitByStateTable = message.splitByStateTable); - return obj; - }, - - fromPartial, I>>(object: I): CompactionConfig { - const message = createBaseCompactionConfig(); - message.maxBytesForLevelBase = object.maxBytesForLevelBase ?? 0; - message.maxLevel = object.maxLevel ?? 0; - message.maxBytesForLevelMultiplier = object.maxBytesForLevelMultiplier ?? 0; - message.maxCompactionBytes = object.maxCompactionBytes ?? 0; - message.subLevelMaxCompactionBytes = object.subLevelMaxCompactionBytes ?? 0; - message.level0TierCompactFileNumber = object.level0TierCompactFileNumber ?? 
0; - message.compactionMode = object.compactionMode ?? CompactionConfig_CompactionMode.UNSPECIFIED; - message.compressionAlgorithm = object.compressionAlgorithm?.map((e) => e) || []; - message.targetFileSizeBase = object.targetFileSizeBase ?? 0; - message.compactionFilterMask = object.compactionFilterMask ?? 0; - message.maxSubCompaction = object.maxSubCompaction ?? 0; - message.maxSpaceReclaimBytes = object.maxSpaceReclaimBytes ?? 0; - message.splitByStateTable = object.splitByStateTable ?? false; - return message; - }, -}; - -function createBaseTableStats(): TableStats { - return { totalKeySize: 0, totalValueSize: 0, totalKeyCount: 0 }; -} - -export const TableStats = { - fromJSON(object: any): TableStats { - return { - totalKeySize: isSet(object.totalKeySize) ? Number(object.totalKeySize) : 0, - totalValueSize: isSet(object.totalValueSize) ? Number(object.totalValueSize) : 0, - totalKeyCount: isSet(object.totalKeyCount) ? Number(object.totalKeyCount) : 0, - }; - }, - - toJSON(message: TableStats): unknown { - const obj: any = {}; - message.totalKeySize !== undefined && (obj.totalKeySize = Math.round(message.totalKeySize)); - message.totalValueSize !== undefined && (obj.totalValueSize = Math.round(message.totalValueSize)); - message.totalKeyCount !== undefined && (obj.totalKeyCount = Math.round(message.totalKeyCount)); - return obj; - }, - - fromPartial, I>>(object: I): TableStats { - const message = createBaseTableStats(); - message.totalKeySize = object.totalKeySize ?? 0; - message.totalValueSize = object.totalValueSize ?? 0; - message.totalKeyCount = object.totalKeyCount ?? 0; - return message; - }, -}; - -function createBaseHummockVersionStats(): HummockVersionStats { - return { hummockVersionId: 0, tableStats: {} }; -} - -export const HummockVersionStats = { - fromJSON(object: any): HummockVersionStats { - return { - hummockVersionId: isSet(object.hummockVersionId) ? Number(object.hummockVersionId) : 0, - tableStats: isObject(object.tableStats) - ? Object.entries(object.tableStats).reduce<{ [key: number]: TableStats }>((acc, [key, value]) => { - acc[Number(key)] = TableStats.fromJSON(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: HummockVersionStats): unknown { - const obj: any = {}; - message.hummockVersionId !== undefined && (obj.hummockVersionId = Math.round(message.hummockVersionId)); - obj.tableStats = {}; - if (message.tableStats) { - Object.entries(message.tableStats).forEach(([k, v]) => { - obj.tableStats[k] = TableStats.toJSON(v); - }); - } - return obj; - }, - - fromPartial, I>>(object: I): HummockVersionStats { - const message = createBaseHummockVersionStats(); - message.hummockVersionId = object.hummockVersionId ?? 0; - message.tableStats = Object.entries(object.tableStats ?? {}).reduce<{ [key: number]: TableStats }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = TableStats.fromPartial(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBaseHummockVersionStats_TableStatsEntry(): HummockVersionStats_TableStatsEntry { - return { key: 0, value: undefined }; -} - -export const HummockVersionStats_TableStatsEntry = { - fromJSON(object: any): HummockVersionStats_TableStatsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? 
TableStats.fromJSON(object.value) : undefined,
-    };
-  },
-
-  toJSON(message: HummockVersionStats_TableStatsEntry): unknown {
-    const obj: any = {};
-    message.key !== undefined && (obj.key = Math.round(message.key));
-    message.value !== undefined && (obj.value = message.value ? TableStats.toJSON(message.value) : undefined);
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<HummockVersionStats_TableStatsEntry>, I>>(
-    object: I,
-  ): HummockVersionStats_TableStatsEntry {
-    const message = createBaseHummockVersionStats_TableStatsEntry();
-    message.key = object.key ?? 0;
-    message.value = (object.value !== undefined && object.value !== null)
-      ? TableStats.fromPartial(object.value)
-      : undefined;
-    return message;
-  },
-};
-
-declare var self: any | undefined;
-declare var window: any | undefined;
-declare var global: any | undefined;
-var globalThis: any = (() => {
-  if (typeof globalThis !== "undefined") {
-    return globalThis;
-  }
-  if (typeof self !== "undefined") {
-    return self;
-  }
-  if (typeof window !== "undefined") {
-    return window;
-  }
-  if (typeof global !== "undefined") {
-    return global;
-  }
-  throw "Unable to locate global object";
-})();
-
-function bytesFromBase64(b64: string): Uint8Array {
-  if (globalThis.Buffer) {
-    return Uint8Array.from(globalThis.Buffer.from(b64, "base64"));
-  } else {
-    const bin = globalThis.atob(b64);
-    const arr = new Uint8Array(bin.length);
-    for (let i = 0; i < bin.length; ++i) {
-      arr[i] = bin.charCodeAt(i);
-    }
-    return arr;
-  }
-}
-
-function base64FromBytes(arr: Uint8Array): string {
-  if (globalThis.Buffer) {
-    return globalThis.Buffer.from(arr).toString("base64");
-  } else {
-    const bin: string[] = [];
-    arr.forEach((byte) => {
-      bin.push(String.fromCharCode(byte));
-    });
-    return globalThis.btoa(bin.join(""));
-  }
-}
-
-type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
-
-export type DeepPartial<T> = T extends Builtin ? T
-  : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>>
-  : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] }
-  : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> }
-  : Partial<T>;
-
-type KeysOfUnion<T> = T extends T ? keyof T : never;
-export type Exact<P, I extends P> = P extends Builtin ? P
-  : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never };
-
-function isObject(value: any): boolean {
-  return typeof value === "object" && value !== null;
-}
-
-function isSet(value: any): boolean {
-  return value !== null && value !== undefined;
-}
diff --git a/dashboard/proto/gen/java_binding.ts b/dashboard/proto/gen/java_binding.ts
deleted file mode 100644
index 5cc7e19adaf82..0000000000000
--- a/dashboard/proto/gen/java_binding.ts
+++ /dev/null
@@ -1,237 +0,0 @@
-/* eslint-disable */
-import { Table } from "./catalog";
-import { HummockVersion } from "./hummock";
-
-export const protobufPackage = "java_binding";
-
-/** When `left` or `right` is none, it represents unbounded.
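In the JSON form produced by these generated modules, protobuf `bytes` fields travel as base64 strings and are decoded back to `Uint8Array` by `bytesFromBase64` above. A minimal round-trip sketch through the `KeyRange` helpers defined below, assuming the module were importable as "./java_binding" and running under Node (for `Buffer`):

    import { KeyRange, KeyRange_Bound } from "./java_binding";

    // JSON wire form: bytes fields are base64 strings, enums are string names.
    const json = {
      left: Buffer.from([0x00, 0x01]).toString("base64"),
      right: Buffer.from([0xff]).toString("base64"),
      leftBound: "INCLUDED",
      rightBound: "EXCLUDED",
    };

    const range = KeyRange.fromJSON(json); // left/right decoded to Uint8Array
    console.assert(range.leftBound === KeyRange_Bound.INCLUDED);
    console.assert((KeyRange.toJSON(range) as any).left === json.left); // re-encoded as base64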
*/ -export interface KeyRange { - left: Uint8Array; - right: Uint8Array; - leftBound: KeyRange_Bound; - rightBound: KeyRange_Bound; -} - -export const KeyRange_Bound = { - UNSPECIFIED: "UNSPECIFIED", - UNBOUNDED: "UNBOUNDED", - INCLUDED: "INCLUDED", - EXCLUDED: "EXCLUDED", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type KeyRange_Bound = typeof KeyRange_Bound[keyof typeof KeyRange_Bound]; - -export function keyRange_BoundFromJSON(object: any): KeyRange_Bound { - switch (object) { - case 0: - case "UNSPECIFIED": - return KeyRange_Bound.UNSPECIFIED; - case 1: - case "UNBOUNDED": - return KeyRange_Bound.UNBOUNDED; - case 2: - case "INCLUDED": - return KeyRange_Bound.INCLUDED; - case 3: - case "EXCLUDED": - return KeyRange_Bound.EXCLUDED; - case -1: - case "UNRECOGNIZED": - default: - return KeyRange_Bound.UNRECOGNIZED; - } -} - -export function keyRange_BoundToJSON(object: KeyRange_Bound): string { - switch (object) { - case KeyRange_Bound.UNSPECIFIED: - return "UNSPECIFIED"; - case KeyRange_Bound.UNBOUNDED: - return "UNBOUNDED"; - case KeyRange_Bound.INCLUDED: - return "INCLUDED"; - case KeyRange_Bound.EXCLUDED: - return "EXCLUDED"; - case KeyRange_Bound.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface ReadPlan { - objectStoreUrl: string; - dataDir: string; - keyRange: KeyRange | undefined; - tableId: number; - epoch: number; - version: HummockVersion | undefined; - tableCatalog: Table | undefined; - vnodeIds: number[]; -} - -function createBaseKeyRange(): KeyRange { - return { - left: new Uint8Array(), - right: new Uint8Array(), - leftBound: KeyRange_Bound.UNSPECIFIED, - rightBound: KeyRange_Bound.UNSPECIFIED, - }; -} - -export const KeyRange = { - fromJSON(object: any): KeyRange { - return { - left: isSet(object.left) ? bytesFromBase64(object.left) : new Uint8Array(), - right: isSet(object.right) ? bytesFromBase64(object.right) : new Uint8Array(), - leftBound: isSet(object.leftBound) ? keyRange_BoundFromJSON(object.leftBound) : KeyRange_Bound.UNSPECIFIED, - rightBound: isSet(object.rightBound) ? keyRange_BoundFromJSON(object.rightBound) : KeyRange_Bound.UNSPECIFIED, - }; - }, - - toJSON(message: KeyRange): unknown { - const obj: any = {}; - message.left !== undefined && - (obj.left = base64FromBytes(message.left !== undefined ? message.left : new Uint8Array())); - message.right !== undefined && - (obj.right = base64FromBytes(message.right !== undefined ? message.right : new Uint8Array())); - message.leftBound !== undefined && (obj.leftBound = keyRange_BoundToJSON(message.leftBound)); - message.rightBound !== undefined && (obj.rightBound = keyRange_BoundToJSON(message.rightBound)); - return obj; - }, - - fromPartial, I>>(object: I): KeyRange { - const message = createBaseKeyRange(); - message.left = object.left ?? new Uint8Array(); - message.right = object.right ?? new Uint8Array(); - message.leftBound = object.leftBound ?? KeyRange_Bound.UNSPECIFIED; - message.rightBound = object.rightBound ?? KeyRange_Bound.UNSPECIFIED; - return message; - }, -}; - -function createBaseReadPlan(): ReadPlan { - return { - objectStoreUrl: "", - dataDir: "", - keyRange: undefined, - tableId: 0, - epoch: 0, - version: undefined, - tableCatalog: undefined, - vnodeIds: [], - }; -} - -export const ReadPlan = { - fromJSON(object: any): ReadPlan { - return { - objectStoreUrl: isSet(object.objectStoreUrl) ? String(object.objectStoreUrl) : "", - dataDir: isSet(object.dataDir) ? String(object.dataDir) : "", - keyRange: isSet(object.keyRange) ? 
KeyRange.fromJSON(object.keyRange) : undefined, - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - epoch: isSet(object.epoch) ? Number(object.epoch) : 0, - version: isSet(object.version) ? HummockVersion.fromJSON(object.version) : undefined, - tableCatalog: isSet(object.tableCatalog) ? Table.fromJSON(object.tableCatalog) : undefined, - vnodeIds: Array.isArray(object?.vnodeIds) ? object.vnodeIds.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: ReadPlan): unknown { - const obj: any = {}; - message.objectStoreUrl !== undefined && (obj.objectStoreUrl = message.objectStoreUrl); - message.dataDir !== undefined && (obj.dataDir = message.dataDir); - message.keyRange !== undefined && (obj.keyRange = message.keyRange ? KeyRange.toJSON(message.keyRange) : undefined); - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - message.epoch !== undefined && (obj.epoch = Math.round(message.epoch)); - message.version !== undefined && - (obj.version = message.version ? HummockVersion.toJSON(message.version) : undefined); - message.tableCatalog !== undefined && - (obj.tableCatalog = message.tableCatalog ? Table.toJSON(message.tableCatalog) : undefined); - if (message.vnodeIds) { - obj.vnodeIds = message.vnodeIds.map((e) => Math.round(e)); - } else { - obj.vnodeIds = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ReadPlan { - const message = createBaseReadPlan(); - message.objectStoreUrl = object.objectStoreUrl ?? ""; - message.dataDir = object.dataDir ?? ""; - message.keyRange = (object.keyRange !== undefined && object.keyRange !== null) - ? KeyRange.fromPartial(object.keyRange) - : undefined; - message.tableId = object.tableId ?? 0; - message.epoch = object.epoch ?? 0; - message.version = (object.version !== undefined && object.version !== null) - ? HummockVersion.fromPartial(object.version) - : undefined; - message.tableCatalog = (object.tableCatalog !== undefined && object.tableCatalog !== null) - ? Table.fromPartial(object.tableCatalog) - : undefined; - message.vnodeIds = object.vnodeIds?.map((e) => e) || []; - return message; - }, -}; - -declare var self: any | undefined; -declare var window: any | undefined; -declare var global: any | undefined; -var globalThis: any = (() => { - if (typeof globalThis !== "undefined") { - return globalThis; - } - if (typeof self !== "undefined") { - return self; - } - if (typeof window !== "undefined") { - return window; - } - if (typeof global !== "undefined") { - return global; - } - throw "Unable to locate global object"; -})(); - -function bytesFromBase64(b64: string): Uint8Array { - if (globalThis.Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if (globalThis.Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends { $case: string } ? { [K in keyof Omit]?: DeepPartial } & { $case: T["$case"] } - : T extends {} ? 
{ [K in keyof T]?: DeepPartial<T[K]> }
-  : Partial<T>;
-
-type KeysOfUnion<T> = T extends T ? keyof T : never;
-export type Exact<P, I extends P> = P extends Builtin ? P
-  : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never };
-
-function isSet(value: any): boolean {
-  return value !== null && value !== undefined;
-}
diff --git a/dashboard/proto/gen/meta.ts b/dashboard/proto/gen/meta.ts
deleted file mode 100644
index f85c4aefce7e9..0000000000000
--- a/dashboard/proto/gen/meta.ts
+++ /dev/null
@@ -1,2637 +0,0 @@
-/* eslint-disable */
-import { MetaBackupManifestId } from "./backup_service";
-import { Database, Function, Index, Schema, Sink, Source, Table, View } from "./catalog";
-import {
-  HostAddress,
-  ParallelUnit,
-  ParallelUnitMapping,
-  Status,
-  WorkerNode,
-  WorkerType,
-  workerTypeFromJSON,
-  workerTypeToJSON,
-} from "./common";
-import { HummockSnapshot, HummockVersion, HummockVersionDeltas } from "./hummock";
-import { ConnectorSplits } from "./source";
-import { Dispatcher, StreamActor, StreamEnvironment, StreamNode } from "./stream_plan";
-import { UserInfo } from "./user";
-
-export const protobufPackage = "meta";
-
-export const SubscribeType = {
-  UNSPECIFIED: "UNSPECIFIED",
-  FRONTEND: "FRONTEND",
-  HUMMOCK: "HUMMOCK",
-  COMPACTOR: "COMPACTOR",
-  COMPUTE: "COMPUTE",
-  UNRECOGNIZED: "UNRECOGNIZED",
-} as const;
-
-export type SubscribeType = typeof SubscribeType[keyof typeof SubscribeType];
-
-export function subscribeTypeFromJSON(object: any): SubscribeType {
-  switch (object) {
-    case 0:
-    case "UNSPECIFIED":
-      return SubscribeType.UNSPECIFIED;
-    case 1:
-    case "FRONTEND":
-      return SubscribeType.FRONTEND;
-    case 2:
-    case "HUMMOCK":
-      return SubscribeType.HUMMOCK;
-    case 3:
-    case "COMPACTOR":
-      return SubscribeType.COMPACTOR;
-    case 4:
-    case "COMPUTE":
-      return SubscribeType.COMPUTE;
-    case -1:
-    case "UNRECOGNIZED":
-    default:
-      return SubscribeType.UNRECOGNIZED;
-  }
-}
-
-export function subscribeTypeToJSON(object: SubscribeType): string {
-  switch (object) {
-    case SubscribeType.UNSPECIFIED:
-      return "UNSPECIFIED";
-    case SubscribeType.FRONTEND:
-      return "FRONTEND";
-    case SubscribeType.HUMMOCK:
-      return "HUMMOCK";
-    case SubscribeType.COMPACTOR:
-      return "COMPACTOR";
-    case SubscribeType.COMPUTE:
-      return "COMPUTE";
-    case SubscribeType.UNRECOGNIZED:
-    default:
-      return "UNRECOGNIZED";
-  }
-}
-
-export interface HeartbeatRequest {
-  nodeId: number;
-  /** Lightweight info piggybacked by heartbeat request. */
-  info: HeartbeatRequest_ExtraInfo[];
-}
-
-export interface HeartbeatRequest_ExtraInfo {
-  info?: { $case: "hummockGcWatermark"; hummockGcWatermark: number };
-}
-
-export interface HeartbeatResponse {
-  status: Status | undefined;
-}
-
-/** Fragments of a Streaming Job */
-export interface TableFragments {
-  tableId: number;
-  state: TableFragments_State;
-  fragments: { [key: number]: TableFragments_Fragment };
-  actorStatus: { [key: number]: TableFragments_ActorStatus };
-  actorSplits: { [key: number]: ConnectorSplits };
-  env: StreamEnvironment | undefined;
-}
-
-/** The state of the fragments of this table */
-export const TableFragments_State = {
-  UNSPECIFIED: "UNSPECIFIED",
-  /** INITIAL - The streaming job is initial. */
-  INITIAL: "INITIAL",
-  /** CREATING - The streaming job is creating. */
-  CREATING: "CREATING",
-  /** CREATED - The streaming job has been created. */
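The enum shape above is ts-proto's string-enum output: an `as const` object plus a derived literal-union type, with the paired `*FromJSON` converter accepting either the numeric wire value or the string name and falling back to `UNRECOGNIZED`. A small sketch, assuming the deleted module were importable as "./meta":

    import { SubscribeType, subscribeTypeFromJSON, subscribeTypeToJSON } from "./meta";

    // The converter accepts the numeric wire value as well as the string name.
    console.assert(subscribeTypeFromJSON(2) === SubscribeType.HUMMOCK);
    console.assert(subscribeTypeFromJSON("HUMMOCK") === SubscribeType.HUMMOCK);

    // Unknown inputs degrade to UNRECOGNIZED instead of throwing.
    console.assert(subscribeTypeFromJSON(42) === SubscribeType.UNRECOGNIZED);

    // The reverse mapping emits the canonical string form for JSON output.
    console.assert(subscribeTypeToJSON(SubscribeType.COMPACTOR) === "COMPACTOR");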
-  CREATED: "CREATED",
-  UNRECOGNIZED: "UNRECOGNIZED",
-} as const;
-
-export type TableFragments_State = typeof TableFragments_State[keyof typeof TableFragments_State];
-
-export function tableFragments_StateFromJSON(object: any): TableFragments_State {
-  switch (object) {
-    case 0:
-    case "UNSPECIFIED":
-      return TableFragments_State.UNSPECIFIED;
-    case 1:
-    case "INITIAL":
-      return TableFragments_State.INITIAL;
-    case 2:
-    case "CREATING":
-      return TableFragments_State.CREATING;
-    case 3:
-    case "CREATED":
-      return TableFragments_State.CREATED;
-    case -1:
-    case "UNRECOGNIZED":
-    default:
-      return TableFragments_State.UNRECOGNIZED;
-  }
-}
-
-export function tableFragments_StateToJSON(object: TableFragments_State): string {
-  switch (object) {
-    case TableFragments_State.UNSPECIFIED:
-      return "UNSPECIFIED";
-    case TableFragments_State.INITIAL:
-      return "INITIAL";
-    case TableFragments_State.CREATING:
-      return "CREATING";
-    case TableFragments_State.CREATED:
-      return "CREATED";
-    case TableFragments_State.UNRECOGNIZED:
-    default:
-      return "UNRECOGNIZED";
-  }
-}
-
-/** Runtime information of an actor */
-export interface TableFragments_ActorStatus {
-  /** Current on which parallel unit */
-  parallelUnit:
-    | ParallelUnit
-    | undefined;
-  /** Current state */
-  state: TableFragments_ActorStatus_ActorState;
-}
-
-/** Current state of actor */
-export const TableFragments_ActorStatus_ActorState = {
-  UNSPECIFIED: "UNSPECIFIED",
-  /** INACTIVE - Initial state after creation */
-  INACTIVE: "INACTIVE",
-  /** RUNNING - Running normally */
-  RUNNING: "RUNNING",
-  UNRECOGNIZED: "UNRECOGNIZED",
-} as const;
-
-export type TableFragments_ActorStatus_ActorState =
-  typeof TableFragments_ActorStatus_ActorState[keyof typeof TableFragments_ActorStatus_ActorState];
-
-export function tableFragments_ActorStatus_ActorStateFromJSON(object: any): TableFragments_ActorStatus_ActorState {
-  switch (object) {
-    case 0:
-    case "UNSPECIFIED":
-      return TableFragments_ActorStatus_ActorState.UNSPECIFIED;
-    case 1:
-    case "INACTIVE":
-      return TableFragments_ActorStatus_ActorState.INACTIVE;
-    case 2:
-    case "RUNNING":
-      return TableFragments_ActorStatus_ActorState.RUNNING;
-    case -1:
-    case "UNRECOGNIZED":
-    default:
-      return TableFragments_ActorStatus_ActorState.UNRECOGNIZED;
-  }
-}
-
-export function tableFragments_ActorStatus_ActorStateToJSON(object: TableFragments_ActorStatus_ActorState): string {
-  switch (object) {
-    case TableFragments_ActorStatus_ActorState.UNSPECIFIED:
-      return "UNSPECIFIED";
-    case TableFragments_ActorStatus_ActorState.INACTIVE:
-      return "INACTIVE";
-    case TableFragments_ActorStatus_ActorState.RUNNING:
-      return "RUNNING";
-    case TableFragments_ActorStatus_ActorState.UNRECOGNIZED:
-    default:
-      return "UNRECOGNIZED";
-  }
-}
-
-export interface TableFragments_Fragment {
-  fragmentId: number;
-  /** Bitwise-OR of FragmentTypeFlags */
-  fragmentTypeMask: number;
-  distributionType: TableFragments_Fragment_FragmentDistributionType;
-  actors: StreamActor[];
-  /**
-   * Vnode mapping (which should be set in upstream dispatcher) of the fragment.
-   * This field is always set to `Some`. For singleton, the parallel unit for all vnodes will be the same.
-   */
-  vnodeMapping: ParallelUnitMapping | undefined;
-  stateTableIds: number[];
-  /**
-   * Note that this can be derived backwards from the upstream actors of the Actor held by the Fragment,
-   * but in some scenarios (e.g. Scaling) it will lead to a lot of duplicate code,
-   * so we pre-generate and store it here, this member will only be initialized when creating the Fragment
-   * and modified when creating the mv-on-mv
-   */
-  upstreamFragmentIds: number[];
-}
-
-export const TableFragments_Fragment_FragmentDistributionType = {
-  UNSPECIFIED: "UNSPECIFIED",
-  SINGLE: "SINGLE",
-  HASH: "HASH",
-  UNRECOGNIZED: "UNRECOGNIZED",
-} as const;
-
-export type TableFragments_Fragment_FragmentDistributionType =
-  typeof TableFragments_Fragment_FragmentDistributionType[
-    keyof typeof TableFragments_Fragment_FragmentDistributionType
-  ];
-
-export function tableFragments_Fragment_FragmentDistributionTypeFromJSON(
-  object: any,
-): TableFragments_Fragment_FragmentDistributionType {
-  switch (object) {
-    case 0:
-    case "UNSPECIFIED":
-      return TableFragments_Fragment_FragmentDistributionType.UNSPECIFIED;
-    case 1:
-    case "SINGLE":
-      return TableFragments_Fragment_FragmentDistributionType.SINGLE;
-    case 2:
-    case "HASH":
-      return TableFragments_Fragment_FragmentDistributionType.HASH;
-    case -1:
-    case "UNRECOGNIZED":
-    default:
-      return TableFragments_Fragment_FragmentDistributionType.UNRECOGNIZED;
-  }
-}
-
-export function tableFragments_Fragment_FragmentDistributionTypeToJSON(
-  object: TableFragments_Fragment_FragmentDistributionType,
-): string {
-  switch (object) {
-    case TableFragments_Fragment_FragmentDistributionType.UNSPECIFIED:
-      return "UNSPECIFIED";
-    case TableFragments_Fragment_FragmentDistributionType.SINGLE:
-      return "SINGLE";
-    case TableFragments_Fragment_FragmentDistributionType.HASH:
-      return "HASH";
-    case TableFragments_Fragment_FragmentDistributionType.UNRECOGNIZED:
-    default:
-      return "UNRECOGNIZED";
-  }
-}
-
-export interface TableFragments_FragmentsEntry {
-  key: number;
-  value: TableFragments_Fragment | undefined;
-}
-
-export interface TableFragments_ActorStatusEntry {
-  key: number;
-  value: TableFragments_ActorStatus | undefined;
-}
-
-export interface TableFragments_ActorSplitsEntry {
-  key: number;
-  value: ConnectorSplits | undefined;
-}
-
-/** / Parallel unit mapping with fragment id, used for notification. */
-export interface FragmentParallelUnitMapping {
-  fragmentId: number;
-  mapping: ParallelUnitMapping | undefined;
-}
-
-/** TODO: remove this when dashboard refactored. */
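Proto map fields such as `fragments` and `actorStatus` surface as records keyed by number, with the `*_Entry` interfaces above describing the wire-level entries. A sketch of building and iterating one such record, with hypothetical field values and assuming "./meta" were importable:

    import { TableFragments_Fragment } from "./meta";

    // Hypothetical single-fragment record; the field values are illustrative only.
    const fragments: { [key: number]: TableFragments_Fragment } = {
      1: {
        fragmentId: 1,
        fragmentTypeMask: 0,
        distributionType: "HASH",
        actors: [],
        vnodeMapping: undefined,
        stateTableIds: [2, 3],
        upstreamFragmentIds: [],
      },
    };

    // Object keys are strings at runtime; convert back to numeric fragment ids.
    for (const [key, fragment] of Object.entries(fragments)) {
      console.log(Number(key), fragment.stateTableIds);
    }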
-export interface ActorLocation {
-  node: WorkerNode | undefined;
-  actors: StreamActor[];
-}
-
-export interface FlushRequest {
-  checkpoint: boolean;
-}
-
-export interface FlushResponse {
-  status: Status | undefined;
-  snapshot: HummockSnapshot | undefined;
-}
-
-export interface CreatingJobInfo {
-  databaseId: number;
-  schemaId: number;
-  name: string;
-}
-
-export interface CancelCreatingJobsRequest {
-  infos: CreatingJobInfo[];
-}
-
-export interface CancelCreatingJobsResponse {
-  status: Status | undefined;
-}
-
-export interface ListTableFragmentsRequest {
-  tableIds: number[];
-}
-
-export interface ListTableFragmentsResponse {
-  tableFragments: { [key: number]: ListTableFragmentsResponse_TableFragmentInfo };
-}
-
-export interface ListTableFragmentsResponse_ActorInfo {
-  id: number;
-  node: StreamNode | undefined;
-  dispatcher: Dispatcher[];
-}
-
-export interface ListTableFragmentsResponse_FragmentInfo {
-  id: number;
-  actors: ListTableFragmentsResponse_ActorInfo[];
-}
-
-export interface ListTableFragmentsResponse_TableFragmentInfo {
-  fragments: ListTableFragmentsResponse_FragmentInfo[];
-  env: StreamEnvironment | undefined;
-}
-
-export interface ListTableFragmentsResponse_TableFragmentsEntry {
-  key: number;
-  value: ListTableFragmentsResponse_TableFragmentInfo | undefined;
-}
-
-export interface AddWorkerNodeRequest {
-  workerType: WorkerType;
-  host: HostAddress | undefined;
-  workerNodeParallelism: number;
-}
-
-export interface AddWorkerNodeResponse {
-  status: Status | undefined;
-  node: WorkerNode | undefined;
-}
-
-export interface ActivateWorkerNodeRequest {
-  host: HostAddress | undefined;
-}
-
-export interface ActivateWorkerNodeResponse {
-  status: Status | undefined;
-}
-
-export interface DeleteWorkerNodeRequest {
-  host: HostAddress | undefined;
-}
-
-export interface DeleteWorkerNodeResponse {
-  status: Status | undefined;
-}
-
-export interface ListAllNodesRequest {
-  workerType: WorkerType;
-  /** Whether to include nodes still starting */
-  includeStartingNodes: boolean;
-}
-
-export interface ListAllNodesResponse {
-  status: Status | undefined;
-  nodes: WorkerNode[];
-}
-
-/** Below for notification service.
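Every message-typed field in these interfaces is declared `T | undefined`, so callers guard before dereferencing rather than relying on empty defaults. A sketch, assuming "./meta" were importable:

    import { ListAllNodesResponse } from "./meta";

    // `status` is `Status | undefined`, never an empty default message.
    function workerCount(resp: ListAllNodesResponse): number {
      if (resp.status === undefined) {
        throw new Error("response carried no status");
      }
      return resp.nodes.length;
    }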
*/ -export interface SubscribeRequest { - subscribeType: SubscribeType; - host: HostAddress | undefined; - workerId: number; -} - -export interface MetaSnapshot { - databases: Database[]; - schemas: Schema[]; - sources: Source[]; - sinks: Sink[]; - tables: Table[]; - indexes: Index[]; - views: View[]; - functions: Function[]; - users: UserInfo[]; - parallelUnitMappings: FragmentParallelUnitMapping[]; - nodes: WorkerNode[]; - hummockSnapshot: HummockSnapshot | undefined; - hummockVersion: HummockVersion | undefined; - version: MetaSnapshot_SnapshotVersion | undefined; - metaBackupManifestId: MetaBackupManifestId | undefined; -} - -export interface MetaSnapshot_SnapshotVersion { - catalogVersion: number; - parallelUnitMappingVersion: number; - workerNodeVersion: number; -} - -export interface SubscribeResponse { - status: Status | undefined; - operation: SubscribeResponse_Operation; - version: number; - info?: - | { $case: "database"; database: Database } - | { $case: "schema"; schema: Schema } - | { $case: "table"; table: Table } - | { $case: "source"; source: Source } - | { $case: "sink"; sink: Sink } - | { $case: "index"; index: Index } - | { $case: "view"; view: View } - | { $case: "function"; function: Function } - | { $case: "user"; user: UserInfo } - | { $case: "parallelUnitMapping"; parallelUnitMapping: FragmentParallelUnitMapping } - | { $case: "node"; node: WorkerNode } - | { $case: "hummockSnapshot"; hummockSnapshot: HummockSnapshot } - | { $case: "hummockVersionDeltas"; hummockVersionDeltas: HummockVersionDeltas } - | { $case: "snapshot"; snapshot: MetaSnapshot } - | { $case: "metaBackupManifestId"; metaBackupManifestId: MetaBackupManifestId } - | { $case: "systemParams"; systemParams: SystemParams }; -} - -export const SubscribeResponse_Operation = { - UNSPECIFIED: "UNSPECIFIED", - ADD: "ADD", - DELETE: "DELETE", - UPDATE: "UPDATE", - SNAPSHOT: "SNAPSHOT", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type SubscribeResponse_Operation = typeof SubscribeResponse_Operation[keyof typeof SubscribeResponse_Operation]; - -export function subscribeResponse_OperationFromJSON(object: any): SubscribeResponse_Operation { - switch (object) { - case 0: - case "UNSPECIFIED": - return SubscribeResponse_Operation.UNSPECIFIED; - case 1: - case "ADD": - return SubscribeResponse_Operation.ADD; - case 2: - case "DELETE": - return SubscribeResponse_Operation.DELETE; - case 3: - case "UPDATE": - return SubscribeResponse_Operation.UPDATE; - case 4: - case "SNAPSHOT": - return SubscribeResponse_Operation.SNAPSHOT; - case -1: - case "UNRECOGNIZED": - default: - return SubscribeResponse_Operation.UNRECOGNIZED; - } -} - -export function subscribeResponse_OperationToJSON(object: SubscribeResponse_Operation): string { - switch (object) { - case SubscribeResponse_Operation.UNSPECIFIED: - return "UNSPECIFIED"; - case SubscribeResponse_Operation.ADD: - return "ADD"; - case SubscribeResponse_Operation.DELETE: - return "DELETE"; - case SubscribeResponse_Operation.UPDATE: - return "UPDATE"; - case SubscribeResponse_Operation.SNAPSHOT: - return "SNAPSHOT"; - case SubscribeResponse_Operation.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface PauseRequest { -} - -export interface PauseResponse { -} - -export interface ResumeRequest { -} - -export interface ResumeResponse { -} - -export interface GetClusterInfoRequest { -} - -export interface GetClusterInfoResponse { - workerNodes: WorkerNode[]; - tableFragments: TableFragments[]; - actorSplits: { [key: number]: ConnectorSplits }; - 
sourceInfos: { [key: number]: Source }; -} - -export interface GetClusterInfoResponse_ActorSplitsEntry { - key: number; - value: ConnectorSplits | undefined; -} - -export interface GetClusterInfoResponse_SourceInfosEntry { - key: number; - value: Source | undefined; -} - -export interface RescheduleRequest { - /** reschedule plan for each fragment */ - reschedules: { [key: number]: RescheduleRequest_Reschedule }; -} - -export interface RescheduleRequest_Reschedule { - addedParallelUnits: number[]; - removedParallelUnits: number[]; -} - -export interface RescheduleRequest_ReschedulesEntry { - key: number; - value: RescheduleRequest_Reschedule | undefined; -} - -export interface RescheduleResponse { - success: boolean; -} - -export interface MembersRequest { -} - -export interface MetaMember { - address: HostAddress | undefined; - isLeader: boolean; -} - -export interface MembersResponse { - members: MetaMember[]; -} - -/** - * The schema for persisted system parameters. - * Note on backward compatibility: - * - Do not remove deprecated fields. Mark them as deprecated both after the field definition and in `system_params/mod.rs` instead. - * - Do not rename existing fields, since each field is stored separately in the meta store with the field name as the key. - * - To modify (rename, change the type or semantic of) a field, introduce a new field suffixed by the version. - */ -export interface SystemParams { - barrierIntervalMs?: number | undefined; - checkpointFrequency?: number | undefined; - sstableSizeMb?: number | undefined; - blockSizeKb?: number | undefined; - bloomFalsePositive?: number | undefined; - stateStore?: string | undefined; - dataDirectory?: string | undefined; - backupStorageUrl?: string | undefined; - backupStorageDirectory?: string | undefined; -} - -export interface GetSystemParamsRequest { -} - -export interface GetSystemParamsResponse { - params: SystemParams | undefined; -} - -export interface SetSystemParamRequest { - param: string; - /** None means set to default value. */ - value?: string | undefined; -} - -export interface SetSystemParamResponse { -} - -function createBaseHeartbeatRequest(): HeartbeatRequest { - return { nodeId: 0, info: [] }; -} - -export const HeartbeatRequest = { - fromJSON(object: any): HeartbeatRequest { - return { - nodeId: isSet(object.nodeId) ? Number(object.nodeId) : 0, - info: Array.isArray(object?.info) ? object.info.map((e: any) => HeartbeatRequest_ExtraInfo.fromJSON(e)) : [], - }; - }, - - toJSON(message: HeartbeatRequest): unknown { - const obj: any = {}; - message.nodeId !== undefined && (obj.nodeId = Math.round(message.nodeId)); - if (message.info) { - obj.info = message.info.map((e) => e ? HeartbeatRequest_ExtraInfo.toJSON(e) : undefined); - } else { - obj.info = []; - } - return obj; - }, - - fromPartial, I>>(object: I): HeartbeatRequest { - const message = createBaseHeartbeatRequest(); - message.nodeId = object.nodeId ?? 0; - message.info = object.info?.map((e) => HeartbeatRequest_ExtraInfo.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseHeartbeatRequest_ExtraInfo(): HeartbeatRequest_ExtraInfo { - return { info: undefined }; -} - -export const HeartbeatRequest_ExtraInfo = { - fromJSON(object: any): HeartbeatRequest_ExtraInfo { - return { - info: isSet(object.hummockGcWatermark) - ? 
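// NOTE: per the comments on `SystemParams`/`SetSystemParamRequest` above, leaving
// `value` unset asks the meta service to reset the parameter to its default.
// Sketch (the parameter names here are illustrative assumptions only):
//
//   const reset: SetSystemParamRequest = { param: "checkpoint_frequency" };
//   const set: SetSystemParamRequest = { param: "barrier_interval_ms", value: "500" };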
{ $case: "hummockGcWatermark", hummockGcWatermark: Number(object.hummockGcWatermark) } - : undefined, - }; - }, - - toJSON(message: HeartbeatRequest_ExtraInfo): unknown { - const obj: any = {}; - message.info?.$case === "hummockGcWatermark" && - (obj.hummockGcWatermark = Math.round(message.info?.hummockGcWatermark)); - return obj; - }, - - fromPartial, I>>(object: I): HeartbeatRequest_ExtraInfo { - const message = createBaseHeartbeatRequest_ExtraInfo(); - if ( - object.info?.$case === "hummockGcWatermark" && - object.info?.hummockGcWatermark !== undefined && - object.info?.hummockGcWatermark !== null - ) { - message.info = { $case: "hummockGcWatermark", hummockGcWatermark: object.info.hummockGcWatermark }; - } - return message; - }, -}; - -function createBaseHeartbeatResponse(): HeartbeatResponse { - return { status: undefined }; -} - -export const HeartbeatResponse = { - fromJSON(object: any): HeartbeatResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: HeartbeatResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): HeartbeatResponse { - const message = createBaseHeartbeatResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseTableFragments(): TableFragments { - return { - tableId: 0, - state: TableFragments_State.UNSPECIFIED, - fragments: {}, - actorStatus: {}, - actorSplits: {}, - env: undefined, - }; -} - -export const TableFragments = { - fromJSON(object: any): TableFragments { - return { - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - state: isSet(object.state) ? tableFragments_StateFromJSON(object.state) : TableFragments_State.UNSPECIFIED, - fragments: isObject(object.fragments) - ? Object.entries(object.fragments).reduce<{ [key: number]: TableFragments_Fragment }>((acc, [key, value]) => { - acc[Number(key)] = TableFragments_Fragment.fromJSON(value); - return acc; - }, {}) - : {}, - actorStatus: isObject(object.actorStatus) - ? Object.entries(object.actorStatus).reduce<{ [key: number]: TableFragments_ActorStatus }>( - (acc, [key, value]) => { - acc[Number(key)] = TableFragments_ActorStatus.fromJSON(value); - return acc; - }, - {}, - ) - : {}, - actorSplits: isObject(object.actorSplits) - ? Object.entries(object.actorSplits).reduce<{ [key: number]: ConnectorSplits }>((acc, [key, value]) => { - acc[Number(key)] = ConnectorSplits.fromJSON(value); - return acc; - }, {}) - : {}, - env: isSet(object.env) ? 
StreamEnvironment.fromJSON(object.env) : undefined, - }; - }, - - toJSON(message: TableFragments): unknown { - const obj: any = {}; - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - message.state !== undefined && (obj.state = tableFragments_StateToJSON(message.state)); - obj.fragments = {}; - if (message.fragments) { - Object.entries(message.fragments).forEach(([k, v]) => { - obj.fragments[k] = TableFragments_Fragment.toJSON(v); - }); - } - obj.actorStatus = {}; - if (message.actorStatus) { - Object.entries(message.actorStatus).forEach(([k, v]) => { - obj.actorStatus[k] = TableFragments_ActorStatus.toJSON(v); - }); - } - obj.actorSplits = {}; - if (message.actorSplits) { - Object.entries(message.actorSplits).forEach(([k, v]) => { - obj.actorSplits[k] = ConnectorSplits.toJSON(v); - }); - } - message.env !== undefined && (obj.env = message.env ? StreamEnvironment.toJSON(message.env) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): TableFragments { - const message = createBaseTableFragments(); - message.tableId = object.tableId ?? 0; - message.state = object.state ?? TableFragments_State.UNSPECIFIED; - message.fragments = Object.entries(object.fragments ?? {}).reduce<{ [key: number]: TableFragments_Fragment }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = TableFragments_Fragment.fromPartial(value); - } - return acc; - }, - {}, - ); - message.actorStatus = Object.entries(object.actorStatus ?? {}).reduce< - { [key: number]: TableFragments_ActorStatus } - >((acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = TableFragments_ActorStatus.fromPartial(value); - } - return acc; - }, {}); - message.actorSplits = Object.entries(object.actorSplits ?? {}).reduce<{ [key: number]: ConnectorSplits }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = ConnectorSplits.fromPartial(value); - } - return acc; - }, - {}, - ); - message.env = (object.env !== undefined && object.env !== null) - ? StreamEnvironment.fromPartial(object.env) - : undefined; - return message; - }, -}; - -function createBaseTableFragments_ActorStatus(): TableFragments_ActorStatus { - return { parallelUnit: undefined, state: TableFragments_ActorStatus_ActorState.UNSPECIFIED }; -} - -export const TableFragments_ActorStatus = { - fromJSON(object: any): TableFragments_ActorStatus { - return { - parallelUnit: isSet(object.parallelUnit) ? ParallelUnit.fromJSON(object.parallelUnit) : undefined, - state: isSet(object.state) - ? tableFragments_ActorStatus_ActorStateFromJSON(object.state) - : TableFragments_ActorStatus_ActorState.UNSPECIFIED, - }; - }, - - toJSON(message: TableFragments_ActorStatus): unknown { - const obj: any = {}; - message.parallelUnit !== undefined && - (obj.parallelUnit = message.parallelUnit ? ParallelUnit.toJSON(message.parallelUnit) : undefined); - message.state !== undefined && (obj.state = tableFragments_ActorStatus_ActorStateToJSON(message.state)); - return obj; - }, - - fromPartial, I>>(object: I): TableFragments_ActorStatus { - const message = createBaseTableFragments_ActorStatus(); - message.parallelUnit = (object.parallelUnit !== undefined && object.parallelUnit !== null) - ? ParallelUnit.fromPartial(object.parallelUnit) - : undefined; - message.state = object.state ?? 
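// NOTE: `fromPartial` deep-completes a partially specified message: scalars fall
// back via `??`, nested messages recurse through their own `fromPartial`, and
// absent maps/arrays become empty. Sketch:
//
//   const tf = TableFragments.fromPartial({ tableId: 42 });
//   // tf.state === TableFragments_State.UNSPECIFIED, tf.fragments is {}, tf.env is undefined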
TableFragments_ActorStatus_ActorState.UNSPECIFIED; - return message; - }, -}; - -function createBaseTableFragments_Fragment(): TableFragments_Fragment { - return { - fragmentId: 0, - fragmentTypeMask: 0, - distributionType: TableFragments_Fragment_FragmentDistributionType.UNSPECIFIED, - actors: [], - vnodeMapping: undefined, - stateTableIds: [], - upstreamFragmentIds: [], - }; -} - -export const TableFragments_Fragment = { - fromJSON(object: any): TableFragments_Fragment { - return { - fragmentId: isSet(object.fragmentId) ? Number(object.fragmentId) : 0, - fragmentTypeMask: isSet(object.fragmentTypeMask) ? Number(object.fragmentTypeMask) : 0, - distributionType: isSet(object.distributionType) - ? tableFragments_Fragment_FragmentDistributionTypeFromJSON(object.distributionType) - : TableFragments_Fragment_FragmentDistributionType.UNSPECIFIED, - actors: Array.isArray(object?.actors) ? object.actors.map((e: any) => StreamActor.fromJSON(e)) : [], - vnodeMapping: isSet(object.vnodeMapping) ? ParallelUnitMapping.fromJSON(object.vnodeMapping) : undefined, - stateTableIds: Array.isArray(object?.stateTableIds) ? object.stateTableIds.map((e: any) => Number(e)) : [], - upstreamFragmentIds: Array.isArray(object?.upstreamFragmentIds) - ? object.upstreamFragmentIds.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: TableFragments_Fragment): unknown { - const obj: any = {}; - message.fragmentId !== undefined && (obj.fragmentId = Math.round(message.fragmentId)); - message.fragmentTypeMask !== undefined && (obj.fragmentTypeMask = Math.round(message.fragmentTypeMask)); - message.distributionType !== undefined && - (obj.distributionType = tableFragments_Fragment_FragmentDistributionTypeToJSON(message.distributionType)); - if (message.actors) { - obj.actors = message.actors.map((e) => e ? StreamActor.toJSON(e) : undefined); - } else { - obj.actors = []; - } - message.vnodeMapping !== undefined && - (obj.vnodeMapping = message.vnodeMapping ? ParallelUnitMapping.toJSON(message.vnodeMapping) : undefined); - if (message.stateTableIds) { - obj.stateTableIds = message.stateTableIds.map((e) => Math.round(e)); - } else { - obj.stateTableIds = []; - } - if (message.upstreamFragmentIds) { - obj.upstreamFragmentIds = message.upstreamFragmentIds.map((e) => Math.round(e)); - } else { - obj.upstreamFragmentIds = []; - } - return obj; - }, - - fromPartial, I>>(object: I): TableFragments_Fragment { - const message = createBaseTableFragments_Fragment(); - message.fragmentId = object.fragmentId ?? 0; - message.fragmentTypeMask = object.fragmentTypeMask ?? 0; - message.distributionType = object.distributionType ?? TableFragments_Fragment_FragmentDistributionType.UNSPECIFIED; - message.actors = object.actors?.map((e) => StreamActor.fromPartial(e)) || []; - message.vnodeMapping = (object.vnodeMapping !== undefined && object.vnodeMapping !== null) - ? ParallelUnitMapping.fromPartial(object.vnodeMapping) - : undefined; - message.stateTableIds = object.stateTableIds?.map((e) => e) || []; - message.upstreamFragmentIds = object.upstreamFragmentIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseTableFragments_FragmentsEntry(): TableFragments_FragmentsEntry { - return { key: 0, value: undefined }; -} - -export const TableFragments_FragmentsEntry = { - fromJSON(object: any): TableFragments_FragmentsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? 
TableFragments_Fragment.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: TableFragments_FragmentsEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && - (obj.value = message.value ? TableFragments_Fragment.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): TableFragments_FragmentsEntry { - const message = createBaseTableFragments_FragmentsEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? TableFragments_Fragment.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseTableFragments_ActorStatusEntry(): TableFragments_ActorStatusEntry { - return { key: 0, value: undefined }; -} - -export const TableFragments_ActorStatusEntry = { - fromJSON(object: any): TableFragments_ActorStatusEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? TableFragments_ActorStatus.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: TableFragments_ActorStatusEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && - (obj.value = message.value ? TableFragments_ActorStatus.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): TableFragments_ActorStatusEntry { - const message = createBaseTableFragments_ActorStatusEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? TableFragments_ActorStatus.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseTableFragments_ActorSplitsEntry(): TableFragments_ActorSplitsEntry { - return { key: 0, value: undefined }; -} - -export const TableFragments_ActorSplitsEntry = { - fromJSON(object: any): TableFragments_ActorSplitsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? ConnectorSplits.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: TableFragments_ActorSplitsEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? ConnectorSplits.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): TableFragments_ActorSplitsEntry { - const message = createBaseTableFragments_ActorSplitsEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? ConnectorSplits.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseFragmentParallelUnitMapping(): FragmentParallelUnitMapping { - return { fragmentId: 0, mapping: undefined }; -} - -export const FragmentParallelUnitMapping = { - fromJSON(object: any): FragmentParallelUnitMapping { - return { - fragmentId: isSet(object.fragmentId) ? Number(object.fragmentId) : 0, - mapping: isSet(object.mapping) ? ParallelUnitMapping.fromJSON(object.mapping) : undefined, - }; - }, - - toJSON(message: FragmentParallelUnitMapping): unknown { - const obj: any = {}; - message.fragmentId !== undefined && (obj.fragmentId = Math.round(message.fragmentId)); - message.mapping !== undefined && - (obj.mapping = message.mapping ? 
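// NOTE: the `*Entry` messages above mirror the implicit key/value entry types that
// protobuf derives for map fields; the parent message exposes the map as a plain
// object instead, so constructing an entry directly is rarely needed, e.g.:
//
//   const entry = TableFragments_FragmentsEntry.fromPartial({ key: 2, value: { fragmentId: 2 } });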
ParallelUnitMapping.toJSON(message.mapping) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): FragmentParallelUnitMapping { - const message = createBaseFragmentParallelUnitMapping(); - message.fragmentId = object.fragmentId ?? 0; - message.mapping = (object.mapping !== undefined && object.mapping !== null) - ? ParallelUnitMapping.fromPartial(object.mapping) - : undefined; - return message; - }, -}; - -function createBaseActorLocation(): ActorLocation { - return { node: undefined, actors: [] }; -} - -export const ActorLocation = { - fromJSON(object: any): ActorLocation { - return { - node: isSet(object.node) ? WorkerNode.fromJSON(object.node) : undefined, - actors: Array.isArray(object?.actors) ? object.actors.map((e: any) => StreamActor.fromJSON(e)) : [], - }; - }, - - toJSON(message: ActorLocation): unknown { - const obj: any = {}; - message.node !== undefined && (obj.node = message.node ? WorkerNode.toJSON(message.node) : undefined); - if (message.actors) { - obj.actors = message.actors.map((e) => e ? StreamActor.toJSON(e) : undefined); - } else { - obj.actors = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ActorLocation { - const message = createBaseActorLocation(); - message.node = (object.node !== undefined && object.node !== null) - ? WorkerNode.fromPartial(object.node) - : undefined; - message.actors = object.actors?.map((e) => StreamActor.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseFlushRequest(): FlushRequest { - return { checkpoint: false }; -} - -export const FlushRequest = { - fromJSON(object: any): FlushRequest { - return { checkpoint: isSet(object.checkpoint) ? Boolean(object.checkpoint) : false }; - }, - - toJSON(message: FlushRequest): unknown { - const obj: any = {}; - message.checkpoint !== undefined && (obj.checkpoint = message.checkpoint); - return obj; - }, - - fromPartial, I>>(object: I): FlushRequest { - const message = createBaseFlushRequest(); - message.checkpoint = object.checkpoint ?? false; - return message; - }, -}; - -function createBaseFlushResponse(): FlushResponse { - return { status: undefined, snapshot: undefined }; -} - -export const FlushResponse = { - fromJSON(object: any): FlushResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - snapshot: isSet(object.snapshot) ? HummockSnapshot.fromJSON(object.snapshot) : undefined, - }; - }, - - toJSON(message: FlushResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.snapshot !== undefined && - (obj.snapshot = message.snapshot ? HummockSnapshot.toJSON(message.snapshot) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): FlushResponse { - const message = createBaseFlushResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.snapshot = (object.snapshot !== undefined && object.snapshot !== null) - ? HummockSnapshot.fromPartial(object.snapshot) - : undefined; - return message; - }, -}; - -function createBaseCreatingJobInfo(): CreatingJobInfo { - return { databaseId: 0, schemaId: 0, name: "" }; -} - -export const CreatingJobInfo = { - fromJSON(object: any): CreatingJobInfo { - return { - databaseId: isSet(object.databaseId) ? Number(object.databaseId) : 0, - schemaId: isSet(object.schemaId) ? Number(object.schemaId) : 0, - name: isSet(object.name) ? 
String(object.name) : "", - }; - }, - - toJSON(message: CreatingJobInfo): unknown { - const obj: any = {}; - message.databaseId !== undefined && (obj.databaseId = Math.round(message.databaseId)); - message.schemaId !== undefined && (obj.schemaId = Math.round(message.schemaId)); - message.name !== undefined && (obj.name = message.name); - return obj; - }, - - fromPartial, I>>(object: I): CreatingJobInfo { - const message = createBaseCreatingJobInfo(); - message.databaseId = object.databaseId ?? 0; - message.schemaId = object.schemaId ?? 0; - message.name = object.name ?? ""; - return message; - }, -}; - -function createBaseCancelCreatingJobsRequest(): CancelCreatingJobsRequest { - return { infos: [] }; -} - -export const CancelCreatingJobsRequest = { - fromJSON(object: any): CancelCreatingJobsRequest { - return { infos: Array.isArray(object?.infos) ? object.infos.map((e: any) => CreatingJobInfo.fromJSON(e)) : [] }; - }, - - toJSON(message: CancelCreatingJobsRequest): unknown { - const obj: any = {}; - if (message.infos) { - obj.infos = message.infos.map((e) => e ? CreatingJobInfo.toJSON(e) : undefined); - } else { - obj.infos = []; - } - return obj; - }, - - fromPartial, I>>(object: I): CancelCreatingJobsRequest { - const message = createBaseCancelCreatingJobsRequest(); - message.infos = object.infos?.map((e) => CreatingJobInfo.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseCancelCreatingJobsResponse(): CancelCreatingJobsResponse { - return { status: undefined }; -} - -export const CancelCreatingJobsResponse = { - fromJSON(object: any): CancelCreatingJobsResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: CancelCreatingJobsResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CancelCreatingJobsResponse { - const message = createBaseCancelCreatingJobsResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseListTableFragmentsRequest(): ListTableFragmentsRequest { - return { tableIds: [] }; -} - -export const ListTableFragmentsRequest = { - fromJSON(object: any): ListTableFragmentsRequest { - return { tableIds: Array.isArray(object?.tableIds) ? object.tableIds.map((e: any) => Number(e)) : [] }; - }, - - toJSON(message: ListTableFragmentsRequest): unknown { - const obj: any = {}; - if (message.tableIds) { - obj.tableIds = message.tableIds.map((e) => Math.round(e)); - } else { - obj.tableIds = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ListTableFragmentsRequest { - const message = createBaseListTableFragmentsRequest(); - message.tableIds = object.tableIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseListTableFragmentsResponse(): ListTableFragmentsResponse { - return { tableFragments: {} }; -} - -export const ListTableFragmentsResponse = { - fromJSON(object: any): ListTableFragmentsResponse { - return { - tableFragments: isObject(object.tableFragments) - ? 
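// NOTE: a sketch of building the job-control requests defined above (the ids and
// the "mv_orders" name are invented; the gRPC client lives outside this file):
//
//   const cancel = CancelCreatingJobsRequest.fromPartial({
//     infos: [{ databaseId: 1, schemaId: 2, name: "mv_orders" }],
//   });
//   const list = ListTableFragmentsRequest.fromPartial({ tableIds: [1001, 1002] });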
Object.entries(object.tableFragments).reduce<{ [key: number]: ListTableFragmentsResponse_TableFragmentInfo }>( - (acc, [key, value]) => { - acc[Number(key)] = ListTableFragmentsResponse_TableFragmentInfo.fromJSON(value); - return acc; - }, - {}, - ) - : {}, - }; - }, - - toJSON(message: ListTableFragmentsResponse): unknown { - const obj: any = {}; - obj.tableFragments = {}; - if (message.tableFragments) { - Object.entries(message.tableFragments).forEach(([k, v]) => { - obj.tableFragments[k] = ListTableFragmentsResponse_TableFragmentInfo.toJSON(v); - }); - } - return obj; - }, - - fromPartial, I>>(object: I): ListTableFragmentsResponse { - const message = createBaseListTableFragmentsResponse(); - message.tableFragments = Object.entries(object.tableFragments ?? {}).reduce< - { [key: number]: ListTableFragmentsResponse_TableFragmentInfo } - >((acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = ListTableFragmentsResponse_TableFragmentInfo.fromPartial(value); - } - return acc; - }, {}); - return message; - }, -}; - -function createBaseListTableFragmentsResponse_ActorInfo(): ListTableFragmentsResponse_ActorInfo { - return { id: 0, node: undefined, dispatcher: [] }; -} - -export const ListTableFragmentsResponse_ActorInfo = { - fromJSON(object: any): ListTableFragmentsResponse_ActorInfo { - return { - id: isSet(object.id) ? Number(object.id) : 0, - node: isSet(object.node) ? StreamNode.fromJSON(object.node) : undefined, - dispatcher: Array.isArray(object?.dispatcher) ? object.dispatcher.map((e: any) => Dispatcher.fromJSON(e)) : [], - }; - }, - - toJSON(message: ListTableFragmentsResponse_ActorInfo): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.node !== undefined && (obj.node = message.node ? StreamNode.toJSON(message.node) : undefined); - if (message.dispatcher) { - obj.dispatcher = message.dispatcher.map((e) => e ? Dispatcher.toJSON(e) : undefined); - } else { - obj.dispatcher = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): ListTableFragmentsResponse_ActorInfo { - const message = createBaseListTableFragmentsResponse_ActorInfo(); - message.id = object.id ?? 0; - message.node = (object.node !== undefined && object.node !== null) - ? StreamNode.fromPartial(object.node) - : undefined; - message.dispatcher = object.dispatcher?.map((e) => Dispatcher.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseListTableFragmentsResponse_FragmentInfo(): ListTableFragmentsResponse_FragmentInfo { - return { id: 0, actors: [] }; -} - -export const ListTableFragmentsResponse_FragmentInfo = { - fromJSON(object: any): ListTableFragmentsResponse_FragmentInfo { - return { - id: isSet(object.id) ? Number(object.id) : 0, - actors: Array.isArray(object?.actors) - ? object.actors.map((e: any) => ListTableFragmentsResponse_ActorInfo.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ListTableFragmentsResponse_FragmentInfo): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - if (message.actors) { - obj.actors = message.actors.map((e) => e ? ListTableFragmentsResponse_ActorInfo.toJSON(e) : undefined); - } else { - obj.actors = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): ListTableFragmentsResponse_FragmentInfo { - const message = createBaseListTableFragmentsResponse_FragmentInfo(); - message.id = object.id ?? 
0; - message.actors = object.actors?.map((e) => ListTableFragmentsResponse_ActorInfo.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseListTableFragmentsResponse_TableFragmentInfo(): ListTableFragmentsResponse_TableFragmentInfo { - return { fragments: [], env: undefined }; -} - -export const ListTableFragmentsResponse_TableFragmentInfo = { - fromJSON(object: any): ListTableFragmentsResponse_TableFragmentInfo { - return { - fragments: Array.isArray(object?.fragments) - ? object.fragments.map((e: any) => ListTableFragmentsResponse_FragmentInfo.fromJSON(e)) - : [], - env: isSet(object.env) ? StreamEnvironment.fromJSON(object.env) : undefined, - }; - }, - - toJSON(message: ListTableFragmentsResponse_TableFragmentInfo): unknown { - const obj: any = {}; - if (message.fragments) { - obj.fragments = message.fragments.map((e) => e ? ListTableFragmentsResponse_FragmentInfo.toJSON(e) : undefined); - } else { - obj.fragments = []; - } - message.env !== undefined && (obj.env = message.env ? StreamEnvironment.toJSON(message.env) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): ListTableFragmentsResponse_TableFragmentInfo { - const message = createBaseListTableFragmentsResponse_TableFragmentInfo(); - message.fragments = object.fragments?.map((e) => ListTableFragmentsResponse_FragmentInfo.fromPartial(e)) || []; - message.env = (object.env !== undefined && object.env !== null) - ? StreamEnvironment.fromPartial(object.env) - : undefined; - return message; - }, -}; - -function createBaseListTableFragmentsResponse_TableFragmentsEntry(): ListTableFragmentsResponse_TableFragmentsEntry { - return { key: 0, value: undefined }; -} - -export const ListTableFragmentsResponse_TableFragmentsEntry = { - fromJSON(object: any): ListTableFragmentsResponse_TableFragmentsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? ListTableFragmentsResponse_TableFragmentInfo.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: ListTableFragmentsResponse_TableFragmentsEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && - (obj.value = message.value ? ListTableFragmentsResponse_TableFragmentInfo.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): ListTableFragmentsResponse_TableFragmentsEntry { - const message = createBaseListTableFragmentsResponse_TableFragmentsEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? ListTableFragmentsResponse_TableFragmentInfo.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseAddWorkerNodeRequest(): AddWorkerNodeRequest { - return { workerType: WorkerType.UNSPECIFIED, host: undefined, workerNodeParallelism: 0 }; -} - -export const AddWorkerNodeRequest = { - fromJSON(object: any): AddWorkerNodeRequest { - return { - workerType: isSet(object.workerType) ? workerTypeFromJSON(object.workerType) : WorkerType.UNSPECIFIED, - host: isSet(object.host) ? HostAddress.fromJSON(object.host) : undefined, - workerNodeParallelism: isSet(object.workerNodeParallelism) ? Number(object.workerNodeParallelism) : 0, - }; - }, - - toJSON(message: AddWorkerNodeRequest): unknown { - const obj: any = {}; - message.workerType !== undefined && (obj.workerType = workerTypeToJSON(message.workerType)); - message.host !== undefined && (obj.host = message.host ? 
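// NOTE: reading the nested `ListTableFragmentsResponse` shape defined above, from
// table id down to actors (`resp` is a hypothetical response value):
//
//   for (const [tableId, info] of Object.entries(resp.tableFragments)) {
//     for (const fragment of info.fragments) {
//       for (const actor of fragment.actors) { /* actor.id, actor.node, actor.dispatcher */ }
//     }
//   }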
HostAddress.toJSON(message.host) : undefined); - message.workerNodeParallelism !== undefined && - (obj.workerNodeParallelism = Math.round(message.workerNodeParallelism)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<AddWorkerNodeRequest>, I>>(object: I): AddWorkerNodeRequest { - const message = createBaseAddWorkerNodeRequest(); - message.workerType = object.workerType ?? WorkerType.UNSPECIFIED; - message.host = (object.host !== undefined && object.host !== null) - ? HostAddress.fromPartial(object.host) - : undefined; - message.workerNodeParallelism = object.workerNodeParallelism ?? 0; - return message; - }, -}; - -function createBaseAddWorkerNodeResponse(): AddWorkerNodeResponse { - return { status: undefined, node: undefined }; -} - -export const AddWorkerNodeResponse = { - fromJSON(object: any): AddWorkerNodeResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - node: isSet(object.node) ? WorkerNode.fromJSON(object.node) : undefined, - }; - }, - - toJSON(message: AddWorkerNodeResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.node !== undefined && (obj.node = message.node ? WorkerNode.toJSON(message.node) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<AddWorkerNodeResponse>, I>>(object: I): AddWorkerNodeResponse { - const message = createBaseAddWorkerNodeResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.node = (object.node !== undefined && object.node !== null) - ? WorkerNode.fromPartial(object.node) - : undefined; - return message; - }, -}; - -function createBaseActivateWorkerNodeRequest(): ActivateWorkerNodeRequest { - return { host: undefined }; -} - -export const ActivateWorkerNodeRequest = { - fromJSON(object: any): ActivateWorkerNodeRequest { - return { host: isSet(object.host) ? HostAddress.fromJSON(object.host) : undefined }; - }, - - toJSON(message: ActivateWorkerNodeRequest): unknown { - const obj: any = {}; - message.host !== undefined && (obj.host = message.host ? HostAddress.toJSON(message.host) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ActivateWorkerNodeRequest>, I>>(object: I): ActivateWorkerNodeRequest { - const message = createBaseActivateWorkerNodeRequest(); - message.host = (object.host !== undefined && object.host !== null) - ? HostAddress.fromPartial(object.host) - : undefined; - return message; - }, -}; - -function createBaseActivateWorkerNodeResponse(): ActivateWorkerNodeResponse { - return { status: undefined }; -} - -export const ActivateWorkerNodeResponse = { - fromJSON(object: any): ActivateWorkerNodeResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: ActivateWorkerNodeResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ActivateWorkerNodeResponse>, I>>(object: I): ActivateWorkerNodeResponse { - const message = createBaseActivateWorkerNodeResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseDeleteWorkerNodeRequest(): DeleteWorkerNodeRequest { - return { host: undefined }; -} - -export const DeleteWorkerNodeRequest = { - fromJSON(object: any): DeleteWorkerNodeRequest { - return { host: isSet(object.host) ? 
HostAddress.fromJSON(object.host) : undefined }; - }, - - toJSON(message: DeleteWorkerNodeRequest): unknown { - const obj: any = {}; - message.host !== undefined && (obj.host = message.host ? HostAddress.toJSON(message.host) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<DeleteWorkerNodeRequest>, I>>(object: I): DeleteWorkerNodeRequest { - const message = createBaseDeleteWorkerNodeRequest(); - message.host = (object.host !== undefined && object.host !== null) - ? HostAddress.fromPartial(object.host) - : undefined; - return message; - }, -}; - -function createBaseDeleteWorkerNodeResponse(): DeleteWorkerNodeResponse { - return { status: undefined }; -} - -export const DeleteWorkerNodeResponse = { - fromJSON(object: any): DeleteWorkerNodeResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: DeleteWorkerNodeResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<DeleteWorkerNodeResponse>, I>>(object: I): DeleteWorkerNodeResponse { - const message = createBaseDeleteWorkerNodeResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseListAllNodesRequest(): ListAllNodesRequest { - return { workerType: WorkerType.UNSPECIFIED, includeStartingNodes: false }; -} - -export const ListAllNodesRequest = { - fromJSON(object: any): ListAllNodesRequest { - return { - workerType: isSet(object.workerType) ? workerTypeFromJSON(object.workerType) : WorkerType.UNSPECIFIED, - includeStartingNodes: isSet(object.includeStartingNodes) ? Boolean(object.includeStartingNodes) : false, - }; - }, - - toJSON(message: ListAllNodesRequest): unknown { - const obj: any = {}; - message.workerType !== undefined && (obj.workerType = workerTypeToJSON(message.workerType)); - message.includeStartingNodes !== undefined && (obj.includeStartingNodes = message.includeStartingNodes); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ListAllNodesRequest>, I>>(object: I): ListAllNodesRequest { - const message = createBaseListAllNodesRequest(); - message.workerType = object.workerType ?? WorkerType.UNSPECIFIED; - message.includeStartingNodes = object.includeStartingNodes ?? false; - return message; - }, -}; - -function createBaseListAllNodesResponse(): ListAllNodesResponse { - return { status: undefined, nodes: [] }; -} - -export const ListAllNodesResponse = { - fromJSON(object: any): ListAllNodesResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - nodes: Array.isArray(object?.nodes) ? object.nodes.map((e: any) => WorkerNode.fromJSON(e)) : [], - }; - }, - - toJSON(message: ListAllNodesResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - if (message.nodes) { - obj.nodes = message.nodes.map((e) => e ? WorkerNode.toJSON(e) : undefined); - } else { - obj.nodes = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ListAllNodesResponse>, I>>(object: I): ListAllNodesResponse { - const message = createBaseListAllNodesResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? 
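// NOTE: the worker-node messages above follow the cluster membership lifecycle:
// AddWorkerNode registers a node, ActivateWorkerNode marks it running, and
// DeleteWorkerNode removes it; ListAllNodesRequest can optionally include nodes
// that are still starting. Sketch (assumes a COMPUTE_NODE variant on the
// `WorkerType` enum imported from common.proto):
//
//   const list = ListAllNodesRequest.fromPartial({
//     workerType: WorkerType.COMPUTE_NODE,
//     includeStartingNodes: true,
//   });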
Status.fromPartial(object.status) - : undefined; - message.nodes = object.nodes?.map((e) => WorkerNode.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseSubscribeRequest(): SubscribeRequest { - return { subscribeType: SubscribeType.UNSPECIFIED, host: undefined, workerId: 0 }; -} - -export const SubscribeRequest = { - fromJSON(object: any): SubscribeRequest { - return { - subscribeType: isSet(object.subscribeType) - ? subscribeTypeFromJSON(object.subscribeType) - : SubscribeType.UNSPECIFIED, - host: isSet(object.host) ? HostAddress.fromJSON(object.host) : undefined, - workerId: isSet(object.workerId) ? Number(object.workerId) : 0, - }; - }, - - toJSON(message: SubscribeRequest): unknown { - const obj: any = {}; - message.subscribeType !== undefined && (obj.subscribeType = subscribeTypeToJSON(message.subscribeType)); - message.host !== undefined && (obj.host = message.host ? HostAddress.toJSON(message.host) : undefined); - message.workerId !== undefined && (obj.workerId = Math.round(message.workerId)); - return obj; - }, - - fromPartial, I>>(object: I): SubscribeRequest { - const message = createBaseSubscribeRequest(); - message.subscribeType = object.subscribeType ?? SubscribeType.UNSPECIFIED; - message.host = (object.host !== undefined && object.host !== null) - ? HostAddress.fromPartial(object.host) - : undefined; - message.workerId = object.workerId ?? 0; - return message; - }, -}; - -function createBaseMetaSnapshot(): MetaSnapshot { - return { - databases: [], - schemas: [], - sources: [], - sinks: [], - tables: [], - indexes: [], - views: [], - functions: [], - users: [], - parallelUnitMappings: [], - nodes: [], - hummockSnapshot: undefined, - hummockVersion: undefined, - version: undefined, - metaBackupManifestId: undefined, - }; -} - -export const MetaSnapshot = { - fromJSON(object: any): MetaSnapshot { - return { - databases: Array.isArray(object?.databases) ? object.databases.map((e: any) => Database.fromJSON(e)) : [], - schemas: Array.isArray(object?.schemas) ? object.schemas.map((e: any) => Schema.fromJSON(e)) : [], - sources: Array.isArray(object?.sources) ? object.sources.map((e: any) => Source.fromJSON(e)) : [], - sinks: Array.isArray(object?.sinks) ? object.sinks.map((e: any) => Sink.fromJSON(e)) : [], - tables: Array.isArray(object?.tables) ? object.tables.map((e: any) => Table.fromJSON(e)) : [], - indexes: Array.isArray(object?.indexes) ? object.indexes.map((e: any) => Index.fromJSON(e)) : [], - views: Array.isArray(object?.views) ? object.views.map((e: any) => View.fromJSON(e)) : [], - functions: Array.isArray(object?.functions) ? object.functions.map((e: any) => Function.fromJSON(e)) : [], - users: Array.isArray(object?.users) ? object.users.map((e: any) => UserInfo.fromJSON(e)) : [], - parallelUnitMappings: Array.isArray(object?.parallelUnitMappings) - ? object.parallelUnitMappings.map((e: any) => FragmentParallelUnitMapping.fromJSON(e)) - : [], - nodes: Array.isArray(object?.nodes) - ? object.nodes.map((e: any) => WorkerNode.fromJSON(e)) - : [], - hummockSnapshot: isSet(object.hummockSnapshot) ? HummockSnapshot.fromJSON(object.hummockSnapshot) : undefined, - hummockVersion: isSet(object.hummockVersion) ? HummockVersion.fromJSON(object.hummockVersion) : undefined, - version: isSet(object.version) ? MetaSnapshot_SnapshotVersion.fromJSON(object.version) : undefined, - metaBackupManifestId: isSet(object.metaBackupManifestId) - ? 
MetaBackupManifestId.fromJSON(object.metaBackupManifestId) - : undefined, - }; - }, - - toJSON(message: MetaSnapshot): unknown { - const obj: any = {}; - if (message.databases) { - obj.databases = message.databases.map((e) => e ? Database.toJSON(e) : undefined); - } else { - obj.databases = []; - } - if (message.schemas) { - obj.schemas = message.schemas.map((e) => e ? Schema.toJSON(e) : undefined); - } else { - obj.schemas = []; - } - if (message.sources) { - obj.sources = message.sources.map((e) => e ? Source.toJSON(e) : undefined); - } else { - obj.sources = []; - } - if (message.sinks) { - obj.sinks = message.sinks.map((e) => e ? Sink.toJSON(e) : undefined); - } else { - obj.sinks = []; - } - if (message.tables) { - obj.tables = message.tables.map((e) => e ? Table.toJSON(e) : undefined); - } else { - obj.tables = []; - } - if (message.indexes) { - obj.indexes = message.indexes.map((e) => e ? Index.toJSON(e) : undefined); - } else { - obj.indexes = []; - } - if (message.views) { - obj.views = message.views.map((e) => e ? View.toJSON(e) : undefined); - } else { - obj.views = []; - } - if (message.functions) { - obj.functions = message.functions.map((e) => e ? Function.toJSON(e) : undefined); - } else { - obj.functions = []; - } - if (message.users) { - obj.users = message.users.map((e) => e ? UserInfo.toJSON(e) : undefined); - } else { - obj.users = []; - } - if (message.parallelUnitMappings) { - obj.parallelUnitMappings = message.parallelUnitMappings.map((e) => - e ? FragmentParallelUnitMapping.toJSON(e) : undefined - ); - } else { - obj.parallelUnitMappings = []; - } - if (message.nodes) { - obj.nodes = message.nodes.map((e) => e ? WorkerNode.toJSON(e) : undefined); - } else { - obj.nodes = []; - } - message.hummockSnapshot !== undefined && - (obj.hummockSnapshot = message.hummockSnapshot ? HummockSnapshot.toJSON(message.hummockSnapshot) : undefined); - message.hummockVersion !== undefined && - (obj.hummockVersion = message.hummockVersion ? HummockVersion.toJSON(message.hummockVersion) : undefined); - message.version !== undefined && - (obj.version = message.version ? MetaSnapshot_SnapshotVersion.toJSON(message.version) : undefined); - message.metaBackupManifestId !== undefined && (obj.metaBackupManifestId = message.metaBackupManifestId - ? MetaBackupManifestId.toJSON(message.metaBackupManifestId) - : undefined); - return obj; - }, - - fromPartial, I>>(object: I): MetaSnapshot { - const message = createBaseMetaSnapshot(); - message.databases = object.databases?.map((e) => Database.fromPartial(e)) || []; - message.schemas = object.schemas?.map((e) => Schema.fromPartial(e)) || []; - message.sources = object.sources?.map((e) => Source.fromPartial(e)) || []; - message.sinks = object.sinks?.map((e) => Sink.fromPartial(e)) || []; - message.tables = object.tables?.map((e) => Table.fromPartial(e)) || []; - message.indexes = object.indexes?.map((e) => Index.fromPartial(e)) || []; - message.views = object.views?.map((e) => View.fromPartial(e)) || []; - message.functions = object.functions?.map((e) => Function.fromPartial(e)) || []; - message.users = object.users?.map((e) => UserInfo.fromPartial(e)) || []; - message.parallelUnitMappings = - object.parallelUnitMappings?.map((e) => FragmentParallelUnitMapping.fromPartial(e)) || []; - message.nodes = object.nodes?.map((e) => WorkerNode.fromPartial(e)) || []; - message.hummockSnapshot = (object.hummockSnapshot !== undefined && object.hummockSnapshot !== null) - ? 
HummockSnapshot.fromPartial(object.hummockSnapshot) - : undefined; - message.hummockVersion = (object.hummockVersion !== undefined && object.hummockVersion !== null) - ? HummockVersion.fromPartial(object.hummockVersion) - : undefined; - message.version = (object.version !== undefined && object.version !== null) - ? MetaSnapshot_SnapshotVersion.fromPartial(object.version) - : undefined; - message.metaBackupManifestId = (object.metaBackupManifestId !== undefined && object.metaBackupManifestId !== null) - ? MetaBackupManifestId.fromPartial(object.metaBackupManifestId) - : undefined; - return message; - }, -}; - -function createBaseMetaSnapshot_SnapshotVersion(): MetaSnapshot_SnapshotVersion { - return { catalogVersion: 0, parallelUnitMappingVersion: 0, workerNodeVersion: 0 }; -} - -export const MetaSnapshot_SnapshotVersion = { - fromJSON(object: any): MetaSnapshot_SnapshotVersion { - return { - catalogVersion: isSet(object.catalogVersion) ? Number(object.catalogVersion) : 0, - parallelUnitMappingVersion: isSet(object.parallelUnitMappingVersion) - ? Number(object.parallelUnitMappingVersion) - : 0, - workerNodeVersion: isSet(object.workerNodeVersion) ? Number(object.workerNodeVersion) : 0, - }; - }, - - toJSON(message: MetaSnapshot_SnapshotVersion): unknown { - const obj: any = {}; - message.catalogVersion !== undefined && (obj.catalogVersion = Math.round(message.catalogVersion)); - message.parallelUnitMappingVersion !== undefined && - (obj.parallelUnitMappingVersion = Math.round(message.parallelUnitMappingVersion)); - message.workerNodeVersion !== undefined && (obj.workerNodeVersion = Math.round(message.workerNodeVersion)); - return obj; - }, - - fromPartial, I>>(object: I): MetaSnapshot_SnapshotVersion { - const message = createBaseMetaSnapshot_SnapshotVersion(); - message.catalogVersion = object.catalogVersion ?? 0; - message.parallelUnitMappingVersion = object.parallelUnitMappingVersion ?? 0; - message.workerNodeVersion = object.workerNodeVersion ?? 0; - return message; - }, -}; - -function createBaseSubscribeResponse(): SubscribeResponse { - return { status: undefined, operation: SubscribeResponse_Operation.UNSPECIFIED, version: 0, info: undefined }; -} - -export const SubscribeResponse = { - fromJSON(object: any): SubscribeResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - operation: isSet(object.operation) - ? subscribeResponse_OperationFromJSON(object.operation) - : SubscribeResponse_Operation.UNSPECIFIED, - version: isSet(object.version) ? Number(object.version) : 0, - info: isSet(object.database) - ? { $case: "database", database: Database.fromJSON(object.database) } - : isSet(object.schema) - ? { $case: "schema", schema: Schema.fromJSON(object.schema) } - : isSet(object.table) - ? { $case: "table", table: Table.fromJSON(object.table) } - : isSet(object.source) - ? { $case: "source", source: Source.fromJSON(object.source) } - : isSet(object.sink) - ? { $case: "sink", sink: Sink.fromJSON(object.sink) } - : isSet(object.index) - ? { $case: "index", index: Index.fromJSON(object.index) } - : isSet(object.view) - ? { $case: "view", view: View.fromJSON(object.view) } - : isSet(object.function) - ? { $case: "function", function: Function.fromJSON(object.function) } - : isSet(object.user) - ? { $case: "user", user: UserInfo.fromJSON(object.user) } - : isSet(object.parallelUnitMapping) - ? { - $case: "parallelUnitMapping", - parallelUnitMapping: FragmentParallelUnitMapping.fromJSON(object.parallelUnitMapping), - } - : isSet(object.node) - ? 
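// NOTE: `MetaSnapshot_SnapshotVersion` above carries one version counter per
// notification domain (catalog, parallel-unit mapping, worker node). In
// `SubscribeResponse.fromJSON` below, the oneof arrives flattened: one top-level
// JSON key per variant is probed and rewrapped into `$case`, e.g. (illustrative
// input):
//
//   SubscribeResponse.fromJSON({ operation: "ADD", version: 7, table: { id: 1 } })
//   // => { ..., info: { $case: "table", table: Table.fromJSON({ id: 1 }) } }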
{ $case: "node", node: WorkerNode.fromJSON(object.node) } - : isSet(object.hummockSnapshot) - ? { $case: "hummockSnapshot", hummockSnapshot: HummockSnapshot.fromJSON(object.hummockSnapshot) } - : isSet(object.hummockVersionDeltas) - ? { - $case: "hummockVersionDeltas", - hummockVersionDeltas: HummockVersionDeltas.fromJSON(object.hummockVersionDeltas), - } - : isSet(object.snapshot) - ? { $case: "snapshot", snapshot: MetaSnapshot.fromJSON(object.snapshot) } - : isSet(object.metaBackupManifestId) - ? { - $case: "metaBackupManifestId", - metaBackupManifestId: MetaBackupManifestId.fromJSON(object.metaBackupManifestId), - } - : isSet(object.systemParams) - ? { $case: "systemParams", systemParams: SystemParams.fromJSON(object.systemParams) } - : undefined, - }; - }, - - toJSON(message: SubscribeResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.operation !== undefined && (obj.operation = subscribeResponse_OperationToJSON(message.operation)); - message.version !== undefined && (obj.version = Math.round(message.version)); - message.info?.$case === "database" && - (obj.database = message.info?.database ? Database.toJSON(message.info?.database) : undefined); - message.info?.$case === "schema" && - (obj.schema = message.info?.schema ? Schema.toJSON(message.info?.schema) : undefined); - message.info?.$case === "table" && - (obj.table = message.info?.table ? Table.toJSON(message.info?.table) : undefined); - message.info?.$case === "source" && - (obj.source = message.info?.source ? Source.toJSON(message.info?.source) : undefined); - message.info?.$case === "sink" && (obj.sink = message.info?.sink ? Sink.toJSON(message.info?.sink) : undefined); - message.info?.$case === "index" && - (obj.index = message.info?.index ? Index.toJSON(message.info?.index) : undefined); - message.info?.$case === "view" && (obj.view = message.info?.view ? View.toJSON(message.info?.view) : undefined); - message.info?.$case === "function" && - (obj.function = message.info?.function ? Function.toJSON(message.info?.function) : undefined); - message.info?.$case === "user" && (obj.user = message.info?.user ? UserInfo.toJSON(message.info?.user) : undefined); - message.info?.$case === "parallelUnitMapping" && (obj.parallelUnitMapping = message.info?.parallelUnitMapping - ? FragmentParallelUnitMapping.toJSON(message.info?.parallelUnitMapping) - : undefined); - message.info?.$case === "node" && - (obj.node = message.info?.node ? WorkerNode.toJSON(message.info?.node) : undefined); - message.info?.$case === "hummockSnapshot" && (obj.hummockSnapshot = message.info?.hummockSnapshot - ? HummockSnapshot.toJSON(message.info?.hummockSnapshot) - : undefined); - message.info?.$case === "hummockVersionDeltas" && (obj.hummockVersionDeltas = message.info?.hummockVersionDeltas - ? HummockVersionDeltas.toJSON(message.info?.hummockVersionDeltas) - : undefined); - message.info?.$case === "snapshot" && - (obj.snapshot = message.info?.snapshot ? MetaSnapshot.toJSON(message.info?.snapshot) : undefined); - message.info?.$case === "metaBackupManifestId" && (obj.metaBackupManifestId = message.info?.metaBackupManifestId - ? MetaBackupManifestId.toJSON(message.info?.metaBackupManifestId) - : undefined); - message.info?.$case === "systemParams" && - (obj.systemParams = message.info?.systemParams ? 
SystemParams.toJSON(message.info?.systemParams) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): SubscribeResponse { - const message = createBaseSubscribeResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.operation = object.operation ?? SubscribeResponse_Operation.UNSPECIFIED; - message.version = object.version ?? 0; - if (object.info?.$case === "database" && object.info?.database !== undefined && object.info?.database !== null) { - message.info = { $case: "database", database: Database.fromPartial(object.info.database) }; - } - if (object.info?.$case === "schema" && object.info?.schema !== undefined && object.info?.schema !== null) { - message.info = { $case: "schema", schema: Schema.fromPartial(object.info.schema) }; - } - if (object.info?.$case === "table" && object.info?.table !== undefined && object.info?.table !== null) { - message.info = { $case: "table", table: Table.fromPartial(object.info.table) }; - } - if (object.info?.$case === "source" && object.info?.source !== undefined && object.info?.source !== null) { - message.info = { $case: "source", source: Source.fromPartial(object.info.source) }; - } - if (object.info?.$case === "sink" && object.info?.sink !== undefined && object.info?.sink !== null) { - message.info = { $case: "sink", sink: Sink.fromPartial(object.info.sink) }; - } - if (object.info?.$case === "index" && object.info?.index !== undefined && object.info?.index !== null) { - message.info = { $case: "index", index: Index.fromPartial(object.info.index) }; - } - if (object.info?.$case === "view" && object.info?.view !== undefined && object.info?.view !== null) { - message.info = { $case: "view", view: View.fromPartial(object.info.view) }; - } - if (object.info?.$case === "function" && object.info?.function !== undefined && object.info?.function !== null) { - message.info = { $case: "function", function: Function.fromPartial(object.info.function) }; - } - if (object.info?.$case === "user" && object.info?.user !== undefined && object.info?.user !== null) { - message.info = { $case: "user", user: UserInfo.fromPartial(object.info.user) }; - } - if ( - object.info?.$case === "parallelUnitMapping" && - object.info?.parallelUnitMapping !== undefined && - object.info?.parallelUnitMapping !== null - ) { - message.info = { - $case: "parallelUnitMapping", - parallelUnitMapping: FragmentParallelUnitMapping.fromPartial(object.info.parallelUnitMapping), - }; - } - if (object.info?.$case === "node" && object.info?.node !== undefined && object.info?.node !== null) { - message.info = { $case: "node", node: WorkerNode.fromPartial(object.info.node) }; - } - if ( - object.info?.$case === "hummockSnapshot" && - object.info?.hummockSnapshot !== undefined && - object.info?.hummockSnapshot !== null - ) { - message.info = { - $case: "hummockSnapshot", - hummockSnapshot: HummockSnapshot.fromPartial(object.info.hummockSnapshot), - }; - } - if ( - object.info?.$case === "hummockVersionDeltas" && - object.info?.hummockVersionDeltas !== undefined && - object.info?.hummockVersionDeltas !== null - ) { - message.info = { - $case: "hummockVersionDeltas", - hummockVersionDeltas: HummockVersionDeltas.fromPartial(object.info.hummockVersionDeltas), - }; - } - if (object.info?.$case === "snapshot" && object.info?.snapshot !== undefined && object.info?.snapshot !== null) { - message.info = { $case: "snapshot", snapshot: MetaSnapshot.fromPartial(object.info.snapshot) }; - } - if ( - 
object.info?.$case === "metaBackupManifestId" && - object.info?.metaBackupManifestId !== undefined && - object.info?.metaBackupManifestId !== null - ) { - message.info = { - $case: "metaBackupManifestId", - metaBackupManifestId: MetaBackupManifestId.fromPartial(object.info.metaBackupManifestId), - }; - } - if ( - object.info?.$case === "systemParams" && - object.info?.systemParams !== undefined && - object.info?.systemParams !== null - ) { - message.info = { $case: "systemParams", systemParams: SystemParams.fromPartial(object.info.systemParams) }; - } - return message; - }, -}; - -function createBasePauseRequest(): PauseRequest { - return {}; -} - -export const PauseRequest = { - fromJSON(_: any): PauseRequest { - return {}; - }, - - toJSON(_: PauseRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): PauseRequest { - const message = createBasePauseRequest(); - return message; - }, -}; - -function createBasePauseResponse(): PauseResponse { - return {}; -} - -export const PauseResponse = { - fromJSON(_: any): PauseResponse { - return {}; - }, - - toJSON(_: PauseResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): PauseResponse { - const message = createBasePauseResponse(); - return message; - }, -}; - -function createBaseResumeRequest(): ResumeRequest { - return {}; -} - -export const ResumeRequest = { - fromJSON(_: any): ResumeRequest { - return {}; - }, - - toJSON(_: ResumeRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): ResumeRequest { - const message = createBaseResumeRequest(); - return message; - }, -}; - -function createBaseResumeResponse(): ResumeResponse { - return {}; -} - -export const ResumeResponse = { - fromJSON(_: any): ResumeResponse { - return {}; - }, - - toJSON(_: ResumeResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): ResumeResponse { - const message = createBaseResumeResponse(); - return message; - }, -}; - -function createBaseGetClusterInfoRequest(): GetClusterInfoRequest { - return {}; -} - -export const GetClusterInfoRequest = { - fromJSON(_: any): GetClusterInfoRequest { - return {}; - }, - - toJSON(_: GetClusterInfoRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): GetClusterInfoRequest { - const message = createBaseGetClusterInfoRequest(); - return message; - }, -}; - -function createBaseGetClusterInfoResponse(): GetClusterInfoResponse { - return { workerNodes: [], tableFragments: [], actorSplits: {}, sourceInfos: {} }; -} - -export const GetClusterInfoResponse = { - fromJSON(object: any): GetClusterInfoResponse { - return { - workerNodes: Array.isArray(object?.workerNodes) ? object.workerNodes.map((e: any) => WorkerNode.fromJSON(e)) : [], - tableFragments: Array.isArray(object?.tableFragments) - ? object.tableFragments.map((e: any) => TableFragments.fromJSON(e)) - : [], - actorSplits: isObject(object.actorSplits) - ? Object.entries(object.actorSplits).reduce<{ [key: number]: ConnectorSplits }>((acc, [key, value]) => { - acc[Number(key)] = ConnectorSplits.fromJSON(value); - return acc; - }, {}) - : {}, - sourceInfos: isObject(object.sourceInfos) - ? 
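// NOTE: the Pause/Resume/GetClusterInfo requests above are intentionally empty
// messages, keeping a request/response pair for every RPC. Decoding a
// cluster-info payload (`raw` is assumed to be the JSON value off the wire):
//
//   const info = GetClusterInfoResponse.fromJSON(raw);
//   for (const tf of info.tableFragments) { /* inspect each TableFragments */ }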
Object.entries(object.sourceInfos).reduce<{ [key: number]: Source }>((acc, [key, value]) => { - acc[Number(key)] = Source.fromJSON(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: GetClusterInfoResponse): unknown { - const obj: any = {}; - if (message.workerNodes) { - obj.workerNodes = message.workerNodes.map((e) => e ? WorkerNode.toJSON(e) : undefined); - } else { - obj.workerNodes = []; - } - if (message.tableFragments) { - obj.tableFragments = message.tableFragments.map((e) => e ? TableFragments.toJSON(e) : undefined); - } else { - obj.tableFragments = []; - } - obj.actorSplits = {}; - if (message.actorSplits) { - Object.entries(message.actorSplits).forEach(([k, v]) => { - obj.actorSplits[k] = ConnectorSplits.toJSON(v); - }); - } - obj.sourceInfos = {}; - if (message.sourceInfos) { - Object.entries(message.sourceInfos).forEach(([k, v]) => { - obj.sourceInfos[k] = Source.toJSON(v); - }); - } - return obj; - }, - - fromPartial, I>>(object: I): GetClusterInfoResponse { - const message = createBaseGetClusterInfoResponse(); - message.workerNodes = object.workerNodes?.map((e) => WorkerNode.fromPartial(e)) || []; - message.tableFragments = object.tableFragments?.map((e) => TableFragments.fromPartial(e)) || []; - message.actorSplits = Object.entries(object.actorSplits ?? {}).reduce<{ [key: number]: ConnectorSplits }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = ConnectorSplits.fromPartial(value); - } - return acc; - }, - {}, - ); - message.sourceInfos = Object.entries(object.sourceInfos ?? {}).reduce<{ [key: number]: Source }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = Source.fromPartial(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBaseGetClusterInfoResponse_ActorSplitsEntry(): GetClusterInfoResponse_ActorSplitsEntry { - return { key: 0, value: undefined }; -} - -export const GetClusterInfoResponse_ActorSplitsEntry = { - fromJSON(object: any): GetClusterInfoResponse_ActorSplitsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? ConnectorSplits.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: GetClusterInfoResponse_ActorSplitsEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? ConnectorSplits.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): GetClusterInfoResponse_ActorSplitsEntry { - const message = createBaseGetClusterInfoResponse_ActorSplitsEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? ConnectorSplits.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseGetClusterInfoResponse_SourceInfosEntry(): GetClusterInfoResponse_SourceInfosEntry { - return { key: 0, value: undefined }; -} - -export const GetClusterInfoResponse_SourceInfosEntry = { - fromJSON(object: any): GetClusterInfoResponse_SourceInfosEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? Source.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: GetClusterInfoResponse_SourceInfosEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? 
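
[Editor's note] The `actorSplits`/`sourceInfos` handling above is the generated idiom for proto3 `map<uint32, Message>` fields: JSON object keys always arrive as strings, so each codec re-keys them with `Number(key)` inside a `reduce`. A small generic sketch of that idiom, assuming a plain value decoder (`decodeNumberKeyedMap` and `splitCounts` are made-up names, not part of the generated API):

// JSON object keys are strings; a proto map<uint32, V> needs numeric keys.
function decodeNumberKeyedMap<V>(
  input: Record<string, unknown>,
  decode: (raw: unknown) => V,
): { [key: number]: V } {
  return Object.entries(input).reduce<{ [key: number]: V }>((acc, [key, value]) => {
    acc[Number(key)] = decode(value);
    return acc;
  }, {});
}

const splitCounts = decodeNumberKeyedMap({ "1": "3", "2": "5" }, (v) => Number(v));
console.log(splitCounts[1] + splitCounts[2]); // 8
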
Source.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): GetClusterInfoResponse_SourceInfosEntry { - const message = createBaseGetClusterInfoResponse_SourceInfosEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? Source.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseRescheduleRequest(): RescheduleRequest { - return { reschedules: {} }; -} - -export const RescheduleRequest = { - fromJSON(object: any): RescheduleRequest { - return { - reschedules: isObject(object.reschedules) - ? Object.entries(object.reschedules).reduce<{ [key: number]: RescheduleRequest_Reschedule }>( - (acc, [key, value]) => { - acc[Number(key)] = RescheduleRequest_Reschedule.fromJSON(value); - return acc; - }, - {}, - ) - : {}, - }; - }, - - toJSON(message: RescheduleRequest): unknown { - const obj: any = {}; - obj.reschedules = {}; - if (message.reschedules) { - Object.entries(message.reschedules).forEach(([k, v]) => { - obj.reschedules[k] = RescheduleRequest_Reschedule.toJSON(v); - }); - } - return obj; - }, - - fromPartial, I>>(object: I): RescheduleRequest { - const message = createBaseRescheduleRequest(); - message.reschedules = Object.entries(object.reschedules ?? {}).reduce< - { [key: number]: RescheduleRequest_Reschedule } - >((acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = RescheduleRequest_Reschedule.fromPartial(value); - } - return acc; - }, {}); - return message; - }, -}; - -function createBaseRescheduleRequest_Reschedule(): RescheduleRequest_Reschedule { - return { addedParallelUnits: [], removedParallelUnits: [] }; -} - -export const RescheduleRequest_Reschedule = { - fromJSON(object: any): RescheduleRequest_Reschedule { - return { - addedParallelUnits: Array.isArray(object?.addedParallelUnits) - ? object.addedParallelUnits.map((e: any) => Number(e)) - : [], - removedParallelUnits: Array.isArray(object?.removedParallelUnits) - ? object.removedParallelUnits.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: RescheduleRequest_Reschedule): unknown { - const obj: any = {}; - if (message.addedParallelUnits) { - obj.addedParallelUnits = message.addedParallelUnits.map((e) => Math.round(e)); - } else { - obj.addedParallelUnits = []; - } - if (message.removedParallelUnits) { - obj.removedParallelUnits = message.removedParallelUnits.map((e) => Math.round(e)); - } else { - obj.removedParallelUnits = []; - } - return obj; - }, - - fromPartial, I>>(object: I): RescheduleRequest_Reschedule { - const message = createBaseRescheduleRequest_Reschedule(); - message.addedParallelUnits = object.addedParallelUnits?.map((e) => e) || []; - message.removedParallelUnits = object.removedParallelUnits?.map((e) => e) || []; - return message; - }, -}; - -function createBaseRescheduleRequest_ReschedulesEntry(): RescheduleRequest_ReschedulesEntry { - return { key: 0, value: undefined }; -} - -export const RescheduleRequest_ReschedulesEntry = { - fromJSON(object: any): RescheduleRequest_ReschedulesEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? RescheduleRequest_Reschedule.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: RescheduleRequest_ReschedulesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && - (obj.value = message.value ? 
RescheduleRequest_Reschedule.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): RescheduleRequest_ReschedulesEntry { - const message = createBaseRescheduleRequest_ReschedulesEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? RescheduleRequest_Reschedule.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseRescheduleResponse(): RescheduleResponse { - return { success: false }; -} - -export const RescheduleResponse = { - fromJSON(object: any): RescheduleResponse { - return { success: isSet(object.success) ? Boolean(object.success) : false }; - }, - - toJSON(message: RescheduleResponse): unknown { - const obj: any = {}; - message.success !== undefined && (obj.success = message.success); - return obj; - }, - - fromPartial, I>>(object: I): RescheduleResponse { - const message = createBaseRescheduleResponse(); - message.success = object.success ?? false; - return message; - }, -}; - -function createBaseMembersRequest(): MembersRequest { - return {}; -} - -export const MembersRequest = { - fromJSON(_: any): MembersRequest { - return {}; - }, - - toJSON(_: MembersRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): MembersRequest { - const message = createBaseMembersRequest(); - return message; - }, -}; - -function createBaseMetaMember(): MetaMember { - return { address: undefined, isLeader: false }; -} - -export const MetaMember = { - fromJSON(object: any): MetaMember { - return { - address: isSet(object.address) ? HostAddress.fromJSON(object.address) : undefined, - isLeader: isSet(object.isLeader) ? Boolean(object.isLeader) : false, - }; - }, - - toJSON(message: MetaMember): unknown { - const obj: any = {}; - message.address !== undefined && (obj.address = message.address ? HostAddress.toJSON(message.address) : undefined); - message.isLeader !== undefined && (obj.isLeader = message.isLeader); - return obj; - }, - - fromPartial, I>>(object: I): MetaMember { - const message = createBaseMetaMember(); - message.address = (object.address !== undefined && object.address !== null) - ? HostAddress.fromPartial(object.address) - : undefined; - message.isLeader = object.isLeader ?? false; - return message; - }, -}; - -function createBaseMembersResponse(): MembersResponse { - return { members: [] }; -} - -export const MembersResponse = { - fromJSON(object: any): MembersResponse { - return { members: Array.isArray(object?.members) ? object.members.map((e: any) => MetaMember.fromJSON(e)) : [] }; - }, - - toJSON(message: MembersResponse): unknown { - const obj: any = {}; - if (message.members) { - obj.members = message.members.map((e) => e ? MetaMember.toJSON(e) : undefined); - } else { - obj.members = []; - } - return obj; - }, - - fromPartial, I>>(object: I): MembersResponse { - const message = createBaseMembersResponse(); - message.members = object.members?.map((e) => MetaMember.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseSystemParams(): SystemParams { - return { - barrierIntervalMs: undefined, - checkpointFrequency: undefined, - sstableSizeMb: undefined, - blockSizeKb: undefined, - bloomFalsePositive: undefined, - stateStore: undefined, - dataDirectory: undefined, - backupStorageUrl: undefined, - backupStorageDirectory: undefined, - }; -} - -export const SystemParams = { - fromJSON(object: any): SystemParams { - return { - barrierIntervalMs: isSet(object.barrierIntervalMs) ? 
Number(object.barrierIntervalMs) : undefined, - checkpointFrequency: isSet(object.checkpointFrequency) ? Number(object.checkpointFrequency) : undefined, - sstableSizeMb: isSet(object.sstableSizeMb) ? Number(object.sstableSizeMb) : undefined, - blockSizeKb: isSet(object.blockSizeKb) ? Number(object.blockSizeKb) : undefined, - bloomFalsePositive: isSet(object.bloomFalsePositive) ? Number(object.bloomFalsePositive) : undefined, - stateStore: isSet(object.stateStore) ? String(object.stateStore) : undefined, - dataDirectory: isSet(object.dataDirectory) ? String(object.dataDirectory) : undefined, - backupStorageUrl: isSet(object.backupStorageUrl) ? String(object.backupStorageUrl) : undefined, - backupStorageDirectory: isSet(object.backupStorageDirectory) ? String(object.backupStorageDirectory) : undefined, - }; - }, - - toJSON(message: SystemParams): unknown { - const obj: any = {}; - message.barrierIntervalMs !== undefined && (obj.barrierIntervalMs = Math.round(message.barrierIntervalMs)); - message.checkpointFrequency !== undefined && (obj.checkpointFrequency = Math.round(message.checkpointFrequency)); - message.sstableSizeMb !== undefined && (obj.sstableSizeMb = Math.round(message.sstableSizeMb)); - message.blockSizeKb !== undefined && (obj.blockSizeKb = Math.round(message.blockSizeKb)); - message.bloomFalsePositive !== undefined && (obj.bloomFalsePositive = message.bloomFalsePositive); - message.stateStore !== undefined && (obj.stateStore = message.stateStore); - message.dataDirectory !== undefined && (obj.dataDirectory = message.dataDirectory); - message.backupStorageUrl !== undefined && (obj.backupStorageUrl = message.backupStorageUrl); - message.backupStorageDirectory !== undefined && (obj.backupStorageDirectory = message.backupStorageDirectory); - return obj; - }, - - fromPartial, I>>(object: I): SystemParams { - const message = createBaseSystemParams(); - message.barrierIntervalMs = object.barrierIntervalMs ?? undefined; - message.checkpointFrequency = object.checkpointFrequency ?? undefined; - message.sstableSizeMb = object.sstableSizeMb ?? undefined; - message.blockSizeKb = object.blockSizeKb ?? undefined; - message.bloomFalsePositive = object.bloomFalsePositive ?? undefined; - message.stateStore = object.stateStore ?? undefined; - message.dataDirectory = object.dataDirectory ?? undefined; - message.backupStorageUrl = object.backupStorageUrl ?? undefined; - message.backupStorageDirectory = object.backupStorageDirectory ?? undefined; - return message; - }, -}; - -function createBaseGetSystemParamsRequest(): GetSystemParamsRequest { - return {}; -} - -export const GetSystemParamsRequest = { - fromJSON(_: any): GetSystemParamsRequest { - return {}; - }, - - toJSON(_: GetSystemParamsRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): GetSystemParamsRequest { - const message = createBaseGetSystemParamsRequest(); - return message; - }, -}; - -function createBaseGetSystemParamsResponse(): GetSystemParamsResponse { - return { params: undefined }; -} - -export const GetSystemParamsResponse = { - fromJSON(object: any): GetSystemParamsResponse { - return { params: isSet(object.params) ? SystemParams.fromJSON(object.params) : undefined }; - }, - - toJSON(message: GetSystemParamsResponse): unknown { - const obj: any = {}; - message.params !== undefined && (obj.params = message.params ? 
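
[Editor's note] Every `SystemParams` field above is a proto3 optional, so the codec must preserve "unset" rather than substitute a zero value: `fromJSON` gates each coercion behind `isSet`, and `fromPartial` uses `?? undefined` instead of `?? 0`. A tiny sketch of the decode half (`parseOptionalInt` is an illustrative name):

// Preserve absence: only coerce when the JSON value is actually present.
function isSet(value: any): boolean {
  return value !== null && value !== undefined;
}

function parseOptionalInt(raw: unknown): number | undefined {
  return isSet(raw) ? Number(raw) : undefined;
}

console.log(parseOptionalInt("1000")); // 1000
console.log(parseOptionalInt(undefined)); // undefined -- the field stays unset
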
SystemParams.toJSON(message.params) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): GetSystemParamsResponse { - const message = createBaseGetSystemParamsResponse(); - message.params = (object.params !== undefined && object.params !== null) - ? SystemParams.fromPartial(object.params) - : undefined; - return message; - }, -}; - -function createBaseSetSystemParamRequest(): SetSystemParamRequest { - return { param: "", value: undefined }; -} - -export const SetSystemParamRequest = { - fromJSON(object: any): SetSystemParamRequest { - return { - param: isSet(object.param) ? String(object.param) : "", - value: isSet(object.value) ? String(object.value) : undefined, - }; - }, - - toJSON(message: SetSystemParamRequest): unknown { - const obj: any = {}; - message.param !== undefined && (obj.param = message.param); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>(object: I): SetSystemParamRequest { - const message = createBaseSetSystemParamRequest(); - message.param = object.param ?? ""; - message.value = object.value ?? undefined; - return message; - }, -}; - -function createBaseSetSystemParamResponse(): SetSystemParamResponse { - return {}; -} - -export const SetSystemParamResponse = { - fromJSON(_: any): SetSystemParamResponse { - return {}; - }, - - toJSON(_: SetSystemParamResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): SetSystemParamResponse { - const message = createBaseSetSystemParamResponse(); - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends { $case: string } ? { [K in keyof Omit]?: DeepPartial } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function isObject(value: any): boolean { - return typeof value === "object" && value !== null; -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/monitor_service.ts b/dashboard/proto/gen/monitor_service.ts deleted file mode 100644 index 3719692cb88de..0000000000000 --- a/dashboard/proto/gen/monitor_service.ts +++ /dev/null @@ -1,274 +0,0 @@ -/* eslint-disable */ - -export const protobufPackage = "monitor_service"; - -export interface StackTraceRequest { -} - -export interface StackTraceResponse { - actorTraces: { [key: number]: string }; - rpcTraces: { [key: string]: string }; -} - -export interface StackTraceResponse_ActorTracesEntry { - key: number; - value: string; -} - -export interface StackTraceResponse_RpcTracesEntry { - key: string; - value: string; -} - -export interface ProfilingRequest { - /** How long the profiling should last. 
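
[Editor's note] Throughout this patch the angle-bracketed type parameters appear to have been stripped in transit, leaving remnants such as `fromPartial, I>>(object: I)` and `export type DeepPartial = T extends Builtin ? T ...`. For reference, the ts-proto footer these fragments came from conventionally reads as below; this is a reconstruction from the surviving text, not new API surface:

type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;

export type DeepPartial<T> = T extends Builtin ? T
  : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>>
  : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] }
  : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> }
  : Partial<T>;

type KeysOfUnion<T> = T extends T ? keyof T : never;
export type Exact<P, I extends P> = P extends Builtin ? P
  : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never };

// Correspondingly, each truncated `fromPartial, I>>` signature is of the form:
// fromPartial<I extends Exact<DeepPartial<Msg>, I>>(object: I): Msg
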
*/ - sleepS: number; -} - -export interface ProfilingResponse { - result: Uint8Array; -} - -function createBaseStackTraceRequest(): StackTraceRequest { - return {}; -} - -export const StackTraceRequest = { - fromJSON(_: any): StackTraceRequest { - return {}; - }, - - toJSON(_: StackTraceRequest): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): StackTraceRequest { - const message = createBaseStackTraceRequest(); - return message; - }, -}; - -function createBaseStackTraceResponse(): StackTraceResponse { - return { actorTraces: {}, rpcTraces: {} }; -} - -export const StackTraceResponse = { - fromJSON(object: any): StackTraceResponse { - return { - actorTraces: isObject(object.actorTraces) - ? Object.entries(object.actorTraces).reduce<{ [key: number]: string }>((acc, [key, value]) => { - acc[Number(key)] = String(value); - return acc; - }, {}) - : {}, - rpcTraces: isObject(object.rpcTraces) - ? Object.entries(object.rpcTraces).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: StackTraceResponse): unknown { - const obj: any = {}; - obj.actorTraces = {}; - if (message.actorTraces) { - Object.entries(message.actorTraces).forEach(([k, v]) => { - obj.actorTraces[k] = v; - }); - } - obj.rpcTraces = {}; - if (message.rpcTraces) { - Object.entries(message.rpcTraces).forEach(([k, v]) => { - obj.rpcTraces[k] = v; - }); - } - return obj; - }, - - fromPartial, I>>(object: I): StackTraceResponse { - const message = createBaseStackTraceResponse(); - message.actorTraces = Object.entries(object.actorTraces ?? {}).reduce<{ [key: number]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = String(value); - } - return acc; - }, - {}, - ); - message.rpcTraces = Object.entries(object.rpcTraces ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBaseStackTraceResponse_ActorTracesEntry(): StackTraceResponse_ActorTracesEntry { - return { key: 0, value: "" }; -} - -export const StackTraceResponse_ActorTracesEntry = { - fromJSON(object: any): StackTraceResponse_ActorTracesEntry { - return { key: isSet(object.key) ? Number(object.key) : 0, value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: StackTraceResponse_ActorTracesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>( - object: I, - ): StackTraceResponse_ActorTracesEntry { - const message = createBaseStackTraceResponse_ActorTracesEntry(); - message.key = object.key ?? 0; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseStackTraceResponse_RpcTracesEntry(): StackTraceResponse_RpcTracesEntry { - return { key: "", value: "" }; -} - -export const StackTraceResponse_RpcTracesEntry = { - fromJSON(object: any): StackTraceResponse_RpcTracesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? 
String(object.value) : "" }; - }, - - toJSON(message: StackTraceResponse_RpcTracesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>( - object: I, - ): StackTraceResponse_RpcTracesEntry { - const message = createBaseStackTraceResponse_RpcTracesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseProfilingRequest(): ProfilingRequest { - return { sleepS: 0 }; -} - -export const ProfilingRequest = { - fromJSON(object: any): ProfilingRequest { - return { sleepS: isSet(object.sleepS) ? Number(object.sleepS) : 0 }; - }, - - toJSON(message: ProfilingRequest): unknown { - const obj: any = {}; - message.sleepS !== undefined && (obj.sleepS = Math.round(message.sleepS)); - return obj; - }, - - fromPartial, I>>(object: I): ProfilingRequest { - const message = createBaseProfilingRequest(); - message.sleepS = object.sleepS ?? 0; - return message; - }, -}; - -function createBaseProfilingResponse(): ProfilingResponse { - return { result: new Uint8Array() }; -} - -export const ProfilingResponse = { - fromJSON(object: any): ProfilingResponse { - return { result: isSet(object.result) ? bytesFromBase64(object.result) : new Uint8Array() }; - }, - - toJSON(message: ProfilingResponse): unknown { - const obj: any = {}; - message.result !== undefined && - (obj.result = base64FromBytes(message.result !== undefined ? message.result : new Uint8Array())); - return obj; - }, - - fromPartial, I>>(object: I): ProfilingResponse { - const message = createBaseProfilingResponse(); - message.result = object.result ?? new Uint8Array(); - return message; - }, -}; - -declare var self: any | undefined; -declare var window: any | undefined; -declare var global: any | undefined; -var globalThis: any = (() => { - if (typeof globalThis !== "undefined") { - return globalThis; - } - if (typeof self !== "undefined") { - return self; - } - if (typeof window !== "undefined") { - return window; - } - if (typeof global !== "undefined") { - return global; - } - throw "Unable to locate global object"; -})(); - -function bytesFromBase64(b64: string): Uint8Array { - if (globalThis.Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if (globalThis.Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends { $case: string } ? { [K in keyof Omit]?: DeepPartial } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? 
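
[Editor's note] `ProfilingResponse.result` is a proto3 `bytes` field, which the JSON codec carries as base64: `bytesFromBase64`/`base64FromBytes` above prefer Node's `Buffer` and fall back to `atob`/`btoa` in browsers, with the `globalThis` shim resolving whichever global object exists. A quick round-trip of the Node path (assumes a Node runtime; `payload` is illustrative):

// Round-trip a bytes payload the way the generated JSON codec does on Node.
const payload = new Uint8Array([1, 2, 3]);

const b64: string = Buffer.from(payload).toString("base64"); // "AQID"
const back: Uint8Array = Uint8Array.from(Buffer.from(b64, "base64"));

console.log(b64, back); // "AQID" Uint8Array(3) [ 1, 2, 3 ]
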
P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function isObject(value: any): boolean { - return typeof value === "object" && value !== null; -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/order.ts b/dashboard/proto/gen/order.ts deleted file mode 100644 index 6037394eadcee..0000000000000 --- a/dashboard/proto/gen/order.ts +++ /dev/null @@ -1,128 +0,0 @@ -/* eslint-disable */ - -export const protobufPackage = "order"; - -export const PbDirection = { - PbDirection_UNSPECIFIED: "PbDirection_UNSPECIFIED", - PbDirection_ASCENDING: "PbDirection_ASCENDING", - PbDirection_DESCENDING: "PbDirection_DESCENDING", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type PbDirection = typeof PbDirection[keyof typeof PbDirection]; - -export function pbDirectionFromJSON(object: any): PbDirection { - switch (object) { - case 0: - case "PbDirection_UNSPECIFIED": - return PbDirection.PbDirection_UNSPECIFIED; - case 1: - case "PbDirection_ASCENDING": - return PbDirection.PbDirection_ASCENDING; - case 2: - case "PbDirection_DESCENDING": - return PbDirection.PbDirection_DESCENDING; - case -1: - case "UNRECOGNIZED": - default: - return PbDirection.UNRECOGNIZED; - } -} - -export function pbDirectionToJSON(object: PbDirection): string { - switch (object) { - case PbDirection.PbDirection_UNSPECIFIED: - return "PbDirection_UNSPECIFIED"; - case PbDirection.PbDirection_ASCENDING: - return "PbDirection_ASCENDING"; - case PbDirection.PbDirection_DESCENDING: - return "PbDirection_DESCENDING"; - case PbDirection.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface PbOrderType { - /** - * TODO(rc): enable `NULLS FIRST | LAST` - * PbNullsAre nulls_are = 2; - */ - direction: PbDirection; -} - -/** Column index with an order type (ASC or DESC). Used to represent a sort key (`repeated PbColumnOrder`). */ -export interface PbColumnOrder { - columnIndex: number; - orderType: PbOrderType | undefined; -} - -function createBasePbOrderType(): PbOrderType { - return { direction: PbDirection.PbDirection_UNSPECIFIED }; -} - -export const PbOrderType = { - fromJSON(object: any): PbOrderType { - return { - direction: isSet(object.direction) ? pbDirectionFromJSON(object.direction) : PbDirection.PbDirection_UNSPECIFIED, - }; - }, - - toJSON(message: PbOrderType): unknown { - const obj: any = {}; - message.direction !== undefined && (obj.direction = pbDirectionToJSON(message.direction)); - return obj; - }, - - fromPartial, I>>(object: I): PbOrderType { - const message = createBasePbOrderType(); - message.direction = object.direction ?? PbDirection.PbDirection_UNSPECIFIED; - return message; - }, -}; - -function createBasePbColumnOrder(): PbColumnOrder { - return { columnIndex: 0, orderType: undefined }; -} - -export const PbColumnOrder = { - fromJSON(object: any): PbColumnOrder { - return { - columnIndex: isSet(object.columnIndex) ? Number(object.columnIndex) : 0, - orderType: isSet(object.orderType) ? PbOrderType.fromJSON(object.orderType) : undefined, - }; - }, - - toJSON(message: PbColumnOrder): unknown { - const obj: any = {}; - message.columnIndex !== undefined && (obj.columnIndex = Math.round(message.columnIndex)); - message.orderType !== undefined && - (obj.orderType = message.orderType ? PbOrderType.toJSON(message.orderType) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): PbColumnOrder { - const message = createBasePbColumnOrder(); - message.columnIndex = object.columnIndex ?? 
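
[Editor's note] `PbDirection` above is the shape ts-proto emits for enums in string mode: an `as const` object of string literals, with `fromJSON` accepting either the proto wire integer or the JSON name and degrading anything else to `UNRECOGNIZED`. A usage sketch against the definitions in this deleted file:

// `pbDirectionFromJSON` tolerates both wire numbers and names.
pbDirectionFromJSON(1);                        // PbDirection.PbDirection_ASCENDING
pbDirectionFromJSON("PbDirection_DESCENDING"); // PbDirection.PbDirection_DESCENDING
pbDirectionFromJSON(42);                       // PbDirection.UNRECOGNIZED -- unknown values degrade safely

pbDirectionToJSON(PbDirection.PbDirection_ASCENDING); // "PbDirection_ASCENDING"
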
0; - message.orderType = (object.orderType !== undefined && object.orderType !== null) - ? PbOrderType.fromPartial(object.orderType) - : undefined; - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends { $case: string } ? { [K in keyof Omit]?: DeepPartial } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/plan_common.ts b/dashboard/proto/gen/plan_common.ts deleted file mode 100644 index 38a14ad1a05a8..0000000000000 --- a/dashboard/proto/gen/plan_common.ts +++ /dev/null @@ -1,414 +0,0 @@ -/* eslint-disable */ -import { ColumnOrder } from "./common"; -import { DataType } from "./data"; - -export const protobufPackage = "plan_common"; - -export const JoinType = { - /** - * UNSPECIFIED - Note that it comes from Calcite's JoinRelType. - * DO NOT HAVE direction for SEMI and ANTI now. - */ - UNSPECIFIED: "UNSPECIFIED", - INNER: "INNER", - LEFT_OUTER: "LEFT_OUTER", - RIGHT_OUTER: "RIGHT_OUTER", - FULL_OUTER: "FULL_OUTER", - LEFT_SEMI: "LEFT_SEMI", - LEFT_ANTI: "LEFT_ANTI", - RIGHT_SEMI: "RIGHT_SEMI", - RIGHT_ANTI: "RIGHT_ANTI", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type JoinType = typeof JoinType[keyof typeof JoinType]; - -export function joinTypeFromJSON(object: any): JoinType { - switch (object) { - case 0: - case "UNSPECIFIED": - return JoinType.UNSPECIFIED; - case 1: - case "INNER": - return JoinType.INNER; - case 2: - case "LEFT_OUTER": - return JoinType.LEFT_OUTER; - case 3: - case "RIGHT_OUTER": - return JoinType.RIGHT_OUTER; - case 4: - case "FULL_OUTER": - return JoinType.FULL_OUTER; - case 5: - case "LEFT_SEMI": - return JoinType.LEFT_SEMI; - case 6: - case "LEFT_ANTI": - return JoinType.LEFT_ANTI; - case 7: - case "RIGHT_SEMI": - return JoinType.RIGHT_SEMI; - case 8: - case "RIGHT_ANTI": - return JoinType.RIGHT_ANTI; - case -1: - case "UNRECOGNIZED": - default: - return JoinType.UNRECOGNIZED; - } -} - -export function joinTypeToJSON(object: JoinType): string { - switch (object) { - case JoinType.UNSPECIFIED: - return "UNSPECIFIED"; - case JoinType.INNER: - return "INNER"; - case JoinType.LEFT_OUTER: - return "LEFT_OUTER"; - case JoinType.RIGHT_OUTER: - return "RIGHT_OUTER"; - case JoinType.FULL_OUTER: - return "FULL_OUTER"; - case JoinType.LEFT_SEMI: - return "LEFT_SEMI"; - case JoinType.LEFT_ANTI: - return "LEFT_ANTI"; - case JoinType.RIGHT_SEMI: - return "RIGHT_SEMI"; - case JoinType.RIGHT_ANTI: - return "RIGHT_ANTI"; - case JoinType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export const RowFormatType = { - ROW_UNSPECIFIED: "ROW_UNSPECIFIED", - JSON: "JSON", - PROTOBUF: "PROTOBUF", - DEBEZIUM_JSON: "DEBEZIUM_JSON", - AVRO: "AVRO", - MAXWELL: "MAXWELL", - CANAL_JSON: "CANAL_JSON", - CSV: "CSV", - NATIVE: "NATIVE", - DEBEZIUM_AVRO: "DEBEZIUM_AVRO", - UPSERT_JSON: "UPSERT_JSON", - UPSERT_AVRO: "UPSERT_AVRO", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type RowFormatType = typeof RowFormatType[keyof typeof RowFormatType]; - -export function rowFormatTypeFromJSON(object: any): RowFormatType { - switch (object) { - case 0: - case 
"ROW_UNSPECIFIED": - return RowFormatType.ROW_UNSPECIFIED; - case 1: - case "JSON": - return RowFormatType.JSON; - case 2: - case "PROTOBUF": - return RowFormatType.PROTOBUF; - case 3: - case "DEBEZIUM_JSON": - return RowFormatType.DEBEZIUM_JSON; - case 4: - case "AVRO": - return RowFormatType.AVRO; - case 5: - case "MAXWELL": - return RowFormatType.MAXWELL; - case 6: - case "CANAL_JSON": - return RowFormatType.CANAL_JSON; - case 7: - case "CSV": - return RowFormatType.CSV; - case 8: - case "NATIVE": - return RowFormatType.NATIVE; - case 9: - case "DEBEZIUM_AVRO": - return RowFormatType.DEBEZIUM_AVRO; - case 10: - case "UPSERT_JSON": - return RowFormatType.UPSERT_JSON; - case 11: - case "UPSERT_AVRO": - return RowFormatType.UPSERT_AVRO; - case -1: - case "UNRECOGNIZED": - default: - return RowFormatType.UNRECOGNIZED; - } -} - -export function rowFormatTypeToJSON(object: RowFormatType): string { - switch (object) { - case RowFormatType.ROW_UNSPECIFIED: - return "ROW_UNSPECIFIED"; - case RowFormatType.JSON: - return "JSON"; - case RowFormatType.PROTOBUF: - return "PROTOBUF"; - case RowFormatType.DEBEZIUM_JSON: - return "DEBEZIUM_JSON"; - case RowFormatType.AVRO: - return "AVRO"; - case RowFormatType.MAXWELL: - return "MAXWELL"; - case RowFormatType.CANAL_JSON: - return "CANAL_JSON"; - case RowFormatType.CSV: - return "CSV"; - case RowFormatType.NATIVE: - return "NATIVE"; - case RowFormatType.DEBEZIUM_AVRO: - return "DEBEZIUM_AVRO"; - case RowFormatType.UPSERT_JSON: - return "UPSERT_JSON"; - case RowFormatType.UPSERT_AVRO: - return "UPSERT_AVRO"; - case RowFormatType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** Field is a column in the streaming or batch plan. */ -export interface Field { - dataType: DataType | undefined; - name: string; -} - -export interface ColumnDesc { - columnType: DataType | undefined; - columnId: number; - /** - * we store the column name in column desc now just for debug, but in future - * we should store it in ColumnCatalog but not here - */ - name: string; - /** For STRUCT type. */ - fieldDescs: ColumnDesc[]; - /** - * The user-defined type's name. Empty if the column type is a builtin type. - * For example, when the type is created from a protobuf schema file, - * this field will store the message name. - */ - typeName: string; -} - -export interface ColumnCatalog { - columnDesc: ColumnDesc | undefined; - isHidden: boolean; -} - -export interface StorageTableDesc { - tableId: number; - columns: ColumnDesc[]; - /** TODO: may refactor primary key representations */ - pk: ColumnOrder[]; - distKeyIndices: number[]; - retentionSeconds: number; - valueIndices: number[]; - readPrefixLenHint: number; - /** - * Whether the table is versioned. If `true`, column-aware row encoding will be used - * to be compatible with schema changes. - */ - versioned: boolean; -} - -function createBaseField(): Field { - return { dataType: undefined, name: "" }; -} - -export const Field = { - fromJSON(object: any): Field { - return { - dataType: isSet(object.dataType) ? DataType.fromJSON(object.dataType) : undefined, - name: isSet(object.name) ? String(object.name) : "", - }; - }, - - toJSON(message: Field): unknown { - const obj: any = {}; - message.dataType !== undefined && (obj.dataType = message.dataType ? 
DataType.toJSON(message.dataType) : undefined); - message.name !== undefined && (obj.name = message.name); - return obj; - }, - - fromPartial, I>>(object: I): Field { - const message = createBaseField(); - message.dataType = (object.dataType !== undefined && object.dataType !== null) - ? DataType.fromPartial(object.dataType) - : undefined; - message.name = object.name ?? ""; - return message; - }, -}; - -function createBaseColumnDesc(): ColumnDesc { - return { columnType: undefined, columnId: 0, name: "", fieldDescs: [], typeName: "" }; -} - -export const ColumnDesc = { - fromJSON(object: any): ColumnDesc { - return { - columnType: isSet(object.columnType) ? DataType.fromJSON(object.columnType) : undefined, - columnId: isSet(object.columnId) ? Number(object.columnId) : 0, - name: isSet(object.name) ? String(object.name) : "", - fieldDescs: Array.isArray(object?.fieldDescs) ? object.fieldDescs.map((e: any) => ColumnDesc.fromJSON(e)) : [], - typeName: isSet(object.typeName) ? String(object.typeName) : "", - }; - }, - - toJSON(message: ColumnDesc): unknown { - const obj: any = {}; - message.columnType !== undefined && - (obj.columnType = message.columnType ? DataType.toJSON(message.columnType) : undefined); - message.columnId !== undefined && (obj.columnId = Math.round(message.columnId)); - message.name !== undefined && (obj.name = message.name); - if (message.fieldDescs) { - obj.fieldDescs = message.fieldDescs.map((e) => e ? ColumnDesc.toJSON(e) : undefined); - } else { - obj.fieldDescs = []; - } - message.typeName !== undefined && (obj.typeName = message.typeName); - return obj; - }, - - fromPartial, I>>(object: I): ColumnDesc { - const message = createBaseColumnDesc(); - message.columnType = (object.columnType !== undefined && object.columnType !== null) - ? DataType.fromPartial(object.columnType) - : undefined; - message.columnId = object.columnId ?? 0; - message.name = object.name ?? ""; - message.fieldDescs = object.fieldDescs?.map((e) => ColumnDesc.fromPartial(e)) || []; - message.typeName = object.typeName ?? ""; - return message; - }, -}; - -function createBaseColumnCatalog(): ColumnCatalog { - return { columnDesc: undefined, isHidden: false }; -} - -export const ColumnCatalog = { - fromJSON(object: any): ColumnCatalog { - return { - columnDesc: isSet(object.columnDesc) ? ColumnDesc.fromJSON(object.columnDesc) : undefined, - isHidden: isSet(object.isHidden) ? Boolean(object.isHidden) : false, - }; - }, - - toJSON(message: ColumnCatalog): unknown { - const obj: any = {}; - message.columnDesc !== undefined && - (obj.columnDesc = message.columnDesc ? ColumnDesc.toJSON(message.columnDesc) : undefined); - message.isHidden !== undefined && (obj.isHidden = message.isHidden); - return obj; - }, - - fromPartial, I>>(object: I): ColumnCatalog { - const message = createBaseColumnCatalog(); - message.columnDesc = (object.columnDesc !== undefined && object.columnDesc !== null) - ? ColumnDesc.fromPartial(object.columnDesc) - : undefined; - message.isHidden = object.isHidden ?? false; - return message; - }, -}; - -function createBaseStorageTableDesc(): StorageTableDesc { - return { - tableId: 0, - columns: [], - pk: [], - distKeyIndices: [], - retentionSeconds: 0, - valueIndices: [], - readPrefixLenHint: 0, - versioned: false, - }; -} - -export const StorageTableDesc = { - fromJSON(object: any): StorageTableDesc { - return { - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - columns: Array.isArray(object?.columns) ? 
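
[Editor's note] `ColumnDesc.fieldDescs` makes the type recursive: a STRUCT column carries one nested `ColumnDesc` per struct field. A hand-built example of the shape `fromJSON` produces; names and ids are illustrative, and the `DataType` payloads are elided as `undefined` to keep the sketch self-contained:

// A STRUCT column `address` with two nested fields, shaped like ColumnDesc.
const addressColumn = {
  columnType: undefined, // would be the STRUCT DataType from ./data
  columnId: 3,
  name: "address",
  typeName: "", // non-empty only for user-defined types, e.g. a protobuf message name
  fieldDescs: [
    { columnType: undefined, columnId: 4, name: "city", typeName: "", fieldDescs: [] },
    { columnType: undefined, columnId: 5, name: "zipcode", typeName: "", fieldDescs: [] },
  ],
};

console.log(addressColumn.fieldDescs.map((f) => f.name)); // [ "city", "zipcode" ]
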
object.columns.map((e: any) => ColumnDesc.fromJSON(e)) : [], - pk: Array.isArray(object?.pk) ? object.pk.map((e: any) => ColumnOrder.fromJSON(e)) : [], - distKeyIndices: Array.isArray(object?.distKeyIndices) ? object.distKeyIndices.map((e: any) => Number(e)) : [], - retentionSeconds: isSet(object.retentionSeconds) ? Number(object.retentionSeconds) : 0, - valueIndices: Array.isArray(object?.valueIndices) ? object.valueIndices.map((e: any) => Number(e)) : [], - readPrefixLenHint: isSet(object.readPrefixLenHint) ? Number(object.readPrefixLenHint) : 0, - versioned: isSet(object.versioned) ? Boolean(object.versioned) : false, - }; - }, - - toJSON(message: StorageTableDesc): unknown { - const obj: any = {}; - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - if (message.columns) { - obj.columns = message.columns.map((e) => e ? ColumnDesc.toJSON(e) : undefined); - } else { - obj.columns = []; - } - if (message.pk) { - obj.pk = message.pk.map((e) => e ? ColumnOrder.toJSON(e) : undefined); - } else { - obj.pk = []; - } - if (message.distKeyIndices) { - obj.distKeyIndices = message.distKeyIndices.map((e) => Math.round(e)); - } else { - obj.distKeyIndices = []; - } - message.retentionSeconds !== undefined && (obj.retentionSeconds = Math.round(message.retentionSeconds)); - if (message.valueIndices) { - obj.valueIndices = message.valueIndices.map((e) => Math.round(e)); - } else { - obj.valueIndices = []; - } - message.readPrefixLenHint !== undefined && (obj.readPrefixLenHint = Math.round(message.readPrefixLenHint)); - message.versioned !== undefined && (obj.versioned = message.versioned); - return obj; - }, - - fromPartial, I>>(object: I): StorageTableDesc { - const message = createBaseStorageTableDesc(); - message.tableId = object.tableId ?? 0; - message.columns = object.columns?.map((e) => ColumnDesc.fromPartial(e)) || []; - message.pk = object.pk?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.distKeyIndices = object.distKeyIndices?.map((e) => e) || []; - message.retentionSeconds = object.retentionSeconds ?? 0; - message.valueIndices = object.valueIndices?.map((e) => e) || []; - message.readPrefixLenHint = object.readPrefixLenHint ?? 0; - message.versioned = object.versioned ?? false; - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends { $case: string } ? { [K in keyof Omit]?: DeepPartial } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? 
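
[Editor's note] Because `fromPartial` accepts a deep-partial input, callers can build a `StorageTableDesc` from only the fields they care about and let the `createBaseStorageTableDesc()` defaults (0, [], false) fill in the rest. A hypothetical call against this deleted module, with illustrative values:

// Unspecified fields fall back to the createBase defaults.
const desc = StorageTableDesc.fromPartial({
  tableId: 42,
  distKeyIndices: [0],
  versioned: true,
});

console.log(desc.retentionSeconds, desc.columns.length, desc.versioned); // 0 0 true
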
P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/source.ts b/dashboard/proto/gen/source.ts deleted file mode 100644 index 1d45c28375e74..0000000000000 --- a/dashboard/proto/gen/source.ts +++ /dev/null @@ -1,162 +0,0 @@ -/* eslint-disable */ - -export const protobufPackage = "source"; - -export interface ConnectorSplit { - splitType: string; - encodedSplit: Uint8Array; -} - -export interface ConnectorSplits { - splits: ConnectorSplit[]; -} - -export interface SourceActorInfo { - actorId: number; - splits: ConnectorSplits | undefined; -} - -function createBaseConnectorSplit(): ConnectorSplit { - return { splitType: "", encodedSplit: new Uint8Array() }; -} - -export const ConnectorSplit = { - fromJSON(object: any): ConnectorSplit { - return { - splitType: isSet(object.splitType) ? String(object.splitType) : "", - encodedSplit: isSet(object.encodedSplit) ? bytesFromBase64(object.encodedSplit) : new Uint8Array(), - }; - }, - - toJSON(message: ConnectorSplit): unknown { - const obj: any = {}; - message.splitType !== undefined && (obj.splitType = message.splitType); - message.encodedSplit !== undefined && - (obj.encodedSplit = base64FromBytes( - message.encodedSplit !== undefined ? message.encodedSplit : new Uint8Array(), - )); - return obj; - }, - - fromPartial, I>>(object: I): ConnectorSplit { - const message = createBaseConnectorSplit(); - message.splitType = object.splitType ?? ""; - message.encodedSplit = object.encodedSplit ?? new Uint8Array(); - return message; - }, -}; - -function createBaseConnectorSplits(): ConnectorSplits { - return { splits: [] }; -} - -export const ConnectorSplits = { - fromJSON(object: any): ConnectorSplits { - return { splits: Array.isArray(object?.splits) ? object.splits.map((e: any) => ConnectorSplit.fromJSON(e)) : [] }; - }, - - toJSON(message: ConnectorSplits): unknown { - const obj: any = {}; - if (message.splits) { - obj.splits = message.splits.map((e) => e ? ConnectorSplit.toJSON(e) : undefined); - } else { - obj.splits = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ConnectorSplits { - const message = createBaseConnectorSplits(); - message.splits = object.splits?.map((e) => ConnectorSplit.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseSourceActorInfo(): SourceActorInfo { - return { actorId: 0, splits: undefined }; -} - -export const SourceActorInfo = { - fromJSON(object: any): SourceActorInfo { - return { - actorId: isSet(object.actorId) ? Number(object.actorId) : 0, - splits: isSet(object.splits) ? ConnectorSplits.fromJSON(object.splits) : undefined, - }; - }, - - toJSON(message: SourceActorInfo): unknown { - const obj: any = {}; - message.actorId !== undefined && (obj.actorId = Math.round(message.actorId)); - message.splits !== undefined && (obj.splits = message.splits ? ConnectorSplits.toJSON(message.splits) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): SourceActorInfo { - const message = createBaseSourceActorInfo(); - message.actorId = object.actorId ?? 0; - message.splits = (object.splits !== undefined && object.splits !== null) - ? 
ConnectorSplits.fromPartial(object.splits) - : undefined; - return message; - }, -}; - -declare var self: any | undefined; -declare var window: any | undefined; -declare var global: any | undefined; -var globalThis: any = (() => { - if (typeof globalThis !== "undefined") { - return globalThis; - } - if (typeof self !== "undefined") { - return self; - } - if (typeof window !== "undefined") { - return window; - } - if (typeof global !== "undefined") { - return global; - } - throw "Unable to locate global object"; -})(); - -function bytesFromBase64(b64: string): Uint8Array { - if (globalThis.Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if (globalThis.Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial = T extends Builtin ? T - : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> - : T extends { $case: string } ? { [K in keyof Omit]?: DeepPartial } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin ? P - : P & { [K in keyof P]: Exact } & { [K in Exclude>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/stream_plan.ts b/dashboard/proto/gen/stream_plan.ts deleted file mode 100644 index 1e150e5dc03cd..0000000000000 --- a/dashboard/proto/gen/stream_plan.ts +++ /dev/null @@ -1,4432 +0,0 @@ -/* eslint-disable */ -import { SinkType, sinkTypeFromJSON, sinkTypeToJSON, StreamSourceInfo, Table, WatermarkDesc } from "./catalog"; -import { Buffer, ColumnOrder } from "./common"; -import { Datum, Epoch, IntervalUnit, StreamChunk } from "./data"; -import { AggCall, ExprNode, InputRef, ProjectSetSelectItem } from "./expr"; -import { - ColumnCatalog, - ColumnDesc, - Field, - JoinType, - joinTypeFromJSON, - joinTypeToJSON, - StorageTableDesc, -} from "./plan_common"; -import { ConnectorSplits } from "./source"; - -export const protobufPackage = "stream_plan"; - -export const HandleConflictBehavior = { - NO_CHECK_UNSPECIFIED: "NO_CHECK_UNSPECIFIED", - OVERWRITE: "OVERWRITE", - IGNORE: "IGNORE", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type HandleConflictBehavior = typeof HandleConflictBehavior[keyof typeof HandleConflictBehavior]; - -export function handleConflictBehaviorFromJSON(object: any): HandleConflictBehavior { - switch (object) { - case 0: - case "NO_CHECK_UNSPECIFIED": - return HandleConflictBehavior.NO_CHECK_UNSPECIFIED; - case 1: - case "OVERWRITE": - return HandleConflictBehavior.OVERWRITE; - case 2: - case "IGNORE": - return HandleConflictBehavior.IGNORE; - case -1: - case "UNRECOGNIZED": - default: - return HandleConflictBehavior.UNRECOGNIZED; - } -} - -export function handleConflictBehaviorToJSON(object: HandleConflictBehavior): string { - switch (object) { - case HandleConflictBehavior.NO_CHECK_UNSPECIFIED: - return "NO_CHECK_UNSPECIFIED"; - case HandleConflictBehavior.OVERWRITE: - return "OVERWRITE"; - 
case HandleConflictBehavior.IGNORE: - return "IGNORE"; - case HandleConflictBehavior.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export const ChainType = { - CHAIN_UNSPECIFIED: "CHAIN_UNSPECIFIED", - /** CHAIN - CHAIN is corresponding to the chain executor. */ - CHAIN: "CHAIN", - /** REARRANGE - REARRANGE is corresponding to the rearranged chain executor. */ - REARRANGE: "REARRANGE", - /** BACKFILL - BACKFILL is corresponding to the backfill executor. */ - BACKFILL: "BACKFILL", - /** UPSTREAM_ONLY - UPSTREAM_ONLY is corresponding to the chain executor, but doesn't consume the snapshot. */ - UPSTREAM_ONLY: "UPSTREAM_ONLY", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type ChainType = typeof ChainType[keyof typeof ChainType]; - -export function chainTypeFromJSON(object: any): ChainType { - switch (object) { - case 0: - case "CHAIN_UNSPECIFIED": - return ChainType.CHAIN_UNSPECIFIED; - case 1: - case "CHAIN": - return ChainType.CHAIN; - case 2: - case "REARRANGE": - return ChainType.REARRANGE; - case 3: - case "BACKFILL": - return ChainType.BACKFILL; - case 4: - case "UPSTREAM_ONLY": - return ChainType.UPSTREAM_ONLY; - case -1: - case "UNRECOGNIZED": - default: - return ChainType.UNRECOGNIZED; - } -} - -export function chainTypeToJSON(object: ChainType): string { - switch (object) { - case ChainType.CHAIN_UNSPECIFIED: - return "CHAIN_UNSPECIFIED"; - case ChainType.CHAIN: - return "CHAIN"; - case ChainType.REARRANGE: - return "REARRANGE"; - case ChainType.BACKFILL: - return "BACKFILL"; - case ChainType.UPSTREAM_ONLY: - return "UPSTREAM_ONLY"; - case ChainType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export const DispatcherType = { - UNSPECIFIED: "UNSPECIFIED", - /** HASH - Dispatch by hash key, hashed by consistent hash. */ - HASH: "HASH", - /** - * BROADCAST - Broadcast to all downstreams. - * - * Note a broadcast cannot be represented as multiple simple dispatchers, since they are - * different when we update dispatchers during scaling. - */ - BROADCAST: "BROADCAST", - /** SIMPLE - Only one downstream. */ - SIMPLE: "SIMPLE", - /** - * NO_SHUFFLE - A special kind of exchange that doesn't involve shuffle. The upstream actor will be directly - * piped into the downstream actor, if there are the same number of actors. If number of actors - * are not the same, should use hash instead. Should be only used when distribution is the same. 
- */ - NO_SHUFFLE: "NO_SHUFFLE", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type DispatcherType = typeof DispatcherType[keyof typeof DispatcherType]; - -export function dispatcherTypeFromJSON(object: any): DispatcherType { - switch (object) { - case 0: - case "UNSPECIFIED": - return DispatcherType.UNSPECIFIED; - case 1: - case "HASH": - return DispatcherType.HASH; - case 2: - case "BROADCAST": - return DispatcherType.BROADCAST; - case 3: - case "SIMPLE": - return DispatcherType.SIMPLE; - case 4: - case "NO_SHUFFLE": - return DispatcherType.NO_SHUFFLE; - case -1: - case "UNRECOGNIZED": - default: - return DispatcherType.UNRECOGNIZED; - } -} - -export function dispatcherTypeToJSON(object: DispatcherType): string { - switch (object) { - case DispatcherType.UNSPECIFIED: - return "UNSPECIFIED"; - case DispatcherType.HASH: - return "HASH"; - case DispatcherType.BROADCAST: - return "BROADCAST"; - case DispatcherType.SIMPLE: - return "SIMPLE"; - case DispatcherType.NO_SHUFFLE: - return "NO_SHUFFLE"; - case DispatcherType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export const FragmentTypeFlag = { - FRAGMENT_UNSPECIFIED: "FRAGMENT_UNSPECIFIED", - SOURCE: "SOURCE", - MVIEW: "MVIEW", - SINK: "SINK", - NOW: "NOW", - CHAIN_NODE: "CHAIN_NODE", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type FragmentTypeFlag = typeof FragmentTypeFlag[keyof typeof FragmentTypeFlag]; - -export function fragmentTypeFlagFromJSON(object: any): FragmentTypeFlag { - switch (object) { - case 0: - case "FRAGMENT_UNSPECIFIED": - return FragmentTypeFlag.FRAGMENT_UNSPECIFIED; - case 1: - case "SOURCE": - return FragmentTypeFlag.SOURCE; - case 2: - case "MVIEW": - return FragmentTypeFlag.MVIEW; - case 4: - case "SINK": - return FragmentTypeFlag.SINK; - case 8: - case "NOW": - return FragmentTypeFlag.NOW; - case 16: - case "CHAIN_NODE": - return FragmentTypeFlag.CHAIN_NODE; - case -1: - case "UNRECOGNIZED": - default: - return FragmentTypeFlag.UNRECOGNIZED; - } -} - -export function fragmentTypeFlagToJSON(object: FragmentTypeFlag): string { - switch (object) { - case FragmentTypeFlag.FRAGMENT_UNSPECIFIED: - return "FRAGMENT_UNSPECIFIED"; - case FragmentTypeFlag.SOURCE: - return "SOURCE"; - case FragmentTypeFlag.MVIEW: - return "MVIEW"; - case FragmentTypeFlag.SINK: - return "SINK"; - case FragmentTypeFlag.NOW: - return "NOW"; - case FragmentTypeFlag.CHAIN_NODE: - return "CHAIN_NODE"; - case FragmentTypeFlag.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface AddMutation { - /** New dispatchers for each actor. */ - actorDispatchers: { [key: number]: AddMutation_Dispatchers }; - /** - * We may embed a source change split mutation here. - * TODO: we may allow multiple mutations in a single barrier. - */ - actorSplits: { [key: number]: ConnectorSplits }; -} - -export interface AddMutation_Dispatchers { - dispatchers: Dispatcher[]; -} - -export interface AddMutation_ActorDispatchersEntry { - key: number; - value: AddMutation_Dispatchers | undefined; -} - -export interface AddMutation_ActorSplitsEntry { - key: number; - value: ConnectorSplits | undefined; -} - -export interface StopMutation { - actors: number[]; -} - -export interface UpdateMutation { - /** Dispatcher updates. */ - dispatcherUpdate: UpdateMutation_DispatcherUpdate[]; - /** Merge updates. */ - mergeUpdate: UpdateMutation_MergeUpdate[]; - /** Vnode bitmap updates for each actor. */ - actorVnodeBitmapUpdate: { [key: number]: Buffer }; - /** All actors to be dropped in this update. 
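
[Editor's note] The wire values in `fragmentTypeFlagFromJSON` above are powers of two (SOURCE=1, MVIEW=2, SINK=4, NOW=8, CHAIN_NODE=16), which suggests the flag is used as a bitmask so a single fragment can carry several roles at once, even though the JSON helpers convert one flag at a time. A sketch under that assumption, using the numeric values from the cases above:

// Flag values taken from fragmentTypeFlagFromJSON; the mask usage is inferred.
const SOURCE = 1, MVIEW = 2, SINK = 4;

const fragmentTypeMask = SOURCE | MVIEW; // a fragment acting as both source and mview

console.log((fragmentTypeMask & MVIEW) !== 0); // true
console.log((fragmentTypeMask & SINK) !== 0);  // false
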
*/ - droppedActors: number[]; - /** Source updates. */ - actorSplits: { [key: number]: ConnectorSplits }; -} - -export interface UpdateMutation_DispatcherUpdate { - /** Dispatcher can be uniquely identified by a combination of actor id and dispatcher id. */ - actorId: number; - dispatcherId: number; - /** - * The hash mapping for consistent hash. - * For dispatcher types other than HASH, this is ignored. - */ - hashMapping: - | ActorMapping - | undefined; - /** Added downstream actors. */ - addedDownstreamActorId: number[]; - /** Removed downstream actors. */ - removedDownstreamActorId: number[]; -} - -export interface UpdateMutation_MergeUpdate { - /** Merge executor can be uniquely identified by a combination of actor id and upstream fragment id. */ - actorId: number; - upstreamFragmentId: number; - /** - * - For scaling, this is always `None`. - * - For plan change, the upstream fragment will be changed to a new one, and this will be `Some`. - * In this case, all the upstream actors should be removed and replaced by the `new` ones. - */ - newUpstreamFragmentId?: - | number - | undefined; - /** Added upstream actors. */ - addedUpstreamActorId: number[]; - /** Removed upstream actors. */ - removedUpstreamActorId: number[]; -} - -export interface UpdateMutation_ActorVnodeBitmapUpdateEntry { - key: number; - value: Buffer | undefined; -} - -export interface UpdateMutation_ActorSplitsEntry { - key: number; - value: ConnectorSplits | undefined; -} - -export interface SourceChangeSplitMutation { - actorSplits: { [key: number]: ConnectorSplits }; -} - -export interface SourceChangeSplitMutation_ActorSplitsEntry { - key: number; - value: ConnectorSplits | undefined; -} - -export interface PauseMutation { -} - -export interface ResumeMutation { -} - -export interface Barrier { - epoch: Epoch | undefined; - mutation?: - | { $case: "add"; add: AddMutation } - | { $case: "stop"; stop: StopMutation } - | { $case: "update"; update: UpdateMutation } - | { $case: "splits"; splits: SourceChangeSplitMutation } - | { $case: "pause"; pause: PauseMutation } - | { $case: "resume"; resume: ResumeMutation }; - /** Used for tracing. */ - span: Uint8Array; - /** Whether this barrier do checkpoint */ - checkpoint: boolean; - /** Record the actors that the barrier has passed. Only used for debugging. */ - passedActors: number[]; -} - -export interface Watermark { - /** The reference to the watermark column in the stream's schema. */ - column: - | InputRef - | undefined; - /** The watermark value, there will be no record having a greater value in the watermark column. */ - val: Datum | undefined; -} - -export interface StreamMessage { - streamMessage?: { $case: "streamChunk"; streamChunk: StreamChunk } | { $case: "barrier"; barrier: Barrier } | { - $case: "watermark"; - watermark: Watermark; - }; -} - -/** Hash mapping for compute node. Stores mapping from virtual node to actor id. */ -export interface ActorMapping { - originalIndices: number[]; - data: number[]; -} - -export interface StreamSource { - sourceId: number; - stateTable: Table | undefined; - rowIdIndex?: number | undefined; - columns: ColumnCatalog[]; - pkColumnIds: number[]; - properties: { [key: string]: string }; - info: StreamSourceInfo | undefined; - sourceName: string; -} - -export interface StreamSource_PropertiesEntry { - key: string; - value: string; -} - -export interface SourceNode { - /** - * The source node can contain either a stream source or nothing. 
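
[Editor's note] `Barrier.mutation` above is another `$case` oneof: a pause barrier, for instance, is just the `pause` variant with an empty payload. A shape sketch per the `Barrier` interface; the `epoch` field names follow the `Epoch` message from ./data and the values are illustrative:

// A checkpoint barrier carrying a PauseMutation.
const barrier = {
  epoch: { curr: 101, prev: 100 }, // illustrative epoch values
  mutation: { $case: "pause" as const, pause: {} },
  span: new Uint8Array(), // tracing payload, empty here
  checkpoint: true,
  passedActors: [],
};

if (barrier.mutation.$case === "pause") {
  console.log("stream paused at epoch", barrier.epoch.curr);
}
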
So here we extract all - * information about stream source to a message, and here it will be an `Option` in Rust. - */ - sourceInner: StreamSource | undefined; -} - -export interface SinkDesc { - id: number; - name: string; - definition: string; - columns: ColumnDesc[]; - pk: ColumnOrder[]; - streamKey: number[]; - distributionKey: number[]; - properties: { [key: string]: string }; - sinkType: SinkType; -} - -export interface SinkDesc_PropertiesEntry { - key: string; - value: string; -} - -export interface SinkNode { - sinkDesc: SinkDesc | undefined; -} - -export interface ProjectNode { - selectList: ExprNode[]; - /** - * this two field is expressing a list of usize pair, which means when project receives a - * watermark with `watermark_input_key[i]` column index, it should derive a new watermark - * with `watermark_output_key[i]`th expression - */ - watermarkInputKey: number[]; - watermarkOutputKey: number[]; -} - -export interface FilterNode { - searchCondition: ExprNode | undefined; -} - -/** - * A materialized view is regarded as a table. - * In addition, we also specify primary key to MV for efficient point lookup during update and deletion. - * - * The node will be used for both create mv and create index. - * - When creating mv, `pk == distribution_key == column_orders`. - * - When creating index, `column_orders` will contain both - * arrange columns and pk columns, while distribution key will be arrange columns. - */ -export interface MaterializeNode { - tableId: number; - /** Column indexes and orders of primary key. */ - columnOrders: ColumnOrder[]; - /** Used for internal table states. */ - table: - | Table - | undefined; - /** Used to handle pk conflict, open it when upstream executor is source executor. */ - handlePkConflictBehavior: HandleConflictBehavior; -} - -export interface AggCallState { - inner?: { $case: "resultValueState"; resultValueState: AggCallState_ResultValueState } | { - $case: "tableState"; - tableState: AggCallState_TableState; - } | { $case: "materializedInputState"; materializedInputState: AggCallState_MaterializedInputState }; -} - -/** use the one column of stream's result table as the AggCall's state, used for count/sum/append-only extreme. */ -export interface AggCallState_ResultValueState { -} - -/** use untransformed result as the AggCall's state, used for append-only approx count distinct. */ -export interface AggCallState_TableState { - table: Table | undefined; -} - -/** use the some column of the Upstream's materialization as the AggCall's state, used for extreme/string_agg/array_agg. */ -export interface AggCallState_MaterializedInputState { - table: - | Table - | undefined; - /** for constructing state table column mapping */ - includedUpstreamIndices: number[]; - tableValueIndices: number[]; -} - -export interface SimpleAggNode { - aggCalls: AggCall[]; - /** Only used for local simple agg, not used for global simple agg. */ - distributionKey: number[]; - aggCallStates: AggCallState[]; - resultTable: - | Table - | undefined; - /** - * Whether to optimize for append only stream. - * It is true when the input is append-only - */ - isAppendOnly: boolean; - distinctDedupTables: { [key: number]: Table }; - rowCountIndex: number; -} - -export interface SimpleAggNode_DistinctDedupTablesEntry { - key: number; - value: Table | undefined; -} - -export interface HashAggNode { - groupKey: number[]; - aggCalls: AggCall[]; - aggCallStates: AggCallState[]; - resultTable: - | Table - | undefined; - /** - * Whether to optimize for append only stream. 
- -export interface HashAggNode { - groupKey: number[]; - aggCalls: AggCall[]; - aggCallStates: AggCallState[]; - resultTable: - | Table - | undefined; - /** - * Whether to optimize for append-only streams. - * It is true when the input is append-only. - */ - isAppendOnly: boolean; - distinctDedupTables: { [key: number]: Table }; - rowCountIndex: number; -} - -export interface HashAggNode_DistinctDedupTablesEntry { - key: number; - value: Table | undefined; -} - -export interface TopNNode { - /** 0 means no limit, since a limit of 0 means this node should be optimized away. */ - limit: number; - offset: number; - table: Table | undefined; - orderBy: ColumnOrder[]; - withTies: boolean; -} - -export interface GroupTopNNode { - /** 0 means no limit, since a limit of 0 means this node should be optimized away. */ - limit: number; - offset: number; - groupKey: number[]; - table: Table | undefined; - orderBy: ColumnOrder[]; - withTies: boolean; -} - -export interface HashJoinNode { - joinType: JoinType; - leftKey: number[]; - rightKey: number[]; - condition: - | ExprNode - | undefined; - /** Used for internal table states. */ - leftTable: - | Table - | undefined; - /** Used for internal table states. */ - rightTable: - | Table - | undefined; - /** Used for internal table states. */ - leftDegreeTable: - | Table - | undefined; - /** Used for internal table states. */ - rightDegreeTable: - | Table - | undefined; - /** The output indices of the current node. */ - outputIndices: number[]; - /** - * Left deduped input pk indices. The pk of the left_table and - * left_degree_table is [left_join_key | left_deduped_input_pk_indices] - * and is expected to be the shortest key which starts with - * the join key and satisfies the unique constraint. - */ - leftDedupedInputPkIndices: number[]; - /** - * Right deduped input pk indices. The pk of the right_table and - * right_degree_table is [right_join_key | right_deduped_input_pk_indices] - * and is expected to be the shortest key which starts with - * the join key and satisfies the unique constraint. - */ - rightDedupedInputPkIndices: number[]; - nullSafe: boolean[]; - /** - * Whether to optimize for append-only streams. - * It is true when the input is append-only. - */ - isAppendOnly: boolean; -} - -export interface DynamicFilterNode { - leftKey: number; - /** Must be one of <, <=, >, >= */ - condition: - | ExprNode - | undefined; - /** Left table stores all states whose predicate is possibly not NULL. */ - leftTable: - | Table - | undefined; - /** Right table stores the single value from the RHS of the predicate. */ - rightTable: Table | undefined; -} - -/** - * Delta join with two indexes. This is a pseudo plan node generated on the frontend. On the meta - * service, it will be rewritten into lookup joins. - */ -export interface DeltaIndexJoinNode { - joinType: JoinType; - leftKey: number[]; - rightKey: number[]; - condition: - | ExprNode - | undefined; - /** Table id of the left index. */ - leftTableId: number; - /** Table id of the right index. */ - rightTableId: number; - /** Info about the left index */ - leftInfo: - | ArrangementInfo - | undefined; - /** Info about the right index */ - rightInfo: - | ArrangementInfo - | undefined; - /** The output indices of the current node. */ - outputIndices: number[]; -} - -export interface HopWindowNode { - timeCol: number; - windowSlide: IntervalUnit | undefined; - windowSize: IntervalUnit | undefined; - outputIndices: number[]; - windowStartExprs: ExprNode[]; - windowEndExprs: ExprNode[]; -}
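A worked example may help with the deduped-pk comments in `HashJoinNode` above. Suppose the left input's join key is column 0 and its primary key is [0, 2]: the pk of `left_table` is then [join key | deduped pk] = [0, 2], so `leftDedupedInputPkIndices` is [2]. A sketch with invented indices; `JoinType` is assumed to live in the generated plan-common module, and both import paths are assumptions:

import { HashJoinNode } from "./stream_plan";
import { JoinType } from "./plan_common";

const join: HashJoinNode = HashJoinNode.fromPartial({
  joinType: JoinType.INNER,
  leftKey: [0],
  rightKey: [0],
  // Left pk [0, 2] minus the join-key prefix [0] leaves [2].
  leftDedupedInputPkIndices: [2],
  rightDedupedInputPkIndices: [1],
  nullSafe: [false],
});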
- -export interface MergeNode { - upstreamActorId: number[]; - upstreamFragmentId: number; - /** - * Type of the upstream dispatcher. If there's always one upstream according to this - * type, the compute node may use the `ReceiverExecutor` as an optimization. - */ - upstreamDispatcherType: DispatcherType; - /** The schema of input columns. TODO: remove this field. */ - fields: Field[]; -} - -/** - * Passed from the frontend to the meta service; used by the fragmenter to generate `MergeNode` - * and maybe `DispatcherNode` later. - */ -export interface ExchangeNode { - strategy: DispatchStrategy | undefined; -} - -/** - * ChainNode is used for mv on mv. - * ChainNode is like a "UNION" on the mv snapshot and the stream. So it takes two inputs with fixed order: - * 1. MergeNode (as a placeholder) for streaming read. - * 2. BatchPlanNode for snapshot read. - */ -export interface ChainNode { - tableId: number; - /** The schema of the input stream, which will be used to build a MergeNode. */ - upstreamFields: Field[]; - /** Which columns from upstream are used in this Chain node. */ - upstreamColumnIndices: number[]; - /** - * Generally, the barrier needs to be rearranged during the MV creation process, so that data can - * be flushed to the shared buffer periodically, instead of making the first epoch from the batch query extra - * large. However, in some cases, e.g., shared state, the barrier cannot be rearranged in ChainNode. - * ChainType is used to decide which implementation to use for the ChainNode. - */ - chainType: ChainType; - /** - * Whether the upstream materialize is a singleton, in which case this chain should also be a singleton. - * FIXME: This is a workaround for the fragmenter since the distribution info will be lost if there's only one - * fragment in the downstream mview. Remove this when we refactor the fragmenter. - */ - isSingleton: boolean; - /** The upstream materialized view info used by backfill. */ - tableDesc: StorageTableDesc | undefined; -} - -/** - * BatchPlanNode is used for mv on mv snapshot read. - * BatchPlanNode is supposed to carry a batch plan that can be optimized together with the streaming plan. - * Currently, streaming-to-batch pushdown is not yet supported, so BatchPlanNode is simply a table scan. - */ -export interface BatchPlanNode { - tableDesc: StorageTableDesc | undefined; - columnIds: number[]; -} - -export interface ArrangementInfo { - /** - * Order key of the arrangement, including order by columns and the pk from the materialize - * executor. - */ - arrangeKeyOrders: ColumnOrder[]; - /** Column descs of the arrangement */ - columnDescs: ColumnDesc[]; - /** Used to build the storage table for the stream lookup join of a delta join. */ - tableDesc: StorageTableDesc | undefined; -} - -/** - * Special node for shared state, which will only be produced in the fragmenter. ArrangeNode will - * produce a special Materialize executor, which materializes data for downstream to query. - */ -export interface ArrangeNode { - /** Info about the arrangement */ - tableInfo: - | ArrangementInfo - | undefined; - /** Hash key of the materialize node, which is a subset of the pk. */ - distributionKey: number[]; - /** Used for internal table states. */ - table: - | Table - | undefined; - /** Used to control whether to do a sanity check; enable it when the upstream executor is a source executor. */ - handlePkConflictBehavior: HandleConflictBehavior; -}
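Given the fixed input order described in `ChainNode` above (MergeNode placeholder first, BatchPlanNode second), the two inputs live in the surrounding `StreamNode`'s `input` list. A sketch of that shape with invented ids; `ChainType.BACKFILL` is assumed to be one of the generated enum values:

import { StreamNode, ChainType } from "./stream_plan";

const chain: StreamNode = StreamNode.fromPartial({
  nodeBody: {
    $case: "chain",
    chain: { tableId: 42, chainType: ChainType.BACKFILL },
  },
  input: [
    // 1. MergeNode placeholder for the streaming read.
    { nodeBody: { $case: "merge", merge: { upstreamFragmentId: 7 } } },
    // 2. BatchPlanNode for the snapshot read.
    { nodeBody: { $case: "batchPlan", batchPlan: { columnIds: [0, 1] } } },
  ],
});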
- -/** Special node for shared state. LookupNode will join an arrangement with a stream. */ -export interface LookupNode { - /** Join key of the arrangement side */ - arrangeKey: number[]; - /** Join key of the stream side */ - streamKey: number[]; - /** Whether to join the current epoch of the arrangement */ - useCurrentEpoch: boolean; - /** - * Sometimes we need to re-order the output data to meet the requirement of the schema. - * By default, the lookup executor will produce `<arrangement side, stream side>`. We - * will then apply the column mapping to the combined result. - */ - columnMapping: number[]; - arrangementTableId?: - | { $case: "tableId"; tableId: number } - | { $case: "indexId"; indexId: number }; - /** Info about the arrangement */ - arrangementTableInfo: ArrangementInfo | undefined; -} - -/** WatermarkFilter needs to filter the upstream data by the watermark. */ -export interface WatermarkFilterNode { - /** The watermark descs */ - watermarkDescs: WatermarkDesc[]; - /** The tables used to persist watermarks; the key is the vnode. */ - tables: Table[]; -} - -/** Acts like a merger, but on different inputs. */ -export interface UnionNode { -} - -/** Special node for shared state. Merges and aligns barriers from upstreams. Pipes inputs in order. */ -export interface LookupUnionNode { - order: number[]; -} - -export interface ExpandNode { - columnSubsets: ExpandNode_Subset[]; -} - -export interface ExpandNode_Subset { - columnIndices: number[]; -} - -export interface ProjectSetNode { - selectList: ProjectSetSelectItem[]; -} - -/** Sorts inputs and outputs ordered data based on the watermark. */ -export interface SortNode { - /** Persists data above the watermark. */ - stateTable: - | Table - | undefined; - /** Column index of the watermark to perform sorting on. */ - sortColumnIndex: number; -} - -/** Merges two streams from streaming and batch for data manipulation. */ -export interface DmlNode { - /** Id of the table on which DML performs. */ - tableId: number; - /** Version of the table. */ - tableVersionId: number; - /** Column descriptions of the table. */ - columnDescs: ColumnDesc[]; -} - -export interface RowIdGenNode { - rowIdIndex: number; -} - -export interface NowNode { - /** Persists the emitted 'now'. */ - stateTable: Table | undefined; -}
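To illustrate `columnMapping` in `LookupNode` above: if the arrangement side contributes columns 0..1 and the stream side columns 2..3 of the combined row, a mapping of [2, 3, 0] outputs the stream-side columns first, followed by one arrangement column. A sketch with invented values and an assumed import path:

import { LookupNode } from "./stream_plan";

const lookup: LookupNode = LookupNode.fromPartial({
  arrangeKey: [0],
  streamKey: [0],
  useCurrentEpoch: false,
  // Combined row is <arrangement side, stream side>; reorder it on output.
  columnMapping: [2, 3, 0],
  // The arrangement may be referenced by table id or index id.
  arrangementTableId: { $case: "tableId", tableId: 1001 },
});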
- -export interface StreamNode { - nodeBody?: - | { $case: "source"; source: SourceNode } - | { $case: "project"; project: ProjectNode } - | { $case: "filter"; filter: FilterNode } - | { $case: "materialize"; materialize: MaterializeNode } - | { $case: "localSimpleAgg"; localSimpleAgg: SimpleAggNode } - | { $case: "globalSimpleAgg"; globalSimpleAgg: SimpleAggNode } - | { $case: "hashAgg"; hashAgg: HashAggNode } - | { $case: "appendOnlyTopN"; appendOnlyTopN: TopNNode } - | { $case: "hashJoin"; hashJoin: HashJoinNode } - | { $case: "topN"; topN: TopNNode } - | { $case: "hopWindow"; hopWindow: HopWindowNode } - | { $case: "merge"; merge: MergeNode } - | { $case: "exchange"; exchange: ExchangeNode } - | { $case: "chain"; chain: ChainNode } - | { $case: "batchPlan"; batchPlan: BatchPlanNode } - | { $case: "lookup"; lookup: LookupNode } - | { $case: "arrange"; arrange: ArrangeNode } - | { $case: "lookupUnion"; lookupUnion: LookupUnionNode } - | { $case: "union"; union: UnionNode } - | { $case: "deltaIndexJoin"; deltaIndexJoin: DeltaIndexJoinNode } - | { $case: "sink"; sink: SinkNode } - | { $case: "expand"; expand: ExpandNode } - | { $case: "dynamicFilter"; dynamicFilter: DynamicFilterNode } - | { $case: "projectSet"; projectSet: ProjectSetNode } - | { $case: "groupTopN"; groupTopN: GroupTopNNode } - | { $case: "sort"; sort: SortNode } - | { $case: "watermarkFilter"; watermarkFilter: WatermarkFilterNode } - | { $case: "dml"; dml: DmlNode } - | { $case: "rowIdGen"; rowIdGen: RowIdGenNode } - | { $case: "now"; now: NowNode } - | { $case: "appendOnlyGroupTopN"; appendOnlyGroupTopN: GroupTopNNode }; - /** - * The id for the operator. This is local per mview. - * TODO: should better be a uint32. - */ - operatorId: number; - /** Child nodes in the plan, aka upstream nodes in the streaming DAG. */ - input: StreamNode[]; - streamKey: number[]; - appendOnly: boolean; - identity: string; - /** The schema of the plan node */ - fields: Field[]; -} - -/** - * The property of an edge in the fragment graph. - * This is essentially a "logical" version of `Dispatcher`. See the doc of `Dispatcher` for more details. - */ -export interface DispatchStrategy { - type: DispatcherType; - distKeyIndices: number[]; - outputIndices: number[]; -} - -/** - * A dispatcher redistributes messages. - * We encode both the type and other usage information in the proto. - */ -export interface Dispatcher { - type: DispatcherType; - /** - * Indices of the columns to be used for hashing. - * For dispatcher types other than HASH, this is ignored. - */ - distKeyIndices: number[]; - /** - * Indices of the columns to output. - * In most cases, this contains all columns in the input. But for some cases like MV on MV or - * schema change, we may only output a subset of the columns. - */ - outputIndices: number[]; - /** - * The hash mapping for consistent hash. - * For dispatcher types other than HASH, this is ignored. - */ - hashMapping: - | ActorMapping - | undefined; - /** - * Dispatcher can be uniquely identified by a combination of actor id and dispatcher id. - * This is exactly the same as its downstream fragment id. - */ - dispatcherId: number; - /** The number of downstreams decides how many endpoints a dispatcher should dispatch to. */ - downstreamActorId: number[]; -} - -/** A StreamActor is a running fragment of the overall stream graph. */ -export interface StreamActor { - actorId: number; - fragmentId: number; - nodes: StreamNode | undefined; - dispatcher: Dispatcher[]; - /** - * The actors that send messages to this actor. - * Note that upstream actor ids are also stored in the proto of merge nodes. - * It is painstaking to traverse through the node tree and get the upstream actor ids from the root StreamNode. - * We duplicate the information here to ease the parsing logic in the stream manager. - */ - upstreamActorId: number[]; - /** - * Vnodes that the executors in this actor own. - * If the fragment is a singleton, this field will not be set and will be `None`. - */ - vnodeBitmap: - | Buffer - | undefined; - /** The SQL definition of this materialized view. Used for debugging only. */ - mviewDefinition: string; -} - -/** The environment associated with a stream plan */ -export interface StreamEnvironment { - /** The timezone associated with the streaming plan. Only applies to MVs for now. */ - timezone: string; -} - -export interface StreamFragmentGraph { - /** All the fragments in the graph. */ - fragments: { [key: number]: StreamFragmentGraph_StreamFragment }; - /** Edges between fragments. */ - edges: StreamFragmentGraph_StreamFragmentEdge[]; - dependentRelationIds: number[]; - tableIdsCnt: number; - env: - | StreamEnvironment - | undefined; - /** If none, the default parallelism will be applied. */ - parallelism: StreamFragmentGraph_Parallelism | undefined; -}
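Because `nodeBody` is a `$case`-discriminated union, consumers can narrow it with ordinary control flow. A small sketch of inspecting a plan tree; the function and its output format are ours, not part of the generated file:

import { StreamNode } from "./stream_plan";

function describe(node: StreamNode): string {
  const body = node.nodeBody;
  if (body?.$case === "merge") {
    return `merge from fragment ${body.merge.upstreamFragmentId}`;
  }
  if (body?.$case === "chain") {
    return `chain on table ${body.chain.tableId}`;
  }
  // Fall back to the generated identity string; recurse into `node.input` as needed.
  return node.identity;
}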
- -export interface StreamFragmentGraph_StreamFragment { - /** 0-based on the frontend; will be rewritten to a global id on the meta service. */ - fragmentId: number; - /** Root stream node in this fragment. */ - node: - | StreamNode - | undefined; - /** Bitwise-OR of FragmentTypeFlags */ - fragmentTypeMask: number; - /** Marks whether this fragment should only have one actor. */ - isSingleton: boolean; - /** Number of table ids (stateful states) for this fragment. */ - tableIdsCnt: number; - /** Marks the upstream table ids of this fragment. Used for fragments with `Chain`s. */ - upstreamTableIds: number[]; -} - -export interface StreamFragmentGraph_StreamFragmentEdge { - /** Dispatch strategy for the fragment. */ - dispatchStrategy: - | DispatchStrategy - | undefined; - /** - * A unique identifier of this edge. Generally it should be the exchange node's operator id. When - * rewriting fragments into delta joins or when inserting 1-to-1 exchanges, there will be - * virtual links generated. - */ - linkId: number; - upstreamId: number; - downstreamId: number; -} - -export interface StreamFragmentGraph_Parallelism { - parallelism: number; -} - -export interface StreamFragmentGraph_FragmentsEntry { - key: number; - value: StreamFragmentGraph_StreamFragment | undefined; -} - -function createBaseAddMutation(): AddMutation { - return { actorDispatchers: {}, actorSplits: {} }; -} - -export const AddMutation = { - fromJSON(object: any): AddMutation { - return { - actorDispatchers: isObject(object.actorDispatchers) - ? Object.entries(object.actorDispatchers).reduce<{ [key: number]: AddMutation_Dispatchers }>( - (acc, [key, value]) => { - acc[Number(key)] = AddMutation_Dispatchers.fromJSON(value); - return acc; - }, - {}, - ) - : {}, - actorSplits: isObject(object.actorSplits) - ? Object.entries(object.actorSplits).reduce<{ [key: number]: ConnectorSplits }>((acc, [key, value]) => { - acc[Number(key)] = ConnectorSplits.fromJSON(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: AddMutation): unknown { - const obj: any = {}; - obj.actorDispatchers = {}; - if (message.actorDispatchers) { - Object.entries(message.actorDispatchers).forEach(([k, v]) => { - obj.actorDispatchers[k] = AddMutation_Dispatchers.toJSON(v); - }); - } - obj.actorSplits = {}; - if (message.actorSplits) { - Object.entries(message.actorSplits).forEach(([k, v]) => { - obj.actorSplits[k] = ConnectorSplits.toJSON(v); - }); - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<AddMutation>, I>>(object: I): AddMutation { - const message = createBaseAddMutation(); - message.actorDispatchers = Object.entries(object.actorDispatchers ?? {}).reduce< - { [key: number]: AddMutation_Dispatchers } - >((acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = AddMutation_Dispatchers.fromPartial(value); - } - return acc; - }, {}); - message.actorSplits = Object.entries(object.actorSplits ?? {}).reduce<{ [key: number]: ConnectorSplits }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = ConnectorSplits.fromPartial(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBaseAddMutation_Dispatchers(): AddMutation_Dispatchers { - return { dispatchers: [] }; -} - -export const AddMutation_Dispatchers = { - fromJSON(object: any): AddMutation_Dispatchers { - return { - dispatchers: Array.isArray(object?.dispatchers) ? object.dispatchers.map((e: any) => Dispatcher.fromJSON(e)) : [], - }; - }, - - toJSON(message: AddMutation_Dispatchers): unknown { - const obj: any = {}; - if (message.dispatchers) { - obj.dispatchers = message.dispatchers.map((e) => e ?
Dispatcher.toJSON(e) : undefined); - } else { - obj.dispatchers = []; - } - return obj; - }, - - fromPartial, I>>(object: I): AddMutation_Dispatchers { - const message = createBaseAddMutation_Dispatchers(); - message.dispatchers = object.dispatchers?.map((e) => Dispatcher.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseAddMutation_ActorDispatchersEntry(): AddMutation_ActorDispatchersEntry { - return { key: 0, value: undefined }; -} - -export const AddMutation_ActorDispatchersEntry = { - fromJSON(object: any): AddMutation_ActorDispatchersEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? AddMutation_Dispatchers.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: AddMutation_ActorDispatchersEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && - (obj.value = message.value ? AddMutation_Dispatchers.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): AddMutation_ActorDispatchersEntry { - const message = createBaseAddMutation_ActorDispatchersEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? AddMutation_Dispatchers.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseAddMutation_ActorSplitsEntry(): AddMutation_ActorSplitsEntry { - return { key: 0, value: undefined }; -} - -export const AddMutation_ActorSplitsEntry = { - fromJSON(object: any): AddMutation_ActorSplitsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? ConnectorSplits.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: AddMutation_ActorSplitsEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? ConnectorSplits.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): AddMutation_ActorSplitsEntry { - const message = createBaseAddMutation_ActorSplitsEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? ConnectorSplits.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseStopMutation(): StopMutation { - return { actors: [] }; -} - -export const StopMutation = { - fromJSON(object: any): StopMutation { - return { actors: Array.isArray(object?.actors) ? object.actors.map((e: any) => Number(e)) : [] }; - }, - - toJSON(message: StopMutation): unknown { - const obj: any = {}; - if (message.actors) { - obj.actors = message.actors.map((e) => Math.round(e)); - } else { - obj.actors = []; - } - return obj; - }, - - fromPartial, I>>(object: I): StopMutation { - const message = createBaseStopMutation(); - message.actors = object.actors?.map((e) => e) || []; - return message; - }, -}; - -function createBaseUpdateMutation(): UpdateMutation { - return { dispatcherUpdate: [], mergeUpdate: [], actorVnodeBitmapUpdate: {}, droppedActors: [], actorSplits: {} }; -} - -export const UpdateMutation = { - fromJSON(object: any): UpdateMutation { - return { - dispatcherUpdate: Array.isArray(object?.dispatcherUpdate) - ? object.dispatcherUpdate.map((e: any) => UpdateMutation_DispatcherUpdate.fromJSON(e)) - : [], - mergeUpdate: Array.isArray(object?.mergeUpdate) - ? 
object.mergeUpdate.map((e: any) => UpdateMutation_MergeUpdate.fromJSON(e)) - : [], - actorVnodeBitmapUpdate: isObject(object.actorVnodeBitmapUpdate) - ? Object.entries(object.actorVnodeBitmapUpdate).reduce<{ [key: number]: Buffer }>((acc, [key, value]) => { - acc[Number(key)] = Buffer.fromJSON(value); - return acc; - }, {}) - : {}, - droppedActors: Array.isArray(object?.droppedActors) - ? object.droppedActors.map((e: any) => Number(e)) - : [], - actorSplits: isObject(object.actorSplits) - ? Object.entries(object.actorSplits).reduce<{ [key: number]: ConnectorSplits }>((acc, [key, value]) => { - acc[Number(key)] = ConnectorSplits.fromJSON(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: UpdateMutation): unknown { - const obj: any = {}; - if (message.dispatcherUpdate) { - obj.dispatcherUpdate = message.dispatcherUpdate.map((e) => - e ? UpdateMutation_DispatcherUpdate.toJSON(e) : undefined - ); - } else { - obj.dispatcherUpdate = []; - } - if (message.mergeUpdate) { - obj.mergeUpdate = message.mergeUpdate.map((e) => e ? UpdateMutation_MergeUpdate.toJSON(e) : undefined); - } else { - obj.mergeUpdate = []; - } - obj.actorVnodeBitmapUpdate = {}; - if (message.actorVnodeBitmapUpdate) { - Object.entries(message.actorVnodeBitmapUpdate).forEach(([k, v]) => { - obj.actorVnodeBitmapUpdate[k] = Buffer.toJSON(v); - }); - } - if (message.droppedActors) { - obj.droppedActors = message.droppedActors.map((e) => Math.round(e)); - } else { - obj.droppedActors = []; - } - obj.actorSplits = {}; - if (message.actorSplits) { - Object.entries(message.actorSplits).forEach(([k, v]) => { - obj.actorSplits[k] = ConnectorSplits.toJSON(v); - }); - } - return obj; - }, - - fromPartial, I>>(object: I): UpdateMutation { - const message = createBaseUpdateMutation(); - message.dispatcherUpdate = object.dispatcherUpdate?.map((e) => UpdateMutation_DispatcherUpdate.fromPartial(e)) || - []; - message.mergeUpdate = object.mergeUpdate?.map((e) => UpdateMutation_MergeUpdate.fromPartial(e)) || []; - message.actorVnodeBitmapUpdate = Object.entries(object.actorVnodeBitmapUpdate ?? {}).reduce< - { [key: number]: Buffer } - >((acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = Buffer.fromPartial(value); - } - return acc; - }, {}); - message.droppedActors = object.droppedActors?.map((e) => e) || []; - message.actorSplits = Object.entries(object.actorSplits ?? {}).reduce<{ [key: number]: ConnectorSplits }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = ConnectorSplits.fromPartial(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBaseUpdateMutation_DispatcherUpdate(): UpdateMutation_DispatcherUpdate { - return { - actorId: 0, - dispatcherId: 0, - hashMapping: undefined, - addedDownstreamActorId: [], - removedDownstreamActorId: [], - }; -} - -export const UpdateMutation_DispatcherUpdate = { - fromJSON(object: any): UpdateMutation_DispatcherUpdate { - return { - actorId: isSet(object.actorId) ? Number(object.actorId) : 0, - dispatcherId: isSet(object.dispatcherId) ? Number(object.dispatcherId) : 0, - hashMapping: isSet(object.hashMapping) ? ActorMapping.fromJSON(object.hashMapping) : undefined, - addedDownstreamActorId: Array.isArray(object?.addedDownstreamActorId) - ? object.addedDownstreamActorId.map((e: any) => Number(e)) - : [], - removedDownstreamActorId: Array.isArray(object?.removedDownstreamActorId) - ? 
object.removedDownstreamActorId.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: UpdateMutation_DispatcherUpdate): unknown { - const obj: any = {}; - message.actorId !== undefined && (obj.actorId = Math.round(message.actorId)); - message.dispatcherId !== undefined && (obj.dispatcherId = Math.round(message.dispatcherId)); - message.hashMapping !== undefined && - (obj.hashMapping = message.hashMapping ? ActorMapping.toJSON(message.hashMapping) : undefined); - if (message.addedDownstreamActorId) { - obj.addedDownstreamActorId = message.addedDownstreamActorId.map((e) => Math.round(e)); - } else { - obj.addedDownstreamActorId = []; - } - if (message.removedDownstreamActorId) { - obj.removedDownstreamActorId = message.removedDownstreamActorId.map((e) => Math.round(e)); - } else { - obj.removedDownstreamActorId = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): UpdateMutation_DispatcherUpdate { - const message = createBaseUpdateMutation_DispatcherUpdate(); - message.actorId = object.actorId ?? 0; - message.dispatcherId = object.dispatcherId ?? 0; - message.hashMapping = (object.hashMapping !== undefined && object.hashMapping !== null) - ? ActorMapping.fromPartial(object.hashMapping) - : undefined; - message.addedDownstreamActorId = object.addedDownstreamActorId?.map((e) => e) || []; - message.removedDownstreamActorId = object.removedDownstreamActorId?.map((e) => e) || []; - return message; - }, -}; - -function createBaseUpdateMutation_MergeUpdate(): UpdateMutation_MergeUpdate { - return { - actorId: 0, - upstreamFragmentId: 0, - newUpstreamFragmentId: undefined, - addedUpstreamActorId: [], - removedUpstreamActorId: [], - }; -} - -export const UpdateMutation_MergeUpdate = { - fromJSON(object: any): UpdateMutation_MergeUpdate { - return { - actorId: isSet(object.actorId) ? Number(object.actorId) : 0, - upstreamFragmentId: isSet(object.upstreamFragmentId) ? Number(object.upstreamFragmentId) : 0, - newUpstreamFragmentId: isSet(object.newUpstreamFragmentId) ? Number(object.newUpstreamFragmentId) : undefined, - addedUpstreamActorId: Array.isArray(object?.addedUpstreamActorId) - ? object.addedUpstreamActorId.map((e: any) => Number(e)) - : [], - removedUpstreamActorId: Array.isArray(object?.removedUpstreamActorId) - ? object.removedUpstreamActorId.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: UpdateMutation_MergeUpdate): unknown { - const obj: any = {}; - message.actorId !== undefined && (obj.actorId = Math.round(message.actorId)); - message.upstreamFragmentId !== undefined && (obj.upstreamFragmentId = Math.round(message.upstreamFragmentId)); - message.newUpstreamFragmentId !== undefined && - (obj.newUpstreamFragmentId = Math.round(message.newUpstreamFragmentId)); - if (message.addedUpstreamActorId) { - obj.addedUpstreamActorId = message.addedUpstreamActorId.map((e) => Math.round(e)); - } else { - obj.addedUpstreamActorId = []; - } - if (message.removedUpstreamActorId) { - obj.removedUpstreamActorId = message.removedUpstreamActorId.map((e) => Math.round(e)); - } else { - obj.removedUpstreamActorId = []; - } - return obj; - }, - - fromPartial, I>>(object: I): UpdateMutation_MergeUpdate { - const message = createBaseUpdateMutation_MergeUpdate(); - message.actorId = object.actorId ?? 0; - message.upstreamFragmentId = object.upstreamFragmentId ?? 0; - message.newUpstreamFragmentId = object.newUpstreamFragmentId ?? 
undefined; - message.addedUpstreamActorId = object.addedUpstreamActorId?.map((e) => e) || []; - message.removedUpstreamActorId = object.removedUpstreamActorId?.map((e) => e) || []; - return message; - }, -}; - -function createBaseUpdateMutation_ActorVnodeBitmapUpdateEntry(): UpdateMutation_ActorVnodeBitmapUpdateEntry { - return { key: 0, value: undefined }; -} - -export const UpdateMutation_ActorVnodeBitmapUpdateEntry = { - fromJSON(object: any): UpdateMutation_ActorVnodeBitmapUpdateEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? Buffer.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: UpdateMutation_ActorVnodeBitmapUpdateEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? Buffer.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): UpdateMutation_ActorVnodeBitmapUpdateEntry { - const message = createBaseUpdateMutation_ActorVnodeBitmapUpdateEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? Buffer.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseUpdateMutation_ActorSplitsEntry(): UpdateMutation_ActorSplitsEntry { - return { key: 0, value: undefined }; -} - -export const UpdateMutation_ActorSplitsEntry = { - fromJSON(object: any): UpdateMutation_ActorSplitsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? ConnectorSplits.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: UpdateMutation_ActorSplitsEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? ConnectorSplits.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): UpdateMutation_ActorSplitsEntry { - const message = createBaseUpdateMutation_ActorSplitsEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? ConnectorSplits.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseSourceChangeSplitMutation(): SourceChangeSplitMutation { - return { actorSplits: {} }; -} - -export const SourceChangeSplitMutation = { - fromJSON(object: any): SourceChangeSplitMutation { - return { - actorSplits: isObject(object.actorSplits) - ? Object.entries(object.actorSplits).reduce<{ [key: number]: ConnectorSplits }>((acc, [key, value]) => { - acc[Number(key)] = ConnectorSplits.fromJSON(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: SourceChangeSplitMutation): unknown { - const obj: any = {}; - obj.actorSplits = {}; - if (message.actorSplits) { - Object.entries(message.actorSplits).forEach(([k, v]) => { - obj.actorSplits[k] = ConnectorSplits.toJSON(v); - }); - } - return obj; - }, - - fromPartial, I>>(object: I): SourceChangeSplitMutation { - const message = createBaseSourceChangeSplitMutation(); - message.actorSplits = Object.entries(object.actorSplits ?? 
{}).reduce<{ [key: number]: ConnectorSplits }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = ConnectorSplits.fromPartial(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBaseSourceChangeSplitMutation_ActorSplitsEntry(): SourceChangeSplitMutation_ActorSplitsEntry { - return { key: 0, value: undefined }; -} - -export const SourceChangeSplitMutation_ActorSplitsEntry = { - fromJSON(object: any): SourceChangeSplitMutation_ActorSplitsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? ConnectorSplits.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: SourceChangeSplitMutation_ActorSplitsEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? ConnectorSplits.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): SourceChangeSplitMutation_ActorSplitsEntry { - const message = createBaseSourceChangeSplitMutation_ActorSplitsEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? ConnectorSplits.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBasePauseMutation(): PauseMutation { - return {}; -} - -export const PauseMutation = { - fromJSON(_: any): PauseMutation { - return {}; - }, - - toJSON(_: PauseMutation): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): PauseMutation { - const message = createBasePauseMutation(); - return message; - }, -}; - -function createBaseResumeMutation(): ResumeMutation { - return {}; -} - -export const ResumeMutation = { - fromJSON(_: any): ResumeMutation { - return {}; - }, - - toJSON(_: ResumeMutation): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): ResumeMutation { - const message = createBaseResumeMutation(); - return message; - }, -}; - -function createBaseBarrier(): Barrier { - return { epoch: undefined, mutation: undefined, span: new Uint8Array(), checkpoint: false, passedActors: [] }; -} - -export const Barrier = { - fromJSON(object: any): Barrier { - return { - epoch: isSet(object.epoch) ? Epoch.fromJSON(object.epoch) : undefined, - mutation: isSet(object.add) - ? { $case: "add", add: AddMutation.fromJSON(object.add) } - : isSet(object.stop) - ? { $case: "stop", stop: StopMutation.fromJSON(object.stop) } - : isSet(object.update) - ? { $case: "update", update: UpdateMutation.fromJSON(object.update) } - : isSet(object.splits) - ? { $case: "splits", splits: SourceChangeSplitMutation.fromJSON(object.splits) } - : isSet(object.pause) - ? { $case: "pause", pause: PauseMutation.fromJSON(object.pause) } - : isSet(object.resume) - ? { $case: "resume", resume: ResumeMutation.fromJSON(object.resume) } - : undefined, - span: isSet(object.span) ? bytesFromBase64(object.span) : new Uint8Array(), - checkpoint: isSet(object.checkpoint) ? Boolean(object.checkpoint) : false, - passedActors: Array.isArray(object?.passedActors) - ? object.passedActors.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: Barrier): unknown { - const obj: any = {}; - message.epoch !== undefined && (obj.epoch = message.epoch ? Epoch.toJSON(message.epoch) : undefined); - message.mutation?.$case === "add" && - (obj.add = message.mutation?.add ? 
AddMutation.toJSON(message.mutation?.add) : undefined); - message.mutation?.$case === "stop" && - (obj.stop = message.mutation?.stop ? StopMutation.toJSON(message.mutation?.stop) : undefined); - message.mutation?.$case === "update" && - (obj.update = message.mutation?.update ? UpdateMutation.toJSON(message.mutation?.update) : undefined); - message.mutation?.$case === "splits" && - (obj.splits = message.mutation?.splits ? SourceChangeSplitMutation.toJSON(message.mutation?.splits) : undefined); - message.mutation?.$case === "pause" && - (obj.pause = message.mutation?.pause ? PauseMutation.toJSON(message.mutation?.pause) : undefined); - message.mutation?.$case === "resume" && - (obj.resume = message.mutation?.resume ? ResumeMutation.toJSON(message.mutation?.resume) : undefined); - message.span !== undefined && - (obj.span = base64FromBytes(message.span !== undefined ? message.span : new Uint8Array())); - message.checkpoint !== undefined && (obj.checkpoint = message.checkpoint); - if (message.passedActors) { - obj.passedActors = message.passedActors.map((e) => Math.round(e)); - } else { - obj.passedActors = []; - } - return obj; - }, - - fromPartial, I>>(object: I): Barrier { - const message = createBaseBarrier(); - message.epoch = (object.epoch !== undefined && object.epoch !== null) ? Epoch.fromPartial(object.epoch) : undefined; - if (object.mutation?.$case === "add" && object.mutation?.add !== undefined && object.mutation?.add !== null) { - message.mutation = { $case: "add", add: AddMutation.fromPartial(object.mutation.add) }; - } - if (object.mutation?.$case === "stop" && object.mutation?.stop !== undefined && object.mutation?.stop !== null) { - message.mutation = { $case: "stop", stop: StopMutation.fromPartial(object.mutation.stop) }; - } - if ( - object.mutation?.$case === "update" && object.mutation?.update !== undefined && object.mutation?.update !== null - ) { - message.mutation = { $case: "update", update: UpdateMutation.fromPartial(object.mutation.update) }; - } - if ( - object.mutation?.$case === "splits" && object.mutation?.splits !== undefined && object.mutation?.splits !== null - ) { - message.mutation = { $case: "splits", splits: SourceChangeSplitMutation.fromPartial(object.mutation.splits) }; - } - if (object.mutation?.$case === "pause" && object.mutation?.pause !== undefined && object.mutation?.pause !== null) { - message.mutation = { $case: "pause", pause: PauseMutation.fromPartial(object.mutation.pause) }; - } - if ( - object.mutation?.$case === "resume" && object.mutation?.resume !== undefined && object.mutation?.resume !== null - ) { - message.mutation = { $case: "resume", resume: ResumeMutation.fromPartial(object.mutation.resume) }; - } - message.span = object.span ?? new Uint8Array(); - message.checkpoint = object.checkpoint ?? false; - message.passedActors = object.passedActors?.map((e) => e) || []; - return message; - }, -}; - -function createBaseWatermark(): Watermark { - return { column: undefined, val: undefined }; -} - -export const Watermark = { - fromJSON(object: any): Watermark { - return { - column: isSet(object.column) ? InputRef.fromJSON(object.column) : undefined, - val: isSet(object.val) ? Datum.fromJSON(object.val) : undefined, - }; - }, - - toJSON(message: Watermark): unknown { - const obj: any = {}; - message.column !== undefined && (obj.column = message.column ? InputRef.toJSON(message.column) : undefined); - message.val !== undefined && (obj.val = message.val ? 
Datum.toJSON(message.val) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): Watermark { - const message = createBaseWatermark(); - message.column = (object.column !== undefined && object.column !== null) - ? InputRef.fromPartial(object.column) - : undefined; - message.val = (object.val !== undefined && object.val !== null) ? Datum.fromPartial(object.val) : undefined; - return message; - }, -}; - -function createBaseStreamMessage(): StreamMessage { - return { streamMessage: undefined }; -} - -export const StreamMessage = { - fromJSON(object: any): StreamMessage { - return { - streamMessage: isSet(object.streamChunk) - ? { $case: "streamChunk", streamChunk: StreamChunk.fromJSON(object.streamChunk) } - : isSet(object.barrier) - ? { $case: "barrier", barrier: Barrier.fromJSON(object.barrier) } - : isSet(object.watermark) - ? { $case: "watermark", watermark: Watermark.fromJSON(object.watermark) } - : undefined, - }; - }, - - toJSON(message: StreamMessage): unknown { - const obj: any = {}; - message.streamMessage?.$case === "streamChunk" && (obj.streamChunk = message.streamMessage?.streamChunk - ? StreamChunk.toJSON(message.streamMessage?.streamChunk) - : undefined); - message.streamMessage?.$case === "barrier" && - (obj.barrier = message.streamMessage?.barrier ? Barrier.toJSON(message.streamMessage?.barrier) : undefined); - message.streamMessage?.$case === "watermark" && - (obj.watermark = message.streamMessage?.watermark - ? Watermark.toJSON(message.streamMessage?.watermark) - : undefined); - return obj; - }, - - fromPartial, I>>(object: I): StreamMessage { - const message = createBaseStreamMessage(); - if ( - object.streamMessage?.$case === "streamChunk" && - object.streamMessage?.streamChunk !== undefined && - object.streamMessage?.streamChunk !== null - ) { - message.streamMessage = { - $case: "streamChunk", - streamChunk: StreamChunk.fromPartial(object.streamMessage.streamChunk), - }; - } - if ( - object.streamMessage?.$case === "barrier" && - object.streamMessage?.barrier !== undefined && - object.streamMessage?.barrier !== null - ) { - message.streamMessage = { $case: "barrier", barrier: Barrier.fromPartial(object.streamMessage.barrier) }; - } - if ( - object.streamMessage?.$case === "watermark" && - object.streamMessage?.watermark !== undefined && - object.streamMessage?.watermark !== null - ) { - message.streamMessage = { $case: "watermark", watermark: Watermark.fromPartial(object.streamMessage.watermark) }; - } - return message; - }, -}; - -function createBaseActorMapping(): ActorMapping { - return { originalIndices: [], data: [] }; -} - -export const ActorMapping = { - fromJSON(object: any): ActorMapping { - return { - originalIndices: Array.isArray(object?.originalIndices) ? object.originalIndices.map((e: any) => Number(e)) : [], - data: Array.isArray(object?.data) ? 
object.data.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: ActorMapping): unknown { - const obj: any = {}; - if (message.originalIndices) { - obj.originalIndices = message.originalIndices.map((e) => Math.round(e)); - } else { - obj.originalIndices = []; - } - if (message.data) { - obj.data = message.data.map((e) => Math.round(e)); - } else { - obj.data = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ActorMapping { - const message = createBaseActorMapping(); - message.originalIndices = object.originalIndices?.map((e) => e) || []; - message.data = object.data?.map((e) => e) || []; - return message; - }, -}; - -function createBaseStreamSource(): StreamSource { - return { - sourceId: 0, - stateTable: undefined, - rowIdIndex: undefined, - columns: [], - pkColumnIds: [], - properties: {}, - info: undefined, - sourceName: "", - }; -} - -export const StreamSource = { - fromJSON(object: any): StreamSource { - return { - sourceId: isSet(object.sourceId) ? Number(object.sourceId) : 0, - stateTable: isSet(object.stateTable) ? Table.fromJSON(object.stateTable) : undefined, - rowIdIndex: isSet(object.rowIdIndex) ? Number(object.rowIdIndex) : undefined, - columns: Array.isArray(object?.columns) ? object.columns.map((e: any) => ColumnCatalog.fromJSON(e)) : [], - pkColumnIds: Array.isArray(object?.pkColumnIds) ? object.pkColumnIds.map((e: any) => Number(e)) : [], - properties: isObject(object.properties) - ? Object.entries(object.properties).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - info: isSet(object.info) ? StreamSourceInfo.fromJSON(object.info) : undefined, - sourceName: isSet(object.sourceName) ? String(object.sourceName) : "", - }; - }, - - toJSON(message: StreamSource): unknown { - const obj: any = {}; - message.sourceId !== undefined && (obj.sourceId = Math.round(message.sourceId)); - message.stateTable !== undefined && - (obj.stateTable = message.stateTable ? Table.toJSON(message.stateTable) : undefined); - message.rowIdIndex !== undefined && (obj.rowIdIndex = Math.round(message.rowIdIndex)); - if (message.columns) { - obj.columns = message.columns.map((e) => e ? ColumnCatalog.toJSON(e) : undefined); - } else { - obj.columns = []; - } - if (message.pkColumnIds) { - obj.pkColumnIds = message.pkColumnIds.map((e) => Math.round(e)); - } else { - obj.pkColumnIds = []; - } - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - message.info !== undefined && (obj.info = message.info ? StreamSourceInfo.toJSON(message.info) : undefined); - message.sourceName !== undefined && (obj.sourceName = message.sourceName); - return obj; - }, - - fromPartial, I>>(object: I): StreamSource { - const message = createBaseStreamSource(); - message.sourceId = object.sourceId ?? 0; - message.stateTable = (object.stateTable !== undefined && object.stateTable !== null) - ? Table.fromPartial(object.stateTable) - : undefined; - message.rowIdIndex = object.rowIdIndex ?? undefined; - message.columns = object.columns?.map((e) => ColumnCatalog.fromPartial(e)) || []; - message.pkColumnIds = object.pkColumnIds?.map((e) => e) || []; - message.properties = Object.entries(object.properties ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.info = (object.info !== undefined && object.info !== null) - ? 
StreamSourceInfo.fromPartial(object.info) - : undefined; - message.sourceName = object.sourceName ?? ""; - return message; - }, -}; - -function createBaseStreamSource_PropertiesEntry(): StreamSource_PropertiesEntry { - return { key: "", value: "" }; -} - -export const StreamSource_PropertiesEntry = { - fromJSON(object: any): StreamSource_PropertiesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: StreamSource_PropertiesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>(object: I): StreamSource_PropertiesEntry { - const message = createBaseStreamSource_PropertiesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseSourceNode(): SourceNode { - return { sourceInner: undefined }; -} - -export const SourceNode = { - fromJSON(object: any): SourceNode { - return { sourceInner: isSet(object.sourceInner) ? StreamSource.fromJSON(object.sourceInner) : undefined }; - }, - - toJSON(message: SourceNode): unknown { - const obj: any = {}; - message.sourceInner !== undefined && - (obj.sourceInner = message.sourceInner ? StreamSource.toJSON(message.sourceInner) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): SourceNode { - const message = createBaseSourceNode(); - message.sourceInner = (object.sourceInner !== undefined && object.sourceInner !== null) - ? StreamSource.fromPartial(object.sourceInner) - : undefined; - return message; - }, -}; - -function createBaseSinkDesc(): SinkDesc { - return { - id: 0, - name: "", - definition: "", - columns: [], - pk: [], - streamKey: [], - distributionKey: [], - properties: {}, - sinkType: SinkType.UNSPECIFIED, - }; -} - -export const SinkDesc = { - fromJSON(object: any): SinkDesc { - return { - id: isSet(object.id) ? Number(object.id) : 0, - name: isSet(object.name) ? String(object.name) : "", - definition: isSet(object.definition) ? String(object.definition) : "", - columns: Array.isArray(object?.columns) - ? object.columns.map((e: any) => ColumnDesc.fromJSON(e)) - : [], - pk: Array.isArray(object?.pk) ? object.pk.map((e: any) => ColumnOrder.fromJSON(e)) : [], - streamKey: Array.isArray(object?.streamKey) ? object.streamKey.map((e: any) => Number(e)) : [], - distributionKey: Array.isArray(object?.distributionKey) ? object.distributionKey.map((e: any) => Number(e)) : [], - properties: isObject(object.properties) - ? Object.entries(object.properties).reduce<{ [key: string]: string }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}) - : {}, - sinkType: isSet(object.sinkType) ? sinkTypeFromJSON(object.sinkType) : SinkType.UNSPECIFIED, - }; - }, - - toJSON(message: SinkDesc): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.name !== undefined && (obj.name = message.name); - message.definition !== undefined && (obj.definition = message.definition); - if (message.columns) { - obj.columns = message.columns.map((e) => e ? ColumnDesc.toJSON(e) : undefined); - } else { - obj.columns = []; - } - if (message.pk) { - obj.pk = message.pk.map((e) => e ? 
ColumnOrder.toJSON(e) : undefined); - } else { - obj.pk = []; - } - if (message.streamKey) { - obj.streamKey = message.streamKey.map((e) => Math.round(e)); - } else { - obj.streamKey = []; - } - if (message.distributionKey) { - obj.distributionKey = message.distributionKey.map((e) => Math.round(e)); - } else { - obj.distributionKey = []; - } - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - message.sinkType !== undefined && (obj.sinkType = sinkTypeToJSON(message.sinkType)); - return obj; - }, - - fromPartial, I>>(object: I): SinkDesc { - const message = createBaseSinkDesc(); - message.id = object.id ?? 0; - message.name = object.name ?? ""; - message.definition = object.definition ?? ""; - message.columns = object.columns?.map((e) => ColumnDesc.fromPartial(e)) || []; - message.pk = object.pk?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.streamKey = object.streamKey?.map((e) => e) || []; - message.distributionKey = object.distributionKey?.map((e) => e) || []; - message.properties = Object.entries(object.properties ?? {}).reduce<{ [key: string]: string }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, - {}, - ); - message.sinkType = object.sinkType ?? SinkType.UNSPECIFIED; - return message; - }, -}; - -function createBaseSinkDesc_PropertiesEntry(): SinkDesc_PropertiesEntry { - return { key: "", value: "" }; -} - -export const SinkDesc_PropertiesEntry = { - fromJSON(object: any): SinkDesc_PropertiesEntry { - return { key: isSet(object.key) ? String(object.key) : "", value: isSet(object.value) ? String(object.value) : "" }; - }, - - toJSON(message: SinkDesc_PropertiesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>(object: I): SinkDesc_PropertiesEntry { - const message = createBaseSinkDesc_PropertiesEntry(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -function createBaseSinkNode(): SinkNode { - return { sinkDesc: undefined }; -} - -export const SinkNode = { - fromJSON(object: any): SinkNode { - return { sinkDesc: isSet(object.sinkDesc) ? SinkDesc.fromJSON(object.sinkDesc) : undefined }; - }, - - toJSON(message: SinkNode): unknown { - const obj: any = {}; - message.sinkDesc !== undefined && (obj.sinkDesc = message.sinkDesc ? SinkDesc.toJSON(message.sinkDesc) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): SinkNode { - const message = createBaseSinkNode(); - message.sinkDesc = (object.sinkDesc !== undefined && object.sinkDesc !== null) - ? SinkDesc.fromPartial(object.sinkDesc) - : undefined; - return message; - }, -}; - -function createBaseProjectNode(): ProjectNode { - return { selectList: [], watermarkInputKey: [], watermarkOutputKey: [] }; -} - -export const ProjectNode = { - fromJSON(object: any): ProjectNode { - return { - selectList: Array.isArray(object?.selectList) ? object.selectList.map((e: any) => ExprNode.fromJSON(e)) : [], - watermarkInputKey: Array.isArray(object?.watermarkInputKey) - ? object.watermarkInputKey.map((e: any) => Number(e)) - : [], - watermarkOutputKey: Array.isArray(object?.watermarkOutputKey) - ? 
object.watermarkOutputKey.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: ProjectNode): unknown { - const obj: any = {}; - if (message.selectList) { - obj.selectList = message.selectList.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.selectList = []; - } - if (message.watermarkInputKey) { - obj.watermarkInputKey = message.watermarkInputKey.map((e) => Math.round(e)); - } else { - obj.watermarkInputKey = []; - } - if (message.watermarkOutputKey) { - obj.watermarkOutputKey = message.watermarkOutputKey.map((e) => Math.round(e)); - } else { - obj.watermarkOutputKey = []; - } - return obj; - }, - - fromPartial, I>>(object: I): ProjectNode { - const message = createBaseProjectNode(); - message.selectList = object.selectList?.map((e) => ExprNode.fromPartial(e)) || []; - message.watermarkInputKey = object.watermarkInputKey?.map((e) => e) || []; - message.watermarkOutputKey = object.watermarkOutputKey?.map((e) => e) || []; - return message; - }, -}; - -function createBaseFilterNode(): FilterNode { - return { searchCondition: undefined }; -} - -export const FilterNode = { - fromJSON(object: any): FilterNode { - return { searchCondition: isSet(object.searchCondition) ? ExprNode.fromJSON(object.searchCondition) : undefined }; - }, - - toJSON(message: FilterNode): unknown { - const obj: any = {}; - message.searchCondition !== undefined && - (obj.searchCondition = message.searchCondition ? ExprNode.toJSON(message.searchCondition) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): FilterNode { - const message = createBaseFilterNode(); - message.searchCondition = (object.searchCondition !== undefined && object.searchCondition !== null) - ? ExprNode.fromPartial(object.searchCondition) - : undefined; - return message; - }, -}; - -function createBaseMaterializeNode(): MaterializeNode { - return { - tableId: 0, - columnOrders: [], - table: undefined, - handlePkConflictBehavior: HandleConflictBehavior.NO_CHECK_UNSPECIFIED, - }; -} - -export const MaterializeNode = { - fromJSON(object: any): MaterializeNode { - return { - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - columnOrders: Array.isArray(object?.columnOrders) - ? object.columnOrders.map((e: any) => ColumnOrder.fromJSON(e)) - : [], - table: isSet(object.table) ? Table.fromJSON(object.table) : undefined, - handlePkConflictBehavior: isSet(object.handlePkConflictBehavior) - ? handleConflictBehaviorFromJSON(object.handlePkConflictBehavior) - : HandleConflictBehavior.NO_CHECK_UNSPECIFIED, - }; - }, - - toJSON(message: MaterializeNode): unknown { - const obj: any = {}; - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - if (message.columnOrders) { - obj.columnOrders = message.columnOrders.map((e) => e ? ColumnOrder.toJSON(e) : undefined); - } else { - obj.columnOrders = []; - } - message.table !== undefined && (obj.table = message.table ? Table.toJSON(message.table) : undefined); - message.handlePkConflictBehavior !== undefined && - (obj.handlePkConflictBehavior = handleConflictBehaviorToJSON(message.handlePkConflictBehavior)); - return obj; - }, - - fromPartial, I>>(object: I): MaterializeNode { - const message = createBaseMaterializeNode(); - message.tableId = object.tableId ?? 0; - message.columnOrders = object.columnOrders?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.table = (object.table !== undefined && object.table !== null) ? Table.fromPartial(object.table) : undefined; - message.handlePkConflictBehavior = object.handlePkConflictBehavior ?? 
HandleConflictBehavior.NO_CHECK_UNSPECIFIED; - return message; - }, -}; - -function createBaseAggCallState(): AggCallState { - return { inner: undefined }; -} - -export const AggCallState = { - fromJSON(object: any): AggCallState { - return { - inner: isSet(object.resultValueState) - ? { - $case: "resultValueState", - resultValueState: AggCallState_ResultValueState.fromJSON(object.resultValueState), - } - : isSet(object.tableState) - ? { $case: "tableState", tableState: AggCallState_TableState.fromJSON(object.tableState) } - : isSet(object.materializedInputState) - ? { - $case: "materializedInputState", - materializedInputState: AggCallState_MaterializedInputState.fromJSON(object.materializedInputState), - } - : undefined, - }; - }, - - toJSON(message: AggCallState): unknown { - const obj: any = {}; - message.inner?.$case === "resultValueState" && (obj.resultValueState = message.inner?.resultValueState - ? AggCallState_ResultValueState.toJSON(message.inner?.resultValueState) - : undefined); - message.inner?.$case === "tableState" && (obj.tableState = message.inner?.tableState - ? AggCallState_TableState.toJSON(message.inner?.tableState) - : undefined); - message.inner?.$case === "materializedInputState" && - (obj.materializedInputState = message.inner?.materializedInputState - ? AggCallState_MaterializedInputState.toJSON(message.inner?.materializedInputState) - : undefined); - return obj; - }, - - fromPartial, I>>(object: I): AggCallState { - const message = createBaseAggCallState(); - if ( - object.inner?.$case === "resultValueState" && - object.inner?.resultValueState !== undefined && - object.inner?.resultValueState !== null - ) { - message.inner = { - $case: "resultValueState", - resultValueState: AggCallState_ResultValueState.fromPartial(object.inner.resultValueState), - }; - } - if ( - object.inner?.$case === "tableState" && - object.inner?.tableState !== undefined && - object.inner?.tableState !== null - ) { - message.inner = { $case: "tableState", tableState: AggCallState_TableState.fromPartial(object.inner.tableState) }; - } - if ( - object.inner?.$case === "materializedInputState" && - object.inner?.materializedInputState !== undefined && - object.inner?.materializedInputState !== null - ) { - message.inner = { - $case: "materializedInputState", - materializedInputState: AggCallState_MaterializedInputState.fromPartial(object.inner.materializedInputState), - }; - } - return message; - }, -}; - -function createBaseAggCallState_ResultValueState(): AggCallState_ResultValueState { - return {}; -} - -export const AggCallState_ResultValueState = { - fromJSON(_: any): AggCallState_ResultValueState { - return {}; - }, - - toJSON(_: AggCallState_ResultValueState): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>(_: I): AggCallState_ResultValueState { - const message = createBaseAggCallState_ResultValueState(); - return message; - }, -}; - -function createBaseAggCallState_TableState(): AggCallState_TableState { - return { table: undefined }; -} - -export const AggCallState_TableState = { - fromJSON(object: any): AggCallState_TableState { - return { table: isSet(object.table) ? Table.fromJSON(object.table) : undefined }; - }, - - toJSON(message: AggCallState_TableState): unknown { - const obj: any = {}; - message.table !== undefined && (obj.table = message.table ? 
Table.toJSON(message.table) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<AggCallState_TableState>, I>>(object: I): AggCallState_TableState { - const message = createBaseAggCallState_TableState(); - message.table = (object.table !== undefined && object.table !== null) ? Table.fromPartial(object.table) : undefined; - return message; - }, -}; - -function createBaseAggCallState_MaterializedInputState(): AggCallState_MaterializedInputState { - return { table: undefined, includedUpstreamIndices: [], tableValueIndices: [] }; -} - -export const AggCallState_MaterializedInputState = { - fromJSON(object: any): AggCallState_MaterializedInputState { - return { - table: isSet(object.table) ? Table.fromJSON(object.table) : undefined, - includedUpstreamIndices: Array.isArray(object?.includedUpstreamIndices) - ? object.includedUpstreamIndices.map((e: any) => Number(e)) - : [], - tableValueIndices: Array.isArray(object?.tableValueIndices) - ? object.tableValueIndices.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: AggCallState_MaterializedInputState): unknown { - const obj: any = {}; - message.table !== undefined && (obj.table = message.table ? Table.toJSON(message.table) : undefined); - if (message.includedUpstreamIndices) { - obj.includedUpstreamIndices = message.includedUpstreamIndices.map((e) => Math.round(e)); - } else { - obj.includedUpstreamIndices = []; - } - if (message.tableValueIndices) { - obj.tableValueIndices = message.tableValueIndices.map((e) => Math.round(e)); - } else { - obj.tableValueIndices = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<AggCallState_MaterializedInputState>, I>>( - object: I, - ): AggCallState_MaterializedInputState { - const message = createBaseAggCallState_MaterializedInputState(); - message.table = (object.table !== undefined && object.table !== null) ? Table.fromPartial(object.table) : undefined; - message.includedUpstreamIndices = object.includedUpstreamIndices?.map((e) => e) || []; - message.tableValueIndices = object.tableValueIndices?.map((e) => e) || []; - return message; - }, -}; - -function createBaseSimpleAggNode(): SimpleAggNode { - return { - aggCalls: [], - distributionKey: [], - aggCallStates: [], - resultTable: undefined, - isAppendOnly: false, - distinctDedupTables: {}, - rowCountIndex: 0, - }; -} - -export const SimpleAggNode = { - fromJSON(object: any): SimpleAggNode { - return { - aggCalls: Array.isArray(object?.aggCalls) ? object.aggCalls.map((e: any) => AggCall.fromJSON(e)) : [], - distributionKey: Array.isArray(object?.distributionKey) ? object.distributionKey.map((e: any) => Number(e)) : [], - aggCallStates: Array.isArray(object?.aggCallStates) - ? object.aggCallStates.map((e: any) => AggCallState.fromJSON(e)) - : [], - resultTable: isSet(object.resultTable) ? Table.fromJSON(object.resultTable) : undefined, - isAppendOnly: isSet(object.isAppendOnly) ? Boolean(object.isAppendOnly) : false, - distinctDedupTables: isObject(object.distinctDedupTables) - ? Object.entries(object.distinctDedupTables).reduce<{ [key: number]: Table }>((acc, [key, value]) => { - acc[Number(key)] = Table.fromJSON(value); - return acc; - }, {}) - : {}, - rowCountIndex: isSet(object.rowCountIndex) ? Number(object.rowCountIndex) : 0, - }; - }, - - toJSON(message: SimpleAggNode): unknown { - const obj: any = {}; - if (message.aggCalls) { - obj.aggCalls = message.aggCalls.map((e) => e ? 
AggCall.toJSON(e) : undefined); - } else { - obj.aggCalls = []; - } - if (message.distributionKey) { - obj.distributionKey = message.distributionKey.map((e) => Math.round(e)); - } else { - obj.distributionKey = []; - } - if (message.aggCallStates) { - obj.aggCallStates = message.aggCallStates.map((e) => e ? AggCallState.toJSON(e) : undefined); - } else { - obj.aggCallStates = []; - } - message.resultTable !== undefined && - (obj.resultTable = message.resultTable ? Table.toJSON(message.resultTable) : undefined); - message.isAppendOnly !== undefined && (obj.isAppendOnly = message.isAppendOnly); - obj.distinctDedupTables = {}; - if (message.distinctDedupTables) { - Object.entries(message.distinctDedupTables).forEach(([k, v]) => { - obj.distinctDedupTables[k] = Table.toJSON(v); - }); - } - message.rowCountIndex !== undefined && (obj.rowCountIndex = Math.round(message.rowCountIndex)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SimpleAggNode>, I>>(object: I): SimpleAggNode { - const message = createBaseSimpleAggNode(); - message.aggCalls = object.aggCalls?.map((e) => AggCall.fromPartial(e)) || []; - message.distributionKey = object.distributionKey?.map((e) => e) || []; - message.aggCallStates = object.aggCallStates?.map((e) => AggCallState.fromPartial(e)) || []; - message.resultTable = (object.resultTable !== undefined && object.resultTable !== null) - ? Table.fromPartial(object.resultTable) - : undefined; - message.isAppendOnly = object.isAppendOnly ?? false; - message.distinctDedupTables = Object.entries(object.distinctDedupTables ?? {}).reduce<{ [key: number]: Table }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = Table.fromPartial(value); - } - return acc; - }, - {}, - ); - message.rowCountIndex = object.rowCountIndex ?? 0; - return message; - }, -}; - -function createBaseSimpleAggNode_DistinctDedupTablesEntry(): SimpleAggNode_DistinctDedupTablesEntry { - return { key: 0, value: undefined }; -} - -export const SimpleAggNode_DistinctDedupTablesEntry = { - fromJSON(object: any): SimpleAggNode_DistinctDedupTablesEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? Table.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: SimpleAggNode_DistinctDedupTablesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? Table.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SimpleAggNode_DistinctDedupTablesEntry>, I>>( - object: I, - ): SimpleAggNode_DistinctDedupTablesEntry { - const message = createBaseSimpleAggNode_DistinctDedupTablesEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) ? Table.fromPartial(object.value) : undefined; - return message; - }, -}; - -function createBaseHashAggNode(): HashAggNode { - return { - groupKey: [], - aggCalls: [], - aggCallStates: [], - resultTable: undefined, - isAppendOnly: false, - distinctDedupTables: {}, - rowCountIndex: 0, - }; -} - -export const HashAggNode = { - fromJSON(object: any): HashAggNode { - return { - groupKey: Array.isArray(object?.groupKey) ? object.groupKey.map((e: any) => Number(e)) : [], - aggCalls: Array.isArray(object?.aggCalls) ? object.aggCalls.map((e: any) => AggCall.fromJSON(e)) : [], - aggCallStates: Array.isArray(object?.aggCallStates) - ? object.aggCallStates.map((e: any) => AggCallState.fromJSON(e)) - : [], - resultTable: isSet(object.resultTable) ? 
Table.fromJSON(object.resultTable) : undefined, - isAppendOnly: isSet(object.isAppendOnly) ? Boolean(object.isAppendOnly) : false, - distinctDedupTables: isObject(object.distinctDedupTables) - ? Object.entries(object.distinctDedupTables).reduce<{ [key: number]: Table }>((acc, [key, value]) => { - acc[Number(key)] = Table.fromJSON(value); - return acc; - }, {}) - : {}, - rowCountIndex: isSet(object.rowCountIndex) ? Number(object.rowCountIndex) : 0, - }; - }, - - toJSON(message: HashAggNode): unknown { - const obj: any = {}; - if (message.groupKey) { - obj.groupKey = message.groupKey.map((e) => Math.round(e)); - } else { - obj.groupKey = []; - } - if (message.aggCalls) { - obj.aggCalls = message.aggCalls.map((e) => e ? AggCall.toJSON(e) : undefined); - } else { - obj.aggCalls = []; - } - if (message.aggCallStates) { - obj.aggCallStates = message.aggCallStates.map((e) => e ? AggCallState.toJSON(e) : undefined); - } else { - obj.aggCallStates = []; - } - message.resultTable !== undefined && - (obj.resultTable = message.resultTable ? Table.toJSON(message.resultTable) : undefined); - message.isAppendOnly !== undefined && (obj.isAppendOnly = message.isAppendOnly); - obj.distinctDedupTables = {}; - if (message.distinctDedupTables) { - Object.entries(message.distinctDedupTables).forEach(([k, v]) => { - obj.distinctDedupTables[k] = Table.toJSON(v); - }); - } - message.rowCountIndex !== undefined && (obj.rowCountIndex = Math.round(message.rowCountIndex)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<HashAggNode>, I>>(object: I): HashAggNode { - const message = createBaseHashAggNode(); - message.groupKey = object.groupKey?.map((e) => e) || []; - message.aggCalls = object.aggCalls?.map((e) => AggCall.fromPartial(e)) || []; - message.aggCallStates = object.aggCallStates?.map((e) => AggCallState.fromPartial(e)) || []; - message.resultTable = (object.resultTable !== undefined && object.resultTable !== null) - ? Table.fromPartial(object.resultTable) - : undefined; - message.isAppendOnly = object.isAppendOnly ?? false; - message.distinctDedupTables = Object.entries(object.distinctDedupTables ?? {}).reduce<{ [key: number]: Table }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = Table.fromPartial(value); - } - return acc; - }, - {}, - ); - message.rowCountIndex = object.rowCountIndex ?? 0; - return message; - }, -}; - -function createBaseHashAggNode_DistinctDedupTablesEntry(): HashAggNode_DistinctDedupTablesEntry { - return { key: 0, value: undefined }; -} - -export const HashAggNode_DistinctDedupTablesEntry = { - fromJSON(object: any): HashAggNode_DistinctDedupTablesEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? Table.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: HashAggNode_DistinctDedupTablesEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? Table.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<HashAggNode_DistinctDedupTablesEntry>, I>>( - object: I, - ): HashAggNode_DistinctDedupTablesEntry { - const message = createBaseHashAggNode_DistinctDedupTablesEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) ? 
Table.fromPartial(object.value) : undefined; - return message; - }, -}; - -function createBaseTopNNode(): TopNNode { - return { limit: 0, offset: 0, table: undefined, orderBy: [], withTies: false }; -} - -export const TopNNode = { - fromJSON(object: any): TopNNode { - return { - limit: isSet(object.limit) ? Number(object.limit) : 0, - offset: isSet(object.offset) ? Number(object.offset) : 0, - table: isSet(object.table) ? Table.fromJSON(object.table) : undefined, - orderBy: Array.isArray(object?.orderBy) ? object.orderBy.map((e: any) => ColumnOrder.fromJSON(e)) : [], - withTies: isSet(object.withTies) ? Boolean(object.withTies) : false, - }; - }, - - toJSON(message: TopNNode): unknown { - const obj: any = {}; - message.limit !== undefined && (obj.limit = Math.round(message.limit)); - message.offset !== undefined && (obj.offset = Math.round(message.offset)); - message.table !== undefined && (obj.table = message.table ? Table.toJSON(message.table) : undefined); - if (message.orderBy) { - obj.orderBy = message.orderBy.map((e) => e ? ColumnOrder.toJSON(e) : undefined); - } else { - obj.orderBy = []; - } - message.withTies !== undefined && (obj.withTies = message.withTies); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<TopNNode>, I>>(object: I): TopNNode { - const message = createBaseTopNNode(); - message.limit = object.limit ?? 0; - message.offset = object.offset ?? 0; - message.table = (object.table !== undefined && object.table !== null) ? Table.fromPartial(object.table) : undefined; - message.orderBy = object.orderBy?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.withTies = object.withTies ?? false; - return message; - }, -}; - -function createBaseGroupTopNNode(): GroupTopNNode { - return { limit: 0, offset: 0, groupKey: [], table: undefined, orderBy: [], withTies: false }; -} - -export const GroupTopNNode = { - fromJSON(object: any): GroupTopNNode { - return { - limit: isSet(object.limit) ? Number(object.limit) : 0, - offset: isSet(object.offset) ? Number(object.offset) : 0, - groupKey: Array.isArray(object?.groupKey) ? object.groupKey.map((e: any) => Number(e)) : [], - table: isSet(object.table) ? Table.fromJSON(object.table) : undefined, - orderBy: Array.isArray(object?.orderBy) ? object.orderBy.map((e: any) => ColumnOrder.fromJSON(e)) : [], - withTies: isSet(object.withTies) ? Boolean(object.withTies) : false, - }; - }, - - toJSON(message: GroupTopNNode): unknown { - const obj: any = {}; - message.limit !== undefined && (obj.limit = Math.round(message.limit)); - message.offset !== undefined && (obj.offset = Math.round(message.offset)); - if (message.groupKey) { - obj.groupKey = message.groupKey.map((e) => Math.round(e)); - } else { - obj.groupKey = []; - } - message.table !== undefined && (obj.table = message.table ? Table.toJSON(message.table) : undefined); - if (message.orderBy) { - obj.orderBy = message.orderBy.map((e) => e ? ColumnOrder.toJSON(e) : undefined); - } else { - obj.orderBy = []; - } - message.withTies !== undefined && (obj.withTies = message.withTies); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<GroupTopNNode>, I>>(object: I): GroupTopNNode { - const message = createBaseGroupTopNNode(); - message.limit = object.limit ?? 0; - message.offset = object.offset ?? 0; - message.groupKey = object.groupKey?.map((e) => e) || []; - message.table = (object.table !== undefined && object.table !== null) ? Table.fromPartial(object.table) : undefined; - message.orderBy = object.orderBy?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.withTies = object.withTies ?? 
false; - return message; - }, -}; - -function createBaseHashJoinNode(): HashJoinNode { - return { - joinType: JoinType.UNSPECIFIED, - leftKey: [], - rightKey: [], - condition: undefined, - leftTable: undefined, - rightTable: undefined, - leftDegreeTable: undefined, - rightDegreeTable: undefined, - outputIndices: [], - leftDedupedInputPkIndices: [], - rightDedupedInputPkIndices: [], - nullSafe: [], - isAppendOnly: false, - }; -} - -export const HashJoinNode = { - fromJSON(object: any): HashJoinNode { - return { - joinType: isSet(object.joinType) ? joinTypeFromJSON(object.joinType) : JoinType.UNSPECIFIED, - leftKey: Array.isArray(object?.leftKey) ? object.leftKey.map((e: any) => Number(e)) : [], - rightKey: Array.isArray(object?.rightKey) ? object.rightKey.map((e: any) => Number(e)) : [], - condition: isSet(object.condition) ? ExprNode.fromJSON(object.condition) : undefined, - leftTable: isSet(object.leftTable) ? Table.fromJSON(object.leftTable) : undefined, - rightTable: isSet(object.rightTable) ? Table.fromJSON(object.rightTable) : undefined, - leftDegreeTable: isSet(object.leftDegreeTable) ? Table.fromJSON(object.leftDegreeTable) : undefined, - rightDegreeTable: isSet(object.rightDegreeTable) ? Table.fromJSON(object.rightDegreeTable) : undefined, - outputIndices: Array.isArray(object?.outputIndices) ? object.outputIndices.map((e: any) => Number(e)) : [], - leftDedupedInputPkIndices: Array.isArray(object?.leftDedupedInputPkIndices) - ? object.leftDedupedInputPkIndices.map((e: any) => Number(e)) - : [], - rightDedupedInputPkIndices: Array.isArray(object?.rightDedupedInputPkIndices) - ? object.rightDedupedInputPkIndices.map((e: any) => Number(e)) - : [], - nullSafe: Array.isArray(object?.nullSafe) ? object.nullSafe.map((e: any) => Boolean(e)) : [], - isAppendOnly: isSet(object.isAppendOnly) ? Boolean(object.isAppendOnly) : false, - }; - }, - - toJSON(message: HashJoinNode): unknown { - const obj: any = {}; - message.joinType !== undefined && (obj.joinType = joinTypeToJSON(message.joinType)); - if (message.leftKey) { - obj.leftKey = message.leftKey.map((e) => Math.round(e)); - } else { - obj.leftKey = []; - } - if (message.rightKey) { - obj.rightKey = message.rightKey.map((e) => Math.round(e)); - } else { - obj.rightKey = []; - } - message.condition !== undefined && - (obj.condition = message.condition ? ExprNode.toJSON(message.condition) : undefined); - message.leftTable !== undefined && - (obj.leftTable = message.leftTable ? Table.toJSON(message.leftTable) : undefined); - message.rightTable !== undefined && - (obj.rightTable = message.rightTable ? Table.toJSON(message.rightTable) : undefined); - message.leftDegreeTable !== undefined && - (obj.leftDegreeTable = message.leftDegreeTable ? Table.toJSON(message.leftDegreeTable) : undefined); - message.rightDegreeTable !== undefined && - (obj.rightDegreeTable = message.rightDegreeTable ? 
Table.toJSON(message.rightDegreeTable) : undefined); - if (message.outputIndices) { - obj.outputIndices = message.outputIndices.map((e) => Math.round(e)); - } else { - obj.outputIndices = []; - } - if (message.leftDedupedInputPkIndices) { - obj.leftDedupedInputPkIndices = message.leftDedupedInputPkIndices.map((e) => Math.round(e)); - } else { - obj.leftDedupedInputPkIndices = []; - } - if (message.rightDedupedInputPkIndices) { - obj.rightDedupedInputPkIndices = message.rightDedupedInputPkIndices.map((e) => Math.round(e)); - } else { - obj.rightDedupedInputPkIndices = []; - } - if (message.nullSafe) { - obj.nullSafe = message.nullSafe.map((e) => e); - } else { - obj.nullSafe = []; - } - message.isAppendOnly !== undefined && (obj.isAppendOnly = message.isAppendOnly); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<HashJoinNode>, I>>(object: I): HashJoinNode { - const message = createBaseHashJoinNode(); - message.joinType = object.joinType ?? JoinType.UNSPECIFIED; - message.leftKey = object.leftKey?.map((e) => e) || []; - message.rightKey = object.rightKey?.map((e) => e) || []; - message.condition = (object.condition !== undefined && object.condition !== null) - ? ExprNode.fromPartial(object.condition) - : undefined; - message.leftTable = (object.leftTable !== undefined && object.leftTable !== null) - ? Table.fromPartial(object.leftTable) - : undefined; - message.rightTable = (object.rightTable !== undefined && object.rightTable !== null) - ? Table.fromPartial(object.rightTable) - : undefined; - message.leftDegreeTable = (object.leftDegreeTable !== undefined && object.leftDegreeTable !== null) - ? Table.fromPartial(object.leftDegreeTable) - : undefined; - message.rightDegreeTable = (object.rightDegreeTable !== undefined && object.rightDegreeTable !== null) - ? Table.fromPartial(object.rightDegreeTable) - : undefined; - message.outputIndices = object.outputIndices?.map((e) => e) || []; - message.leftDedupedInputPkIndices = object.leftDedupedInputPkIndices?.map((e) => e) || []; - message.rightDedupedInputPkIndices = object.rightDedupedInputPkIndices?.map((e) => e) || []; - message.nullSafe = object.nullSafe?.map((e) => e) || []; - message.isAppendOnly = object.isAppendOnly ?? false; - return message; - }, -}; - -function createBaseDynamicFilterNode(): DynamicFilterNode { - return { leftKey: 0, condition: undefined, leftTable: undefined, rightTable: undefined }; -} - -export const DynamicFilterNode = { - fromJSON(object: any): DynamicFilterNode { - return { - leftKey: isSet(object.leftKey) ? Number(object.leftKey) : 0, - condition: isSet(object.condition) ? ExprNode.fromJSON(object.condition) : undefined, - leftTable: isSet(object.leftTable) ? Table.fromJSON(object.leftTable) : undefined, - rightTable: isSet(object.rightTable) ? Table.fromJSON(object.rightTable) : undefined, - }; - }, - - toJSON(message: DynamicFilterNode): unknown { - const obj: any = {}; - message.leftKey !== undefined && (obj.leftKey = Math.round(message.leftKey)); - message.condition !== undefined && - (obj.condition = message.condition ? ExprNode.toJSON(message.condition) : undefined); - message.leftTable !== undefined && - (obj.leftTable = message.leftTable ? Table.toJSON(message.leftTable) : undefined); - message.rightTable !== undefined && - (obj.rightTable = message.rightTable ? Table.toJSON(message.rightTable) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<DynamicFilterNode>, I>>(object: I): DynamicFilterNode { - const message = createBaseDynamicFilterNode(); - message.leftKey = object.leftKey ?? 
0; - message.condition = (object.condition !== undefined && object.condition !== null) - ? ExprNode.fromPartial(object.condition) - : undefined; - message.leftTable = (object.leftTable !== undefined && object.leftTable !== null) - ? Table.fromPartial(object.leftTable) - : undefined; - message.rightTable = (object.rightTable !== undefined && object.rightTable !== null) - ? Table.fromPartial(object.rightTable) - : undefined; - return message; - }, -}; - -function createBaseDeltaIndexJoinNode(): DeltaIndexJoinNode { - return { - joinType: JoinType.UNSPECIFIED, - leftKey: [], - rightKey: [], - condition: undefined, - leftTableId: 0, - rightTableId: 0, - leftInfo: undefined, - rightInfo: undefined, - outputIndices: [], - }; -} - -export const DeltaIndexJoinNode = { - fromJSON(object: any): DeltaIndexJoinNode { - return { - joinType: isSet(object.joinType) ? joinTypeFromJSON(object.joinType) : JoinType.UNSPECIFIED, - leftKey: Array.isArray(object?.leftKey) ? object.leftKey.map((e: any) => Number(e)) : [], - rightKey: Array.isArray(object?.rightKey) ? object.rightKey.map((e: any) => Number(e)) : [], - condition: isSet(object.condition) ? ExprNode.fromJSON(object.condition) : undefined, - leftTableId: isSet(object.leftTableId) ? Number(object.leftTableId) : 0, - rightTableId: isSet(object.rightTableId) ? Number(object.rightTableId) : 0, - leftInfo: isSet(object.leftInfo) ? ArrangementInfo.fromJSON(object.leftInfo) : undefined, - rightInfo: isSet(object.rightInfo) ? ArrangementInfo.fromJSON(object.rightInfo) : undefined, - outputIndices: Array.isArray(object?.outputIndices) ? object.outputIndices.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: DeltaIndexJoinNode): unknown { - const obj: any = {}; - message.joinType !== undefined && (obj.joinType = joinTypeToJSON(message.joinType)); - if (message.leftKey) { - obj.leftKey = message.leftKey.map((e) => Math.round(e)); - } else { - obj.leftKey = []; - } - if (message.rightKey) { - obj.rightKey = message.rightKey.map((e) => Math.round(e)); - } else { - obj.rightKey = []; - } - message.condition !== undefined && - (obj.condition = message.condition ? ExprNode.toJSON(message.condition) : undefined); - message.leftTableId !== undefined && (obj.leftTableId = Math.round(message.leftTableId)); - message.rightTableId !== undefined && (obj.rightTableId = Math.round(message.rightTableId)); - message.leftInfo !== undefined && - (obj.leftInfo = message.leftInfo ? ArrangementInfo.toJSON(message.leftInfo) : undefined); - message.rightInfo !== undefined && - (obj.rightInfo = message.rightInfo ? ArrangementInfo.toJSON(message.rightInfo) : undefined); - if (message.outputIndices) { - obj.outputIndices = message.outputIndices.map((e) => Math.round(e)); - } else { - obj.outputIndices = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<DeltaIndexJoinNode>, I>>(object: I): DeltaIndexJoinNode { - const message = createBaseDeltaIndexJoinNode(); - message.joinType = object.joinType ?? JoinType.UNSPECIFIED; - message.leftKey = object.leftKey?.map((e) => e) || []; - message.rightKey = object.rightKey?.map((e) => e) || []; - message.condition = (object.condition !== undefined && object.condition !== null) - ? ExprNode.fromPartial(object.condition) - : undefined; - message.leftTableId = object.leftTableId ?? 0; - message.rightTableId = object.rightTableId ?? 0; - message.leftInfo = (object.leftInfo !== undefined && object.leftInfo !== null) - ? ArrangementInfo.fromPartial(object.leftInfo) - : undefined; - message.rightInfo = (object.rightInfo !== undefined && object.rightInfo !== null) - ? 
ArrangementInfo.fromPartial(object.rightInfo) - : undefined; - message.outputIndices = object.outputIndices?.map((e) => e) || []; - return message; - }, -}; - -function createBaseHopWindowNode(): HopWindowNode { - return { - timeCol: 0, - windowSlide: undefined, - windowSize: undefined, - outputIndices: [], - windowStartExprs: [], - windowEndExprs: [], - }; -} - -export const HopWindowNode = { - fromJSON(object: any): HopWindowNode { - return { - timeCol: isSet(object.timeCol) ? Number(object.timeCol) : 0, - windowSlide: isSet(object.windowSlide) ? IntervalUnit.fromJSON(object.windowSlide) : undefined, - windowSize: isSet(object.windowSize) ? IntervalUnit.fromJSON(object.windowSize) : undefined, - outputIndices: Array.isArray(object?.outputIndices) ? object.outputIndices.map((e: any) => Number(e)) : [], - windowStartExprs: Array.isArray(object?.windowStartExprs) - ? object.windowStartExprs.map((e: any) => ExprNode.fromJSON(e)) - : [], - windowEndExprs: Array.isArray(object?.windowEndExprs) - ? object.windowEndExprs.map((e: any) => ExprNode.fromJSON(e)) - : [], - }; - }, - - toJSON(message: HopWindowNode): unknown { - const obj: any = {}; - message.timeCol !== undefined && (obj.timeCol = Math.round(message.timeCol)); - message.windowSlide !== undefined && - (obj.windowSlide = message.windowSlide ? IntervalUnit.toJSON(message.windowSlide) : undefined); - message.windowSize !== undefined && - (obj.windowSize = message.windowSize ? IntervalUnit.toJSON(message.windowSize) : undefined); - if (message.outputIndices) { - obj.outputIndices = message.outputIndices.map((e) => Math.round(e)); - } else { - obj.outputIndices = []; - } - if (message.windowStartExprs) { - obj.windowStartExprs = message.windowStartExprs.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.windowStartExprs = []; - } - if (message.windowEndExprs) { - obj.windowEndExprs = message.windowEndExprs.map((e) => e ? ExprNode.toJSON(e) : undefined); - } else { - obj.windowEndExprs = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<HopWindowNode>, I>>(object: I): HopWindowNode { - const message = createBaseHopWindowNode(); - message.timeCol = object.timeCol ?? 0; - message.windowSlide = (object.windowSlide !== undefined && object.windowSlide !== null) - ? IntervalUnit.fromPartial(object.windowSlide) - : undefined; - message.windowSize = (object.windowSize !== undefined && object.windowSize !== null) - ? IntervalUnit.fromPartial(object.windowSize) - : undefined; - message.outputIndices = object.outputIndices?.map((e) => e) || []; - message.windowStartExprs = object.windowStartExprs?.map((e) => ExprNode.fromPartial(e)) || []; - message.windowEndExprs = object.windowEndExprs?.map((e) => ExprNode.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseMergeNode(): MergeNode { - return { upstreamActorId: [], upstreamFragmentId: 0, upstreamDispatcherType: DispatcherType.UNSPECIFIED, fields: [] }; -} - -export const MergeNode = { - fromJSON(object: any): MergeNode { - return { - upstreamActorId: Array.isArray(object?.upstreamActorId) ? object.upstreamActorId.map((e: any) => Number(e)) : [], - upstreamFragmentId: isSet(object.upstreamFragmentId) ? Number(object.upstreamFragmentId) : 0, - upstreamDispatcherType: isSet(object.upstreamDispatcherType) - ? dispatcherTypeFromJSON(object.upstreamDispatcherType) - : DispatcherType.UNSPECIFIED, - fields: Array.isArray(object?.fields) ? 
object.fields.map((e: any) => Field.fromJSON(e)) : [], - }; - }, - - toJSON(message: MergeNode): unknown { - const obj: any = {}; - if (message.upstreamActorId) { - obj.upstreamActorId = message.upstreamActorId.map((e) => Math.round(e)); - } else { - obj.upstreamActorId = []; - } - message.upstreamFragmentId !== undefined && (obj.upstreamFragmentId = Math.round(message.upstreamFragmentId)); - message.upstreamDispatcherType !== undefined && - (obj.upstreamDispatcherType = dispatcherTypeToJSON(message.upstreamDispatcherType)); - if (message.fields) { - obj.fields = message.fields.map((e) => e ? Field.toJSON(e) : undefined); - } else { - obj.fields = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<MergeNode>, I>>(object: I): MergeNode { - const message = createBaseMergeNode(); - message.upstreamActorId = object.upstreamActorId?.map((e) => e) || []; - message.upstreamFragmentId = object.upstreamFragmentId ?? 0; - message.upstreamDispatcherType = object.upstreamDispatcherType ?? DispatcherType.UNSPECIFIED; - message.fields = object.fields?.map((e) => Field.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseExchangeNode(): ExchangeNode { - return { strategy: undefined }; -} - -export const ExchangeNode = { - fromJSON(object: any): ExchangeNode { - return { strategy: isSet(object.strategy) ? DispatchStrategy.fromJSON(object.strategy) : undefined }; - }, - - toJSON(message: ExchangeNode): unknown { - const obj: any = {}; - message.strategy !== undefined && - (obj.strategy = message.strategy ? DispatchStrategy.toJSON(message.strategy) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ExchangeNode>, I>>(object: I): ExchangeNode { - const message = createBaseExchangeNode(); - message.strategy = (object.strategy !== undefined && object.strategy !== null) - ? DispatchStrategy.fromPartial(object.strategy) - : undefined; - return message; - }, -}; - -function createBaseChainNode(): ChainNode { - return { - tableId: 0, - upstreamFields: [], - upstreamColumnIndices: [], - chainType: ChainType.CHAIN_UNSPECIFIED, - isSingleton: false, - tableDesc: undefined, - }; -} - -export const ChainNode = { - fromJSON(object: any): ChainNode { - return { - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - upstreamFields: Array.isArray(object?.upstreamFields) - ? object.upstreamFields.map((e: any) => Field.fromJSON(e)) - : [], - upstreamColumnIndices: Array.isArray(object?.upstreamColumnIndices) - ? object.upstreamColumnIndices.map((e: any) => Number(e)) - : [], - chainType: isSet(object.chainType) ? chainTypeFromJSON(object.chainType) : ChainType.CHAIN_UNSPECIFIED, - isSingleton: isSet(object.isSingleton) ? Boolean(object.isSingleton) : false, - tableDesc: isSet(object.tableDesc) ? StorageTableDesc.fromJSON(object.tableDesc) : undefined, - }; - }, - - toJSON(message: ChainNode): unknown { - const obj: any = {}; - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - if (message.upstreamFields) { - obj.upstreamFields = message.upstreamFields.map((e) => e ? Field.toJSON(e) : undefined); - } else { - obj.upstreamFields = []; - } - if (message.upstreamColumnIndices) { - obj.upstreamColumnIndices = message.upstreamColumnIndices.map((e) => Math.round(e)); - } else { - obj.upstreamColumnIndices = []; - } - message.chainType !== undefined && (obj.chainType = chainTypeToJSON(message.chainType)); - message.isSingleton !== undefined && (obj.isSingleton = message.isSingleton); - message.tableDesc !== undefined && - (obj.tableDesc = message.tableDesc ? 
StorageTableDesc.toJSON(message.tableDesc) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ChainNode>, I>>(object: I): ChainNode { - const message = createBaseChainNode(); - message.tableId = object.tableId ?? 0; - message.upstreamFields = object.upstreamFields?.map((e) => Field.fromPartial(e)) || []; - message.upstreamColumnIndices = object.upstreamColumnIndices?.map((e) => e) || []; - message.chainType = object.chainType ?? ChainType.CHAIN_UNSPECIFIED; - message.isSingleton = object.isSingleton ?? false; - message.tableDesc = (object.tableDesc !== undefined && object.tableDesc !== null) - ? StorageTableDesc.fromPartial(object.tableDesc) - : undefined; - return message; - }, -}; - -function createBaseBatchPlanNode(): BatchPlanNode { - return { tableDesc: undefined, columnIds: [] }; -} - -export const BatchPlanNode = { - fromJSON(object: any): BatchPlanNode { - return { - tableDesc: isSet(object.tableDesc) ? StorageTableDesc.fromJSON(object.tableDesc) : undefined, - columnIds: Array.isArray(object?.columnIds) ? object.columnIds.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: BatchPlanNode): unknown { - const obj: any = {}; - message.tableDesc !== undefined && - (obj.tableDesc = message.tableDesc ? StorageTableDesc.toJSON(message.tableDesc) : undefined); - if (message.columnIds) { - obj.columnIds = message.columnIds.map((e) => Math.round(e)); - } else { - obj.columnIds = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<BatchPlanNode>, I>>(object: I): BatchPlanNode { - const message = createBaseBatchPlanNode(); - message.tableDesc = (object.tableDesc !== undefined && object.tableDesc !== null) - ? StorageTableDesc.fromPartial(object.tableDesc) - : undefined; - message.columnIds = object.columnIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseArrangementInfo(): ArrangementInfo { - return { arrangeKeyOrders: [], columnDescs: [], tableDesc: undefined }; -} - -export const ArrangementInfo = { - fromJSON(object: any): ArrangementInfo { - return { - arrangeKeyOrders: Array.isArray(object?.arrangeKeyOrders) - ? object.arrangeKeyOrders.map((e: any) => ColumnOrder.fromJSON(e)) - : [], - columnDescs: Array.isArray(object?.columnDescs) - ? object.columnDescs.map((e: any) => ColumnDesc.fromJSON(e)) - : [], - tableDesc: isSet(object.tableDesc) ? StorageTableDesc.fromJSON(object.tableDesc) : undefined, - }; - }, - - toJSON(message: ArrangementInfo): unknown { - const obj: any = {}; - if (message.arrangeKeyOrders) { - obj.arrangeKeyOrders = message.arrangeKeyOrders.map((e) => e ? ColumnOrder.toJSON(e) : undefined); - } else { - obj.arrangeKeyOrders = []; - } - if (message.columnDescs) { - obj.columnDescs = message.columnDescs.map((e) => e ? ColumnDesc.toJSON(e) : undefined); - } else { - obj.columnDescs = []; - } - message.tableDesc !== undefined && - (obj.tableDesc = message.tableDesc ? StorageTableDesc.toJSON(message.tableDesc) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ArrangementInfo>, I>>(object: I): ArrangementInfo { - const message = createBaseArrangementInfo(); - message.arrangeKeyOrders = object.arrangeKeyOrders?.map((e) => ColumnOrder.fromPartial(e)) || []; - message.columnDescs = object.columnDescs?.map((e) => ColumnDesc.fromPartial(e)) || []; - message.tableDesc = (object.tableDesc !== undefined && object.tableDesc !== null) - ? 
StorageTableDesc.fromPartial(object.tableDesc) - : undefined; - return message; - }, -}; - -function createBaseArrangeNode(): ArrangeNode { - return { - tableInfo: undefined, - distributionKey: [], - table: undefined, - handlePkConflictBehavior: HandleConflictBehavior.NO_CHECK_UNSPECIFIED, - }; -} - -export const ArrangeNode = { - fromJSON(object: any): ArrangeNode { - return { - tableInfo: isSet(object.tableInfo) ? ArrangementInfo.fromJSON(object.tableInfo) : undefined, - distributionKey: Array.isArray(object?.distributionKey) ? object.distributionKey.map((e: any) => Number(e)) : [], - table: isSet(object.table) ? Table.fromJSON(object.table) : undefined, - handlePkConflictBehavior: isSet(object.handlePkConflictBehavior) - ? handleConflictBehaviorFromJSON(object.handlePkConflictBehavior) - : HandleConflictBehavior.NO_CHECK_UNSPECIFIED, - }; - }, - - toJSON(message: ArrangeNode): unknown { - const obj: any = {}; - message.tableInfo !== undefined && - (obj.tableInfo = message.tableInfo ? ArrangementInfo.toJSON(message.tableInfo) : undefined); - if (message.distributionKey) { - obj.distributionKey = message.distributionKey.map((e) => Math.round(e)); - } else { - obj.distributionKey = []; - } - message.table !== undefined && (obj.table = message.table ? Table.toJSON(message.table) : undefined); - message.handlePkConflictBehavior !== undefined && - (obj.handlePkConflictBehavior = handleConflictBehaviorToJSON(message.handlePkConflictBehavior)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ArrangeNode>, I>>(object: I): ArrangeNode { - const message = createBaseArrangeNode(); - message.tableInfo = (object.tableInfo !== undefined && object.tableInfo !== null) - ? ArrangementInfo.fromPartial(object.tableInfo) - : undefined; - message.distributionKey = object.distributionKey?.map((e) => e) || []; - message.table = (object.table !== undefined && object.table !== null) ? Table.fromPartial(object.table) : undefined; - message.handlePkConflictBehavior = object.handlePkConflictBehavior ?? HandleConflictBehavior.NO_CHECK_UNSPECIFIED; - return message; - }, -}; - -function createBaseLookupNode(): LookupNode { - return { - arrangeKey: [], - streamKey: [], - useCurrentEpoch: false, - columnMapping: [], - arrangementTableId: undefined, - arrangementTableInfo: undefined, - }; -} - -export const LookupNode = { - fromJSON(object: any): LookupNode { - return { - arrangeKey: Array.isArray(object?.arrangeKey) ? object.arrangeKey.map((e: any) => Number(e)) : [], - streamKey: Array.isArray(object?.streamKey) ? object.streamKey.map((e: any) => Number(e)) : [], - useCurrentEpoch: isSet(object.useCurrentEpoch) ? Boolean(object.useCurrentEpoch) : false, - columnMapping: Array.isArray(object?.columnMapping) ? object.columnMapping.map((e: any) => Number(e)) : [], - arrangementTableId: isSet(object.tableId) - ? { $case: "tableId", tableId: Number(object.tableId) } - : isSet(object.indexId) - ? { $case: "indexId", indexId: Number(object.indexId) } - : undefined, - arrangementTableInfo: isSet(object.arrangementTableInfo) - ? 
ArrangementInfo.fromJSON(object.arrangementTableInfo) - : undefined, - }; - }, - - toJSON(message: LookupNode): unknown { - const obj: any = {}; - if (message.arrangeKey) { - obj.arrangeKey = message.arrangeKey.map((e) => Math.round(e)); - } else { - obj.arrangeKey = []; - } - if (message.streamKey) { - obj.streamKey = message.streamKey.map((e) => Math.round(e)); - } else { - obj.streamKey = []; - } - message.useCurrentEpoch !== undefined && (obj.useCurrentEpoch = message.useCurrentEpoch); - if (message.columnMapping) { - obj.columnMapping = message.columnMapping.map((e) => Math.round(e)); - } else { - obj.columnMapping = []; - } - message.arrangementTableId?.$case === "tableId" && (obj.tableId = Math.round(message.arrangementTableId?.tableId)); - message.arrangementTableId?.$case === "indexId" && (obj.indexId = Math.round(message.arrangementTableId?.indexId)); - message.arrangementTableInfo !== undefined && (obj.arrangementTableInfo = message.arrangementTableInfo - ? ArrangementInfo.toJSON(message.arrangementTableInfo) - : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<LookupNode>, I>>(object: I): LookupNode { - const message = createBaseLookupNode(); - message.arrangeKey = object.arrangeKey?.map((e) => e) || []; - message.streamKey = object.streamKey?.map((e) => e) || []; - message.useCurrentEpoch = object.useCurrentEpoch ?? false; - message.columnMapping = object.columnMapping?.map((e) => e) || []; - if ( - object.arrangementTableId?.$case === "tableId" && - object.arrangementTableId?.tableId !== undefined && - object.arrangementTableId?.tableId !== null - ) { - message.arrangementTableId = { $case: "tableId", tableId: object.arrangementTableId.tableId }; - } - if ( - object.arrangementTableId?.$case === "indexId" && - object.arrangementTableId?.indexId !== undefined && - object.arrangementTableId?.indexId !== null - ) { - message.arrangementTableId = { $case: "indexId", indexId: object.arrangementTableId.indexId }; - } - message.arrangementTableInfo = (object.arrangementTableInfo !== undefined && object.arrangementTableInfo !== null) - ? ArrangementInfo.fromPartial(object.arrangementTableInfo) - : undefined; - return message; - }, -}; - -function createBaseWatermarkFilterNode(): WatermarkFilterNode { - return { watermarkDescs: [], tables: [] }; -} - -export const WatermarkFilterNode = { - fromJSON(object: any): WatermarkFilterNode { - return { - watermarkDescs: Array.isArray(object?.watermarkDescs) - ? object.watermarkDescs.map((e: any) => WatermarkDesc.fromJSON(e)) - : [], - tables: Array.isArray(object?.tables) - ? object.tables.map((e: any) => Table.fromJSON(e)) - : [], - }; - }, - - toJSON(message: WatermarkFilterNode): unknown { - const obj: any = {}; - if (message.watermarkDescs) { - obj.watermarkDescs = message.watermarkDescs.map((e) => e ? WatermarkDesc.toJSON(e) : undefined); - } else { - obj.watermarkDescs = []; - } - if (message.tables) { - obj.tables = message.tables.map((e) => e ? 
Table.toJSON(e) : undefined); - } else { - obj.tables = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<WatermarkFilterNode>, I>>(object: I): WatermarkFilterNode { - const message = createBaseWatermarkFilterNode(); - message.watermarkDescs = object.watermarkDescs?.map((e) => WatermarkDesc.fromPartial(e)) || []; - message.tables = object.tables?.map((e) => Table.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseUnionNode(): UnionNode { - return {}; -} - -export const UnionNode = { - fromJSON(_: any): UnionNode { - return {}; - }, - - toJSON(_: UnionNode): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<UnionNode>, I>>(_: I): UnionNode { - const message = createBaseUnionNode(); - return message; - }, -}; - -function createBaseLookupUnionNode(): LookupUnionNode { - return { order: [] }; -} - -export const LookupUnionNode = { - fromJSON(object: any): LookupUnionNode { - return { order: Array.isArray(object?.order) ? object.order.map((e: any) => Number(e)) : [] }; - }, - - toJSON(message: LookupUnionNode): unknown { - const obj: any = {}; - if (message.order) { - obj.order = message.order.map((e) => Math.round(e)); - } else { - obj.order = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<LookupUnionNode>, I>>(object: I): LookupUnionNode { - const message = createBaseLookupUnionNode(); - message.order = object.order?.map((e) => e) || []; - return message; - }, -}; - -function createBaseExpandNode(): ExpandNode { - return { columnSubsets: [] }; -} - -export const ExpandNode = { - fromJSON(object: any): ExpandNode { - return { - columnSubsets: Array.isArray(object?.columnSubsets) - ? object.columnSubsets.map((e: any) => ExpandNode_Subset.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ExpandNode): unknown { - const obj: any = {}; - if (message.columnSubsets) { - obj.columnSubsets = message.columnSubsets.map((e) => e ? ExpandNode_Subset.toJSON(e) : undefined); - } else { - obj.columnSubsets = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ExpandNode>, I>>(object: I): ExpandNode { - const message = createBaseExpandNode(); - message.columnSubsets = object.columnSubsets?.map((e) => ExpandNode_Subset.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseExpandNode_Subset(): ExpandNode_Subset { - return { columnIndices: [] }; -} - -export const ExpandNode_Subset = { - fromJSON(object: any): ExpandNode_Subset { - return { - columnIndices: Array.isArray(object?.columnIndices) ? object.columnIndices.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: ExpandNode_Subset): unknown { - const obj: any = {}; - if (message.columnIndices) { - obj.columnIndices = message.columnIndices.map((e) => Math.round(e)); - } else { - obj.columnIndices = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ExpandNode_Subset>, I>>(object: I): ExpandNode_Subset { - const message = createBaseExpandNode_Subset(); - message.columnIndices = object.columnIndices?.map((e) => e) || []; - return message; - }, -}; - -function createBaseProjectSetNode(): ProjectSetNode { - return { selectList: [] }; -} - -export const ProjectSetNode = { - fromJSON(object: any): ProjectSetNode { - return { - selectList: Array.isArray(object?.selectList) - ? object.selectList.map((e: any) => ProjectSetSelectItem.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ProjectSetNode): unknown { - const obj: any = {}; - if (message.selectList) { - obj.selectList = message.selectList.map((e) => e ? 
ProjectSetSelectItem.toJSON(e) : undefined); - } else { - obj.selectList = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<ProjectSetNode>, I>>(object: I): ProjectSetNode { - const message = createBaseProjectSetNode(); - message.selectList = object.selectList?.map((e) => ProjectSetSelectItem.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseSortNode(): SortNode { - return { stateTable: undefined, sortColumnIndex: 0 }; -} - -export const SortNode = { - fromJSON(object: any): SortNode { - return { - stateTable: isSet(object.stateTable) ? Table.fromJSON(object.stateTable) : undefined, - sortColumnIndex: isSet(object.sortColumnIndex) ? Number(object.sortColumnIndex) : 0, - }; - }, - - toJSON(message: SortNode): unknown { - const obj: any = {}; - message.stateTable !== undefined && - (obj.stateTable = message.stateTable ? Table.toJSON(message.stateTable) : undefined); - message.sortColumnIndex !== undefined && (obj.sortColumnIndex = Math.round(message.sortColumnIndex)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<SortNode>, I>>(object: I): SortNode { - const message = createBaseSortNode(); - message.stateTable = (object.stateTable !== undefined && object.stateTable !== null) - ? Table.fromPartial(object.stateTable) - : undefined; - message.sortColumnIndex = object.sortColumnIndex ?? 0; - return message; - }, -}; - -function createBaseDmlNode(): DmlNode { - return { tableId: 0, tableVersionId: 0, columnDescs: [] }; -} - -export const DmlNode = { - fromJSON(object: any): DmlNode { - return { - tableId: isSet(object.tableId) ? Number(object.tableId) : 0, - tableVersionId: isSet(object.tableVersionId) ? Number(object.tableVersionId) : 0, - columnDescs: Array.isArray(object?.columnDescs) ? object.columnDescs.map((e: any) => ColumnDesc.fromJSON(e)) : [], - }; - }, - - toJSON(message: DmlNode): unknown { - const obj: any = {}; - message.tableId !== undefined && (obj.tableId = Math.round(message.tableId)); - message.tableVersionId !== undefined && (obj.tableVersionId = Math.round(message.tableVersionId)); - if (message.columnDescs) { - obj.columnDescs = message.columnDescs.map((e) => e ? ColumnDesc.toJSON(e) : undefined); - } else { - obj.columnDescs = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<DmlNode>, I>>(object: I): DmlNode { - const message = createBaseDmlNode(); - message.tableId = object.tableId ?? 0; - message.tableVersionId = object.tableVersionId ?? 0; - message.columnDescs = object.columnDescs?.map((e) => ColumnDesc.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseRowIdGenNode(): RowIdGenNode { - return { rowIdIndex: 0 }; -} - -export const RowIdGenNode = { - fromJSON(object: any): RowIdGenNode { - return { rowIdIndex: isSet(object.rowIdIndex) ? Number(object.rowIdIndex) : 0 }; - }, - - toJSON(message: RowIdGenNode): unknown { - const obj: any = {}; - message.rowIdIndex !== undefined && (obj.rowIdIndex = Math.round(message.rowIdIndex)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<RowIdGenNode>, I>>(object: I): RowIdGenNode { - const message = createBaseRowIdGenNode(); - message.rowIdIndex = object.rowIdIndex ?? 0; - return message; - }, -}; - -function createBaseNowNode(): NowNode { - return { stateTable: undefined }; -} - -export const NowNode = { - fromJSON(object: any): NowNode { - return { stateTable: isSet(object.stateTable) ? Table.fromJSON(object.stateTable) : undefined }; - }, - - toJSON(message: NowNode): unknown { - const obj: any = {}; - message.stateTable !== undefined && - (obj.stateTable = message.stateTable ? 
Table.toJSON(message.stateTable) : undefined); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<NowNode>, I>>(object: I): NowNode { - const message = createBaseNowNode(); - message.stateTable = (object.stateTable !== undefined && object.stateTable !== null) - ? Table.fromPartial(object.stateTable) - : undefined; - return message; - }, -}; - -function createBaseStreamNode(): StreamNode { - return { nodeBody: undefined, operatorId: 0, input: [], streamKey: [], appendOnly: false, identity: "", fields: [] }; -} - -export const StreamNode = { - fromJSON(object: any): StreamNode { - return { - nodeBody: isSet(object.source) - ? { $case: "source", source: SourceNode.fromJSON(object.source) } - : isSet(object.project) - ? { $case: "project", project: ProjectNode.fromJSON(object.project) } - : isSet(object.filter) - ? { $case: "filter", filter: FilterNode.fromJSON(object.filter) } - : isSet(object.materialize) - ? { $case: "materialize", materialize: MaterializeNode.fromJSON(object.materialize) } - : isSet(object.localSimpleAgg) - ? { $case: "localSimpleAgg", localSimpleAgg: SimpleAggNode.fromJSON(object.localSimpleAgg) } - : isSet(object.globalSimpleAgg) - ? { $case: "globalSimpleAgg", globalSimpleAgg: SimpleAggNode.fromJSON(object.globalSimpleAgg) } - : isSet(object.hashAgg) - ? { $case: "hashAgg", hashAgg: HashAggNode.fromJSON(object.hashAgg) } - : isSet(object.appendOnlyTopN) - ? { $case: "appendOnlyTopN", appendOnlyTopN: TopNNode.fromJSON(object.appendOnlyTopN) } - : isSet(object.hashJoin) - ? { $case: "hashJoin", hashJoin: HashJoinNode.fromJSON(object.hashJoin) } - : isSet(object.topN) - ? { $case: "topN", topN: TopNNode.fromJSON(object.topN) } - : isSet(object.hopWindow) - ? { $case: "hopWindow", hopWindow: HopWindowNode.fromJSON(object.hopWindow) } - : isSet(object.merge) - ? { $case: "merge", merge: MergeNode.fromJSON(object.merge) } - : isSet(object.exchange) - ? { $case: "exchange", exchange: ExchangeNode.fromJSON(object.exchange) } - : isSet(object.chain) - ? { $case: "chain", chain: ChainNode.fromJSON(object.chain) } - : isSet(object.batchPlan) - ? { $case: "batchPlan", batchPlan: BatchPlanNode.fromJSON(object.batchPlan) } - : isSet(object.lookup) - ? { $case: "lookup", lookup: LookupNode.fromJSON(object.lookup) } - : isSet(object.arrange) - ? { $case: "arrange", arrange: ArrangeNode.fromJSON(object.arrange) } - : isSet(object.lookupUnion) - ? { $case: "lookupUnion", lookupUnion: LookupUnionNode.fromJSON(object.lookupUnion) } - : isSet(object.union) - ? { $case: "union", union: UnionNode.fromJSON(object.union) } - : isSet(object.deltaIndexJoin) - ? { $case: "deltaIndexJoin", deltaIndexJoin: DeltaIndexJoinNode.fromJSON(object.deltaIndexJoin) } - : isSet(object.sink) - ? { $case: "sink", sink: SinkNode.fromJSON(object.sink) } - : isSet(object.expand) - ? { $case: "expand", expand: ExpandNode.fromJSON(object.expand) } - : isSet(object.dynamicFilter) - ? { $case: "dynamicFilter", dynamicFilter: DynamicFilterNode.fromJSON(object.dynamicFilter) } - : isSet(object.projectSet) - ? { $case: "projectSet", projectSet: ProjectSetNode.fromJSON(object.projectSet) } - : isSet(object.groupTopN) - ? { $case: "groupTopN", groupTopN: GroupTopNNode.fromJSON(object.groupTopN) } - : isSet(object.sort) - ? { $case: "sort", sort: SortNode.fromJSON(object.sort) } - : isSet(object.watermarkFilter) - ? { $case: "watermarkFilter", watermarkFilter: WatermarkFilterNode.fromJSON(object.watermarkFilter) } - : isSet(object.dml) - ? { $case: "dml", dml: DmlNode.fromJSON(object.dml) } - : isSet(object.rowIdGen) - ? 
{ $case: "rowIdGen", rowIdGen: RowIdGenNode.fromJSON(object.rowIdGen) } - : isSet(object.now) - ? { $case: "now", now: NowNode.fromJSON(object.now) } - : isSet(object.appendOnlyGroupTopN) - ? { $case: "appendOnlyGroupTopN", appendOnlyGroupTopN: GroupTopNNode.fromJSON(object.appendOnlyGroupTopN) } - : undefined, - operatorId: isSet(object.operatorId) ? Number(object.operatorId) : 0, - input: Array.isArray(object?.input) - ? object.input.map((e: any) => StreamNode.fromJSON(e)) - : [], - streamKey: Array.isArray(object?.streamKey) ? object.streamKey.map((e: any) => Number(e)) : [], - appendOnly: isSet(object.appendOnly) ? Boolean(object.appendOnly) : false, - identity: isSet(object.identity) ? String(object.identity) : "", - fields: Array.isArray(object?.fields) ? object.fields.map((e: any) => Field.fromJSON(e)) : [], - }; - }, - - toJSON(message: StreamNode): unknown { - const obj: any = {}; - message.nodeBody?.$case === "source" && - (obj.source = message.nodeBody?.source ? SourceNode.toJSON(message.nodeBody?.source) : undefined); - message.nodeBody?.$case === "project" && - (obj.project = message.nodeBody?.project ? ProjectNode.toJSON(message.nodeBody?.project) : undefined); - message.nodeBody?.$case === "filter" && - (obj.filter = message.nodeBody?.filter ? FilterNode.toJSON(message.nodeBody?.filter) : undefined); - message.nodeBody?.$case === "materialize" && (obj.materialize = message.nodeBody?.materialize - ? MaterializeNode.toJSON(message.nodeBody?.materialize) - : undefined); - message.nodeBody?.$case === "localSimpleAgg" && (obj.localSimpleAgg = message.nodeBody?.localSimpleAgg - ? SimpleAggNode.toJSON(message.nodeBody?.localSimpleAgg) - : undefined); - message.nodeBody?.$case === "globalSimpleAgg" && (obj.globalSimpleAgg = message.nodeBody?.globalSimpleAgg - ? SimpleAggNode.toJSON(message.nodeBody?.globalSimpleAgg) - : undefined); - message.nodeBody?.$case === "hashAgg" && - (obj.hashAgg = message.nodeBody?.hashAgg ? HashAggNode.toJSON(message.nodeBody?.hashAgg) : undefined); - message.nodeBody?.$case === "appendOnlyTopN" && (obj.appendOnlyTopN = message.nodeBody?.appendOnlyTopN - ? TopNNode.toJSON(message.nodeBody?.appendOnlyTopN) - : undefined); - message.nodeBody?.$case === "hashJoin" && - (obj.hashJoin = message.nodeBody?.hashJoin ? HashJoinNode.toJSON(message.nodeBody?.hashJoin) : undefined); - message.nodeBody?.$case === "topN" && - (obj.topN = message.nodeBody?.topN ? TopNNode.toJSON(message.nodeBody?.topN) : undefined); - message.nodeBody?.$case === "hopWindow" && - (obj.hopWindow = message.nodeBody?.hopWindow ? HopWindowNode.toJSON(message.nodeBody?.hopWindow) : undefined); - message.nodeBody?.$case === "merge" && - (obj.merge = message.nodeBody?.merge ? MergeNode.toJSON(message.nodeBody?.merge) : undefined); - message.nodeBody?.$case === "exchange" && - (obj.exchange = message.nodeBody?.exchange ? ExchangeNode.toJSON(message.nodeBody?.exchange) : undefined); - message.nodeBody?.$case === "chain" && - (obj.chain = message.nodeBody?.chain ? ChainNode.toJSON(message.nodeBody?.chain) : undefined); - message.nodeBody?.$case === "batchPlan" && - (obj.batchPlan = message.nodeBody?.batchPlan ? BatchPlanNode.toJSON(message.nodeBody?.batchPlan) : undefined); - message.nodeBody?.$case === "lookup" && - (obj.lookup = message.nodeBody?.lookup ? LookupNode.toJSON(message.nodeBody?.lookup) : undefined); - message.nodeBody?.$case === "arrange" && - (obj.arrange = message.nodeBody?.arrange ? 
ArrangeNode.toJSON(message.nodeBody?.arrange) : undefined); - message.nodeBody?.$case === "lookupUnion" && (obj.lookupUnion = message.nodeBody?.lookupUnion - ? LookupUnionNode.toJSON(message.nodeBody?.lookupUnion) - : undefined); - message.nodeBody?.$case === "union" && - (obj.union = message.nodeBody?.union ? UnionNode.toJSON(message.nodeBody?.union) : undefined); - message.nodeBody?.$case === "deltaIndexJoin" && (obj.deltaIndexJoin = message.nodeBody?.deltaIndexJoin - ? DeltaIndexJoinNode.toJSON(message.nodeBody?.deltaIndexJoin) - : undefined); - message.nodeBody?.$case === "sink" && - (obj.sink = message.nodeBody?.sink ? SinkNode.toJSON(message.nodeBody?.sink) : undefined); - message.nodeBody?.$case === "expand" && - (obj.expand = message.nodeBody?.expand ? ExpandNode.toJSON(message.nodeBody?.expand) : undefined); - message.nodeBody?.$case === "dynamicFilter" && (obj.dynamicFilter = message.nodeBody?.dynamicFilter - ? DynamicFilterNode.toJSON(message.nodeBody?.dynamicFilter) - : undefined); - message.nodeBody?.$case === "projectSet" && - (obj.projectSet = message.nodeBody?.projectSet ? ProjectSetNode.toJSON(message.nodeBody?.projectSet) : undefined); - message.nodeBody?.$case === "groupTopN" && - (obj.groupTopN = message.nodeBody?.groupTopN ? GroupTopNNode.toJSON(message.nodeBody?.groupTopN) : undefined); - message.nodeBody?.$case === "sort" && - (obj.sort = message.nodeBody?.sort ? SortNode.toJSON(message.nodeBody?.sort) : undefined); - message.nodeBody?.$case === "watermarkFilter" && (obj.watermarkFilter = message.nodeBody?.watermarkFilter - ? WatermarkFilterNode.toJSON(message.nodeBody?.watermarkFilter) - : undefined); - message.nodeBody?.$case === "dml" && - (obj.dml = message.nodeBody?.dml ? DmlNode.toJSON(message.nodeBody?.dml) : undefined); - message.nodeBody?.$case === "rowIdGen" && - (obj.rowIdGen = message.nodeBody?.rowIdGen ? RowIdGenNode.toJSON(message.nodeBody?.rowIdGen) : undefined); - message.nodeBody?.$case === "now" && - (obj.now = message.nodeBody?.now ? NowNode.toJSON(message.nodeBody?.now) : undefined); - message.nodeBody?.$case === "appendOnlyGroupTopN" && - (obj.appendOnlyGroupTopN = message.nodeBody?.appendOnlyGroupTopN - ? GroupTopNNode.toJSON(message.nodeBody?.appendOnlyGroupTopN) - : undefined); - message.operatorId !== undefined && (obj.operatorId = Math.round(message.operatorId)); - if (message.input) { - obj.input = message.input.map((e) => - e ? StreamNode.toJSON(e) : undefined - ); - } else { - obj.input = []; - } - if (message.streamKey) { - obj.streamKey = message.streamKey.map((e) => - Math.round(e) - ); - } else { - obj.streamKey = []; - } - message.appendOnly !== undefined && (obj.appendOnly = message.appendOnly); - message.identity !== undefined && (obj.identity = message.identity); - if (message.fields) { - obj.fields = message.fields.map((e) => e ? 
Field.toJSON(e) : undefined); - } else { - obj.fields = []; - } - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<StreamNode>, I>>(object: I): StreamNode { - const message = createBaseStreamNode(); - if ( - object.nodeBody?.$case === "source" && object.nodeBody?.source !== undefined && object.nodeBody?.source !== null - ) { - message.nodeBody = { $case: "source", source: SourceNode.fromPartial(object.nodeBody.source) }; - } - if ( - object.nodeBody?.$case === "project" && - object.nodeBody?.project !== undefined && - object.nodeBody?.project !== null - ) { - message.nodeBody = { $case: "project", project: ProjectNode.fromPartial(object.nodeBody.project) }; - } - if ( - object.nodeBody?.$case === "filter" && object.nodeBody?.filter !== undefined && object.nodeBody?.filter !== null - ) { - message.nodeBody = { $case: "filter", filter: FilterNode.fromPartial(object.nodeBody.filter) }; - } - if ( - object.nodeBody?.$case === "materialize" && - object.nodeBody?.materialize !== undefined && - object.nodeBody?.materialize !== null - ) { - message.nodeBody = { - $case: "materialize", - materialize: MaterializeNode.fromPartial(object.nodeBody.materialize), - }; - } - if ( - object.nodeBody?.$case === "localSimpleAgg" && - object.nodeBody?.localSimpleAgg !== undefined && - object.nodeBody?.localSimpleAgg !== null - ) { - message.nodeBody = { - $case: "localSimpleAgg", - localSimpleAgg: SimpleAggNode.fromPartial(object.nodeBody.localSimpleAgg), - }; - } - if ( - object.nodeBody?.$case === "globalSimpleAgg" && - object.nodeBody?.globalSimpleAgg !== undefined && - object.nodeBody?.globalSimpleAgg !== null - ) { - message.nodeBody = { - $case: "globalSimpleAgg", - globalSimpleAgg: SimpleAggNode.fromPartial(object.nodeBody.globalSimpleAgg), - }; - } - if ( - object.nodeBody?.$case === "hashAgg" && - object.nodeBody?.hashAgg !== undefined && - object.nodeBody?.hashAgg !== null - ) { - message.nodeBody = { $case: "hashAgg", hashAgg: HashAggNode.fromPartial(object.nodeBody.hashAgg) }; - } - if ( - object.nodeBody?.$case === "appendOnlyTopN" && - object.nodeBody?.appendOnlyTopN !== undefined && - object.nodeBody?.appendOnlyTopN !== null - ) { - message.nodeBody = { - $case: "appendOnlyTopN", - appendOnlyTopN: TopNNode.fromPartial(object.nodeBody.appendOnlyTopN), - }; - } - if ( - object.nodeBody?.$case === "hashJoin" && - object.nodeBody?.hashJoin !== undefined && - object.nodeBody?.hashJoin !== null - ) { - message.nodeBody = { $case: "hashJoin", hashJoin: HashJoinNode.fromPartial(object.nodeBody.hashJoin) }; - } - if (object.nodeBody?.$case === "topN" && object.nodeBody?.topN !== undefined && object.nodeBody?.topN !== null) { - message.nodeBody = { $case: "topN", topN: TopNNode.fromPartial(object.nodeBody.topN) }; - } - if ( - object.nodeBody?.$case === "hopWindow" && - object.nodeBody?.hopWindow !== undefined && - object.nodeBody?.hopWindow !== null - ) { - message.nodeBody = { $case: "hopWindow", hopWindow: HopWindowNode.fromPartial(object.nodeBody.hopWindow) }; - } - if (object.nodeBody?.$case === "merge" && object.nodeBody?.merge !== undefined && object.nodeBody?.merge !== null) { - message.nodeBody = { $case: "merge", merge: MergeNode.fromPartial(object.nodeBody.merge) }; - } - if ( - object.nodeBody?.$case === "exchange" && - object.nodeBody?.exchange !== undefined && - object.nodeBody?.exchange !== null - ) { - message.nodeBody = { $case: "exchange", exchange: ExchangeNode.fromPartial(object.nodeBody.exchange) }; - } - if (object.nodeBody?.$case === "chain" && object.nodeBody?.chain !== undefined && object.nodeBody?.chain !== 
null) { - message.nodeBody = { $case: "chain", chain: ChainNode.fromPartial(object.nodeBody.chain) }; - } - if ( - object.nodeBody?.$case === "batchPlan" && - object.nodeBody?.batchPlan !== undefined && - object.nodeBody?.batchPlan !== null - ) { - message.nodeBody = { $case: "batchPlan", batchPlan: BatchPlanNode.fromPartial(object.nodeBody.batchPlan) }; - } - if ( - object.nodeBody?.$case === "lookup" && object.nodeBody?.lookup !== undefined && object.nodeBody?.lookup !== null - ) { - message.nodeBody = { $case: "lookup", lookup: LookupNode.fromPartial(object.nodeBody.lookup) }; - } - if ( - object.nodeBody?.$case === "arrange" && - object.nodeBody?.arrange !== undefined && - object.nodeBody?.arrange !== null - ) { - message.nodeBody = { $case: "arrange", arrange: ArrangeNode.fromPartial(object.nodeBody.arrange) }; - } - if ( - object.nodeBody?.$case === "lookupUnion" && - object.nodeBody?.lookupUnion !== undefined && - object.nodeBody?.lookupUnion !== null - ) { - message.nodeBody = { - $case: "lookupUnion", - lookupUnion: LookupUnionNode.fromPartial(object.nodeBody.lookupUnion), - }; - } - if (object.nodeBody?.$case === "union" && object.nodeBody?.union !== undefined && object.nodeBody?.union !== null) { - message.nodeBody = { $case: "union", union: UnionNode.fromPartial(object.nodeBody.union) }; - } - if ( - object.nodeBody?.$case === "deltaIndexJoin" && - object.nodeBody?.deltaIndexJoin !== undefined && - object.nodeBody?.deltaIndexJoin !== null - ) { - message.nodeBody = { - $case: "deltaIndexJoin", - deltaIndexJoin: DeltaIndexJoinNode.fromPartial(object.nodeBody.deltaIndexJoin), - }; - } - if (object.nodeBody?.$case === "sink" && object.nodeBody?.sink !== undefined && object.nodeBody?.sink !== null) { - message.nodeBody = { $case: "sink", sink: SinkNode.fromPartial(object.nodeBody.sink) }; - } - if ( - object.nodeBody?.$case === "expand" && object.nodeBody?.expand !== undefined && object.nodeBody?.expand !== null - ) { - message.nodeBody = { $case: "expand", expand: ExpandNode.fromPartial(object.nodeBody.expand) }; - } - if ( - object.nodeBody?.$case === "dynamicFilter" && - object.nodeBody?.dynamicFilter !== undefined && - object.nodeBody?.dynamicFilter !== null - ) { - message.nodeBody = { - $case: "dynamicFilter", - dynamicFilter: DynamicFilterNode.fromPartial(object.nodeBody.dynamicFilter), - }; - } - if ( - object.nodeBody?.$case === "projectSet" && - object.nodeBody?.projectSet !== undefined && - object.nodeBody?.projectSet !== null - ) { - message.nodeBody = { $case: "projectSet", projectSet: ProjectSetNode.fromPartial(object.nodeBody.projectSet) }; - } - if ( - object.nodeBody?.$case === "groupTopN" && - object.nodeBody?.groupTopN !== undefined && - object.nodeBody?.groupTopN !== null - ) { - message.nodeBody = { $case: "groupTopN", groupTopN: GroupTopNNode.fromPartial(object.nodeBody.groupTopN) }; - } - if (object.nodeBody?.$case === "sort" && object.nodeBody?.sort !== undefined && object.nodeBody?.sort !== null) { - message.nodeBody = { $case: "sort", sort: SortNode.fromPartial(object.nodeBody.sort) }; - } - if ( - object.nodeBody?.$case === "watermarkFilter" && - object.nodeBody?.watermarkFilter !== undefined && - object.nodeBody?.watermarkFilter !== null - ) { - message.nodeBody = { - $case: "watermarkFilter", - watermarkFilter: WatermarkFilterNode.fromPartial(object.nodeBody.watermarkFilter), - }; - } - if (object.nodeBody?.$case === "dml" && object.nodeBody?.dml !== undefined && object.nodeBody?.dml !== null) { - message.nodeBody = { $case: "dml", dml: 
DmlNode.fromPartial(object.nodeBody.dml) }; - } - if ( - object.nodeBody?.$case === "rowIdGen" && - object.nodeBody?.rowIdGen !== undefined && - object.nodeBody?.rowIdGen !== null - ) { - message.nodeBody = { $case: "rowIdGen", rowIdGen: RowIdGenNode.fromPartial(object.nodeBody.rowIdGen) }; - } - if (object.nodeBody?.$case === "now" && object.nodeBody?.now !== undefined && object.nodeBody?.now !== null) { - message.nodeBody = { $case: "now", now: NowNode.fromPartial(object.nodeBody.now) }; - } - if ( - object.nodeBody?.$case === "appendOnlyGroupTopN" && - object.nodeBody?.appendOnlyGroupTopN !== undefined && - object.nodeBody?.appendOnlyGroupTopN !== null - ) { - message.nodeBody = { - $case: "appendOnlyGroupTopN", - appendOnlyGroupTopN: GroupTopNNode.fromPartial(object.nodeBody.appendOnlyGroupTopN), - }; - } - message.operatorId = object.operatorId ?? 0; - message.input = object.input?.map((e) => StreamNode.fromPartial(e)) || []; - message.streamKey = object.streamKey?.map((e) => e) || []; - message.appendOnly = object.appendOnly ?? false; - message.identity = object.identity ?? ""; - message.fields = object.fields?.map((e) => Field.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseDispatchStrategy(): DispatchStrategy { - return { type: DispatcherType.UNSPECIFIED, distKeyIndices: [], outputIndices: [] }; -} - -export const DispatchStrategy = { - fromJSON(object: any): DispatchStrategy { - return { - type: isSet(object.type) ? dispatcherTypeFromJSON(object.type) : DispatcherType.UNSPECIFIED, - distKeyIndices: Array.isArray(object?.distKeyIndices) ? object.distKeyIndices.map((e: any) => Number(e)) : [], - outputIndices: Array.isArray(object?.outputIndices) ? object.outputIndices.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: DispatchStrategy): unknown { - const obj: any = {}; - message.type !== undefined && (obj.type = dispatcherTypeToJSON(message.type)); - if (message.distKeyIndices) { - obj.distKeyIndices = message.distKeyIndices.map((e) => Math.round(e)); - } else { - obj.distKeyIndices = []; - } - if (message.outputIndices) { - obj.outputIndices = message.outputIndices.map((e) => Math.round(e)); - } else { - obj.outputIndices = []; - } - return obj; - }, - - fromPartial, I>>(object: I): DispatchStrategy { - const message = createBaseDispatchStrategy(); - message.type = object.type ?? DispatcherType.UNSPECIFIED; - message.distKeyIndices = object.distKeyIndices?.map((e) => e) || []; - message.outputIndices = object.outputIndices?.map((e) => e) || []; - return message; - }, -}; - -function createBaseDispatcher(): Dispatcher { - return { - type: DispatcherType.UNSPECIFIED, - distKeyIndices: [], - outputIndices: [], - hashMapping: undefined, - dispatcherId: 0, - downstreamActorId: [], - }; -} - -export const Dispatcher = { - fromJSON(object: any): Dispatcher { - return { - type: isSet(object.type) ? dispatcherTypeFromJSON(object.type) : DispatcherType.UNSPECIFIED, - distKeyIndices: Array.isArray(object?.distKeyIndices) ? object.distKeyIndices.map((e: any) => Number(e)) : [], - outputIndices: Array.isArray(object?.outputIndices) ? object.outputIndices.map((e: any) => Number(e)) : [], - hashMapping: isSet(object.hashMapping) ? ActorMapping.fromJSON(object.hashMapping) : undefined, - dispatcherId: isSet(object.dispatcherId) ? Number(object.dispatcherId) : 0, - downstreamActorId: Array.isArray(object?.downstreamActorId) - ? 
object.downstreamActorId.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: Dispatcher): unknown { - const obj: any = {}; - message.type !== undefined && (obj.type = dispatcherTypeToJSON(message.type)); - if (message.distKeyIndices) { - obj.distKeyIndices = message.distKeyIndices.map((e) => Math.round(e)); - } else { - obj.distKeyIndices = []; - } - if (message.outputIndices) { - obj.outputIndices = message.outputIndices.map((e) => Math.round(e)); - } else { - obj.outputIndices = []; - } - message.hashMapping !== undefined && - (obj.hashMapping = message.hashMapping ? ActorMapping.toJSON(message.hashMapping) : undefined); - message.dispatcherId !== undefined && (obj.dispatcherId = Math.round(message.dispatcherId)); - if (message.downstreamActorId) { - obj.downstreamActorId = message.downstreamActorId.map((e) => Math.round(e)); - } else { - obj.downstreamActorId = []; - } - return obj; - }, - - fromPartial, I>>(object: I): Dispatcher { - const message = createBaseDispatcher(); - message.type = object.type ?? DispatcherType.UNSPECIFIED; - message.distKeyIndices = object.distKeyIndices?.map((e) => e) || []; - message.outputIndices = object.outputIndices?.map((e) => e) || []; - message.hashMapping = (object.hashMapping !== undefined && object.hashMapping !== null) - ? ActorMapping.fromPartial(object.hashMapping) - : undefined; - message.dispatcherId = object.dispatcherId ?? 0; - message.downstreamActorId = object.downstreamActorId?.map((e) => e) || []; - return message; - }, -}; - -function createBaseStreamActor(): StreamActor { - return { - actorId: 0, - fragmentId: 0, - nodes: undefined, - dispatcher: [], - upstreamActorId: [], - vnodeBitmap: undefined, - mviewDefinition: "", - }; -} - -export const StreamActor = { - fromJSON(object: any): StreamActor { - return { - actorId: isSet(object.actorId) ? Number(object.actorId) : 0, - fragmentId: isSet(object.fragmentId) ? Number(object.fragmentId) : 0, - nodes: isSet(object.nodes) ? StreamNode.fromJSON(object.nodes) : undefined, - dispatcher: Array.isArray(object?.dispatcher) ? object.dispatcher.map((e: any) => Dispatcher.fromJSON(e)) : [], - upstreamActorId: Array.isArray(object?.upstreamActorId) ? object.upstreamActorId.map((e: any) => Number(e)) : [], - vnodeBitmap: isSet(object.vnodeBitmap) ? Buffer.fromJSON(object.vnodeBitmap) : undefined, - mviewDefinition: isSet(object.mviewDefinition) ? String(object.mviewDefinition) : "", - }; - }, - - toJSON(message: StreamActor): unknown { - const obj: any = {}; - message.actorId !== undefined && (obj.actorId = Math.round(message.actorId)); - message.fragmentId !== undefined && (obj.fragmentId = Math.round(message.fragmentId)); - message.nodes !== undefined && (obj.nodes = message.nodes ? StreamNode.toJSON(message.nodes) : undefined); - if (message.dispatcher) { - obj.dispatcher = message.dispatcher.map((e) => e ? Dispatcher.toJSON(e) : undefined); - } else { - obj.dispatcher = []; - } - if (message.upstreamActorId) { - obj.upstreamActorId = message.upstreamActorId.map((e) => Math.round(e)); - } else { - obj.upstreamActorId = []; - } - message.vnodeBitmap !== undefined && - (obj.vnodeBitmap = message.vnodeBitmap ? Buffer.toJSON(message.vnodeBitmap) : undefined); - message.mviewDefinition !== undefined && (obj.mviewDefinition = message.mviewDefinition); - return obj; - }, - - fromPartial, I>>(object: I): StreamActor { - const message = createBaseStreamActor(); - message.actorId = object.actorId ?? 0; - message.fragmentId = object.fragmentId ?? 
0; - message.nodes = (object.nodes !== undefined && object.nodes !== null) - ? StreamNode.fromPartial(object.nodes) - : undefined; - message.dispatcher = object.dispatcher?.map((e) => Dispatcher.fromPartial(e)) || []; - message.upstreamActorId = object.upstreamActorId?.map((e) => e) || []; - message.vnodeBitmap = (object.vnodeBitmap !== undefined && object.vnodeBitmap !== null) - ? Buffer.fromPartial(object.vnodeBitmap) - : undefined; - message.mviewDefinition = object.mviewDefinition ?? ""; - return message; - }, -}; - -function createBaseStreamEnvironment(): StreamEnvironment { - return { timezone: "" }; -} - -export const StreamEnvironment = { - fromJSON(object: any): StreamEnvironment { - return { timezone: isSet(object.timezone) ? String(object.timezone) : "" }; - }, - - toJSON(message: StreamEnvironment): unknown { - const obj: any = {}; - message.timezone !== undefined && (obj.timezone = message.timezone); - return obj; - }, - - fromPartial, I>>(object: I): StreamEnvironment { - const message = createBaseStreamEnvironment(); - message.timezone = object.timezone ?? ""; - return message; - }, -}; - -function createBaseStreamFragmentGraph(): StreamFragmentGraph { - return { fragments: {}, edges: [], dependentRelationIds: [], tableIdsCnt: 0, env: undefined, parallelism: undefined }; -} - -export const StreamFragmentGraph = { - fromJSON(object: any): StreamFragmentGraph { - return { - fragments: isObject(object.fragments) - ? Object.entries(object.fragments).reduce<{ [key: number]: StreamFragmentGraph_StreamFragment }>( - (acc, [key, value]) => { - acc[Number(key)] = StreamFragmentGraph_StreamFragment.fromJSON(value); - return acc; - }, - {}, - ) - : {}, - edges: Array.isArray(object?.edges) - ? object.edges.map((e: any) => StreamFragmentGraph_StreamFragmentEdge.fromJSON(e)) - : [], - dependentRelationIds: Array.isArray(object?.dependentRelationIds) - ? object.dependentRelationIds.map((e: any) => Number(e)) - : [], - tableIdsCnt: isSet(object.tableIdsCnt) ? Number(object.tableIdsCnt) : 0, - env: isSet(object.env) ? StreamEnvironment.fromJSON(object.env) : undefined, - parallelism: isSet(object.parallelism) ? StreamFragmentGraph_Parallelism.fromJSON(object.parallelism) : undefined, - }; - }, - - toJSON(message: StreamFragmentGraph): unknown { - const obj: any = {}; - obj.fragments = {}; - if (message.fragments) { - Object.entries(message.fragments).forEach(([k, v]) => { - obj.fragments[k] = StreamFragmentGraph_StreamFragment.toJSON(v); - }); - } - if (message.edges) { - obj.edges = message.edges.map((e) => e ? StreamFragmentGraph_StreamFragmentEdge.toJSON(e) : undefined); - } else { - obj.edges = []; - } - if (message.dependentRelationIds) { - obj.dependentRelationIds = message.dependentRelationIds.map((e) => Math.round(e)); - } else { - obj.dependentRelationIds = []; - } - message.tableIdsCnt !== undefined && (obj.tableIdsCnt = Math.round(message.tableIdsCnt)); - message.env !== undefined && (obj.env = message.env ? StreamEnvironment.toJSON(message.env) : undefined); - message.parallelism !== undefined && - (obj.parallelism = message.parallelism ? StreamFragmentGraph_Parallelism.toJSON(message.parallelism) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): StreamFragmentGraph { - const message = createBaseStreamFragmentGraph(); - message.fragments = Object.entries(object.fragments ?? 
{}).reduce< - { [key: number]: StreamFragmentGraph_StreamFragment } - >((acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = StreamFragmentGraph_StreamFragment.fromPartial(value); - } - return acc; - }, {}); - message.edges = object.edges?.map((e) => StreamFragmentGraph_StreamFragmentEdge.fromPartial(e)) || []; - message.dependentRelationIds = object.dependentRelationIds?.map((e) => e) || []; - message.tableIdsCnt = object.tableIdsCnt ?? 0; - message.env = (object.env !== undefined && object.env !== null) - ? StreamEnvironment.fromPartial(object.env) - : undefined; - message.parallelism = (object.parallelism !== undefined && object.parallelism !== null) - ? StreamFragmentGraph_Parallelism.fromPartial(object.parallelism) - : undefined; - return message; - }, -}; - -function createBaseStreamFragmentGraph_StreamFragment(): StreamFragmentGraph_StreamFragment { - return { - fragmentId: 0, - node: undefined, - fragmentTypeMask: 0, - isSingleton: false, - tableIdsCnt: 0, - upstreamTableIds: [], - }; -} - -export const StreamFragmentGraph_StreamFragment = { - fromJSON(object: any): StreamFragmentGraph_StreamFragment { - return { - fragmentId: isSet(object.fragmentId) ? Number(object.fragmentId) : 0, - node: isSet(object.node) ? StreamNode.fromJSON(object.node) : undefined, - fragmentTypeMask: isSet(object.fragmentTypeMask) ? Number(object.fragmentTypeMask) : 0, - isSingleton: isSet(object.isSingleton) ? Boolean(object.isSingleton) : false, - tableIdsCnt: isSet(object.tableIdsCnt) ? Number(object.tableIdsCnt) : 0, - upstreamTableIds: Array.isArray(object?.upstreamTableIds) - ? object.upstreamTableIds.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: StreamFragmentGraph_StreamFragment): unknown { - const obj: any = {}; - message.fragmentId !== undefined && (obj.fragmentId = Math.round(message.fragmentId)); - message.node !== undefined && (obj.node = message.node ? StreamNode.toJSON(message.node) : undefined); - message.fragmentTypeMask !== undefined && (obj.fragmentTypeMask = Math.round(message.fragmentTypeMask)); - message.isSingleton !== undefined && (obj.isSingleton = message.isSingleton); - message.tableIdsCnt !== undefined && (obj.tableIdsCnt = Math.round(message.tableIdsCnt)); - if (message.upstreamTableIds) { - obj.upstreamTableIds = message.upstreamTableIds.map((e) => Math.round(e)); - } else { - obj.upstreamTableIds = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): StreamFragmentGraph_StreamFragment { - const message = createBaseStreamFragmentGraph_StreamFragment(); - message.fragmentId = object.fragmentId ?? 0; - message.node = (object.node !== undefined && object.node !== null) - ? StreamNode.fromPartial(object.node) - : undefined; - message.fragmentTypeMask = object.fragmentTypeMask ?? 0; - message.isSingleton = object.isSingleton ?? false; - message.tableIdsCnt = object.tableIdsCnt ?? 0; - message.upstreamTableIds = object.upstreamTableIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseStreamFragmentGraph_StreamFragmentEdge(): StreamFragmentGraph_StreamFragmentEdge { - return { dispatchStrategy: undefined, linkId: 0, upstreamId: 0, downstreamId: 0 }; -} - -export const StreamFragmentGraph_StreamFragmentEdge = { - fromJSON(object: any): StreamFragmentGraph_StreamFragmentEdge { - return { - dispatchStrategy: isSet(object.dispatchStrategy) ? DispatchStrategy.fromJSON(object.dispatchStrategy) : undefined, - linkId: isSet(object.linkId) ? Number(object.linkId) : 0, - upstreamId: isSet(object.upstreamId) ? 
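
The `fragments` handling above is ts-proto's pattern for protobuf map fields keyed by integers: JSON objects only have string keys, so both fromJSON and fromPartial re-key the entries as numbers via Object.entries().reduce. A small sketch of that re-keying, with a placeholder value type standing in for StreamFragmentGraph_StreamFragment:

interface FragmentLike {
  fragmentId: number;
}

// Re-key a JSON object (string keys) into the numeric-keyed map used by the
// generated code; a real codec would also run Message.fromJSON on each value.
function rekeyFragments(raw: Record<string, FragmentLike>): { [key: number]: FragmentLike } {
  return Object.entries(raw).reduce<{ [key: number]: FragmentLike }>((acc, [k, v]) => {
    acc[Number(k)] = v;
    return acc;
  }, {});
}

// e.g. rekeyFragments({ "1": { fragmentId: 1 } }) yields { 1: { fragmentId: 1 } }
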
Number(object.upstreamId) : 0, - downstreamId: isSet(object.downstreamId) ? Number(object.downstreamId) : 0, - }; - }, - - toJSON(message: StreamFragmentGraph_StreamFragmentEdge): unknown { - const obj: any = {}; - message.dispatchStrategy !== undefined && - (obj.dispatchStrategy = message.dispatchStrategy ? DispatchStrategy.toJSON(message.dispatchStrategy) : undefined); - message.linkId !== undefined && (obj.linkId = Math.round(message.linkId)); - message.upstreamId !== undefined && (obj.upstreamId = Math.round(message.upstreamId)); - message.downstreamId !== undefined && (obj.downstreamId = Math.round(message.downstreamId)); - return obj; - }, - - fromPartial, I>>( - object: I, - ): StreamFragmentGraph_StreamFragmentEdge { - const message = createBaseStreamFragmentGraph_StreamFragmentEdge(); - message.dispatchStrategy = (object.dispatchStrategy !== undefined && object.dispatchStrategy !== null) - ? DispatchStrategy.fromPartial(object.dispatchStrategy) - : undefined; - message.linkId = object.linkId ?? 0; - message.upstreamId = object.upstreamId ?? 0; - message.downstreamId = object.downstreamId ?? 0; - return message; - }, -}; - -function createBaseStreamFragmentGraph_Parallelism(): StreamFragmentGraph_Parallelism { - return { parallelism: 0 }; -} - -export const StreamFragmentGraph_Parallelism = { - fromJSON(object: any): StreamFragmentGraph_Parallelism { - return { parallelism: isSet(object.parallelism) ? Number(object.parallelism) : 0 }; - }, - - toJSON(message: StreamFragmentGraph_Parallelism): unknown { - const obj: any = {}; - message.parallelism !== undefined && (obj.parallelism = Math.round(message.parallelism)); - return obj; - }, - - fromPartial, I>>( - object: I, - ): StreamFragmentGraph_Parallelism { - const message = createBaseStreamFragmentGraph_Parallelism(); - message.parallelism = object.parallelism ?? 0; - return message; - }, -}; - -function createBaseStreamFragmentGraph_FragmentsEntry(): StreamFragmentGraph_FragmentsEntry { - return { key: 0, value: undefined }; -} - -export const StreamFragmentGraph_FragmentsEntry = { - fromJSON(object: any): StreamFragmentGraph_FragmentsEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? StreamFragmentGraph_StreamFragment.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: StreamFragmentGraph_FragmentsEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && - (obj.value = message.value ? StreamFragmentGraph_StreamFragment.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): StreamFragmentGraph_FragmentsEntry { - const message = createBaseStreamFragmentGraph_FragmentsEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? 
StreamFragmentGraph_StreamFragment.fromPartial(object.value)
-      : undefined;
-    return message;
-  },
-};
-
-declare var self: any | undefined;
-declare var window: any | undefined;
-declare var global: any | undefined;
-var globalThis: any = (() => {
-  if (typeof globalThis !== "undefined") {
-    return globalThis;
-  }
-  if (typeof self !== "undefined") {
-    return self;
-  }
-  if (typeof window !== "undefined") {
-    return window;
-  }
-  if (typeof global !== "undefined") {
-    return global;
-  }
-  throw "Unable to locate global object";
-})();
-
-function bytesFromBase64(b64: string): Uint8Array {
-  if (globalThis.Buffer) {
-    return Uint8Array.from(globalThis.Buffer.from(b64, "base64"));
-  } else {
-    const bin = globalThis.atob(b64);
-    const arr = new Uint8Array(bin.length);
-    for (let i = 0; i < bin.length; ++i) {
-      arr[i] = bin.charCodeAt(i);
-    }
-    return arr;
-  }
-}
-
-function base64FromBytes(arr: Uint8Array): string {
-  if (globalThis.Buffer) {
-    return globalThis.Buffer.from(arr).toString("base64");
-  } else {
-    const bin: string[] = [];
-    arr.forEach((byte) => {
-      bin.push(String.fromCharCode(byte));
-    });
-    return globalThis.btoa(bin.join(""));
-  }
-}
-
-type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
-
-export type DeepPartial<T> = T extends Builtin ? T
-  : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>>
-  : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] }
-  : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> }
-  : Partial<T>;
-
-type KeysOfUnion<T> = T extends T ? keyof T : never;
-export type Exact<P, I extends P> = P extends Builtin ? P
-  : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never };
-
-function isObject(value: any): boolean {
-  return typeof value === "object" && value !== null;
-}
-
-function isSet(value: any): boolean {
-  return value !== null && value !== undefined;
-}
diff --git a/dashboard/proto/gen/stream_service.ts b/dashboard/proto/gen/stream_service.ts
deleted file mode 100644
index 2740cb287c6b2..0000000000000
--- a/dashboard/proto/gen/stream_service.ts
+++ /dev/null
@@ -1,725 +0,0 @@
-/* eslint-disable */
-import { ActorInfo, Status } from "./common";
-import { SstableInfo, TableStats } from "./hummock";
-import { Barrier, StreamActor } from "./stream_plan";
-
-export const protobufPackage = "stream_service";
-
-/** Describe the fragments which will be running on this node */
-export interface UpdateActorsRequest {
-  requestId: string;
-  actors: StreamActor[];
-}
-
-export interface UpdateActorsResponse {
-  status: Status | undefined;
-}
-
-export interface BroadcastActorInfoTableRequest {
-  info: ActorInfo[];
-}
-
-/** Create channels and gRPC connections for a fragment */
-export interface BuildActorsRequest {
-  requestId: string;
-  actorId: number[];
-}
-
-export interface BuildActorsResponse {
-  requestId: string;
-  status: Status | undefined;
-}
-
-export interface DropActorsRequest {
-  requestId: string;
-  actorIds: number[];
-}
-
-export interface DropActorsResponse {
-  requestId: string;
-  status: Status | undefined;
-}
-
-export interface ForceStopActorsRequest {
-  requestId: string;
-}
-
-export interface ForceStopActorsResponse {
-  requestId: string;
-  status: Status | undefined;
-}
-
-export interface InjectBarrierRequest {
-  requestId: string;
-  barrier: Barrier | undefined;
-  actorIdsToSend: number[];
-  actorIdsToCollect: number[];
-}
-
-export interface InjectBarrierResponse {
-  requestId: string;
-  status: Status | undefined;
-}
-
-export interface BarrierCompleteRequest {
requestId: string; - prevEpoch: number; -} - -export interface BarrierCompleteResponse { - requestId: string; - status: Status | undefined; - createMviewProgress: BarrierCompleteResponse_CreateMviewProgress[]; - syncedSstables: BarrierCompleteResponse_GroupedSstableInfo[]; - workerId: number; -} - -export interface BarrierCompleteResponse_CreateMviewProgress { - chainActorId: number; - done: boolean; - consumedEpoch: number; - consumedRows: number; -} - -export interface BarrierCompleteResponse_GroupedSstableInfo { - compactionGroupId: number; - sst: SstableInfo | undefined; - tableStatsMap: { [key: number]: TableStats }; -} - -export interface BarrierCompleteResponse_GroupedSstableInfo_TableStatsMapEntry { - key: number; - value: TableStats | undefined; -} - -/** Before starting streaming, the leader node broadcast the actor-host table to needed workers. */ -export interface BroadcastActorInfoTableResponse { - status: Status | undefined; -} - -export interface WaitEpochCommitRequest { - epoch: number; -} - -export interface WaitEpochCommitResponse { - status: Status | undefined; -} - -function createBaseUpdateActorsRequest(): UpdateActorsRequest { - return { requestId: "", actors: [] }; -} - -export const UpdateActorsRequest = { - fromJSON(object: any): UpdateActorsRequest { - return { - requestId: isSet(object.requestId) ? String(object.requestId) : "", - actors: Array.isArray(object?.actors) ? object.actors.map((e: any) => StreamActor.fromJSON(e)) : [], - }; - }, - - toJSON(message: UpdateActorsRequest): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - if (message.actors) { - obj.actors = message.actors.map((e) => e ? StreamActor.toJSON(e) : undefined); - } else { - obj.actors = []; - } - return obj; - }, - - fromPartial, I>>(object: I): UpdateActorsRequest { - const message = createBaseUpdateActorsRequest(); - message.requestId = object.requestId ?? ""; - message.actors = object.actors?.map((e) => StreamActor.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseUpdateActorsResponse(): UpdateActorsResponse { - return { status: undefined }; -} - -export const UpdateActorsResponse = { - fromJSON(object: any): UpdateActorsResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: UpdateActorsResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): UpdateActorsResponse { - const message = createBaseUpdateActorsResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseBroadcastActorInfoTableRequest(): BroadcastActorInfoTableRequest { - return { info: [] }; -} - -export const BroadcastActorInfoTableRequest = { - fromJSON(object: any): BroadcastActorInfoTableRequest { - return { info: Array.isArray(object?.info) ? object.info.map((e: any) => ActorInfo.fromJSON(e)) : [] }; - }, - - toJSON(message: BroadcastActorInfoTableRequest): unknown { - const obj: any = {}; - if (message.info) { - obj.info = message.info.map((e) => e ? 
ActorInfo.toJSON(e) : undefined); - } else { - obj.info = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): BroadcastActorInfoTableRequest { - const message = createBaseBroadcastActorInfoTableRequest(); - message.info = object.info?.map((e) => ActorInfo.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseBuildActorsRequest(): BuildActorsRequest { - return { requestId: "", actorId: [] }; -} - -export const BuildActorsRequest = { - fromJSON(object: any): BuildActorsRequest { - return { - requestId: isSet(object.requestId) ? String(object.requestId) : "", - actorId: Array.isArray(object?.actorId) ? object.actorId.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: BuildActorsRequest): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - if (message.actorId) { - obj.actorId = message.actorId.map((e) => Math.round(e)); - } else { - obj.actorId = []; - } - return obj; - }, - - fromPartial, I>>(object: I): BuildActorsRequest { - const message = createBaseBuildActorsRequest(); - message.requestId = object.requestId ?? ""; - message.actorId = object.actorId?.map((e) => e) || []; - return message; - }, -}; - -function createBaseBuildActorsResponse(): BuildActorsResponse { - return { requestId: "", status: undefined }; -} - -export const BuildActorsResponse = { - fromJSON(object: any): BuildActorsResponse { - return { - requestId: isSet(object.requestId) ? String(object.requestId) : "", - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - }; - }, - - toJSON(message: BuildActorsResponse): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): BuildActorsResponse { - const message = createBaseBuildActorsResponse(); - message.requestId = object.requestId ?? ""; - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseDropActorsRequest(): DropActorsRequest { - return { requestId: "", actorIds: [] }; -} - -export const DropActorsRequest = { - fromJSON(object: any): DropActorsRequest { - return { - requestId: isSet(object.requestId) ? String(object.requestId) : "", - actorIds: Array.isArray(object?.actorIds) ? object.actorIds.map((e: any) => Number(e)) : [], - }; - }, - - toJSON(message: DropActorsRequest): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - if (message.actorIds) { - obj.actorIds = message.actorIds.map((e) => Math.round(e)); - } else { - obj.actorIds = []; - } - return obj; - }, - - fromPartial, I>>(object: I): DropActorsRequest { - const message = createBaseDropActorsRequest(); - message.requestId = object.requestId ?? ""; - message.actorIds = object.actorIds?.map((e) => e) || []; - return message; - }, -}; - -function createBaseDropActorsResponse(): DropActorsResponse { - return { requestId: "", status: undefined }; -} - -export const DropActorsResponse = { - fromJSON(object: any): DropActorsResponse { - return { - requestId: isSet(object.requestId) ? String(object.requestId) : "", - status: isSet(object.status) ? 
Status.fromJSON(object.status) : undefined, - }; - }, - - toJSON(message: DropActorsResponse): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): DropActorsResponse { - const message = createBaseDropActorsResponse(); - message.requestId = object.requestId ?? ""; - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseForceStopActorsRequest(): ForceStopActorsRequest { - return { requestId: "" }; -} - -export const ForceStopActorsRequest = { - fromJSON(object: any): ForceStopActorsRequest { - return { requestId: isSet(object.requestId) ? String(object.requestId) : "" }; - }, - - toJSON(message: ForceStopActorsRequest): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - return obj; - }, - - fromPartial, I>>(object: I): ForceStopActorsRequest { - const message = createBaseForceStopActorsRequest(); - message.requestId = object.requestId ?? ""; - return message; - }, -}; - -function createBaseForceStopActorsResponse(): ForceStopActorsResponse { - return { requestId: "", status: undefined }; -} - -export const ForceStopActorsResponse = { - fromJSON(object: any): ForceStopActorsResponse { - return { - requestId: isSet(object.requestId) ? String(object.requestId) : "", - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - }; - }, - - toJSON(message: ForceStopActorsResponse): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ForceStopActorsResponse { - const message = createBaseForceStopActorsResponse(); - message.requestId = object.requestId ?? ""; - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseInjectBarrierRequest(): InjectBarrierRequest { - return { requestId: "", barrier: undefined, actorIdsToSend: [], actorIdsToCollect: [] }; -} - -export const InjectBarrierRequest = { - fromJSON(object: any): InjectBarrierRequest { - return { - requestId: isSet(object.requestId) ? String(object.requestId) : "", - barrier: isSet(object.barrier) ? Barrier.fromJSON(object.barrier) : undefined, - actorIdsToSend: Array.isArray(object?.actorIdsToSend) ? object.actorIdsToSend.map((e: any) => Number(e)) : [], - actorIdsToCollect: Array.isArray(object?.actorIdsToCollect) - ? object.actorIdsToCollect.map((e: any) => Number(e)) - : [], - }; - }, - - toJSON(message: InjectBarrierRequest): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - message.barrier !== undefined && (obj.barrier = message.barrier ? 
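
Every message in these files carries the same three-method codec: fromJSON (defensive parsing of untyped input), toJSON (canonical JSON output, with Math.round normalizing integral fields), and fromPartial (filling proto3 defaults for omitted fields). A hand-written sketch of that triad for a hypothetical PingRequest message, to make the shape explicit:

interface PingRequest {
  requestId: string;
}

const PingRequest = {
  // Coerce untyped JSON, falling back to the proto3 default for strings.
  fromJSON(object: any): PingRequest {
    return { requestId: object?.requestId != null ? String(object.requestId) : "" };
  },

  // Emit plain JSON, skipping unset fields.
  toJSON(message: PingRequest): unknown {
    const obj: any = {};
    message.requestId !== undefined && (obj.requestId = message.requestId);
    return obj;
  },

  // Fill defaults for whatever the caller left out of a partial literal.
  fromPartial(object: Partial<PingRequest>): PingRequest {
    return { requestId: object.requestId ?? "" };
  },
};
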
Barrier.toJSON(message.barrier) : undefined); - if (message.actorIdsToSend) { - obj.actorIdsToSend = message.actorIdsToSend.map((e) => Math.round(e)); - } else { - obj.actorIdsToSend = []; - } - if (message.actorIdsToCollect) { - obj.actorIdsToCollect = message.actorIdsToCollect.map((e) => Math.round(e)); - } else { - obj.actorIdsToCollect = []; - } - return obj; - }, - - fromPartial, I>>(object: I): InjectBarrierRequest { - const message = createBaseInjectBarrierRequest(); - message.requestId = object.requestId ?? ""; - message.barrier = (object.barrier !== undefined && object.barrier !== null) - ? Barrier.fromPartial(object.barrier) - : undefined; - message.actorIdsToSend = object.actorIdsToSend?.map((e) => e) || []; - message.actorIdsToCollect = object.actorIdsToCollect?.map((e) => e) || []; - return message; - }, -}; - -function createBaseInjectBarrierResponse(): InjectBarrierResponse { - return { requestId: "", status: undefined }; -} - -export const InjectBarrierResponse = { - fromJSON(object: any): InjectBarrierResponse { - return { - requestId: isSet(object.requestId) ? String(object.requestId) : "", - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - }; - }, - - toJSON(message: InjectBarrierResponse): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): InjectBarrierResponse { - const message = createBaseInjectBarrierResponse(); - message.requestId = object.requestId ?? ""; - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseBarrierCompleteRequest(): BarrierCompleteRequest { - return { requestId: "", prevEpoch: 0 }; -} - -export const BarrierCompleteRequest = { - fromJSON(object: any): BarrierCompleteRequest { - return { - requestId: isSet(object.requestId) ? String(object.requestId) : "", - prevEpoch: isSet(object.prevEpoch) ? Number(object.prevEpoch) : 0, - }; - }, - - toJSON(message: BarrierCompleteRequest): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - message.prevEpoch !== undefined && (obj.prevEpoch = Math.round(message.prevEpoch)); - return obj; - }, - - fromPartial, I>>(object: I): BarrierCompleteRequest { - const message = createBaseBarrierCompleteRequest(); - message.requestId = object.requestId ?? ""; - message.prevEpoch = object.prevEpoch ?? 0; - return message; - }, -}; - -function createBaseBarrierCompleteResponse(): BarrierCompleteResponse { - return { requestId: "", status: undefined, createMviewProgress: [], syncedSstables: [], workerId: 0 }; -} - -export const BarrierCompleteResponse = { - fromJSON(object: any): BarrierCompleteResponse { - return { - requestId: isSet(object.requestId) ? String(object.requestId) : "", - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - createMviewProgress: Array.isArray(object?.createMviewProgress) - ? object.createMviewProgress.map((e: any) => BarrierCompleteResponse_CreateMviewProgress.fromJSON(e)) - : [], - syncedSstables: Array.isArray(object?.syncedSstables) - ? object.syncedSstables.map((e: any) => BarrierCompleteResponse_GroupedSstableInfo.fromJSON(e)) - : [], - workerId: isSet(object.workerId) ? 
Number(object.workerId) : 0, - }; - }, - - toJSON(message: BarrierCompleteResponse): unknown { - const obj: any = {}; - message.requestId !== undefined && (obj.requestId = message.requestId); - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - if (message.createMviewProgress) { - obj.createMviewProgress = message.createMviewProgress.map((e) => - e ? BarrierCompleteResponse_CreateMviewProgress.toJSON(e) : undefined - ); - } else { - obj.createMviewProgress = []; - } - if (message.syncedSstables) { - obj.syncedSstables = message.syncedSstables.map((e) => - e ? BarrierCompleteResponse_GroupedSstableInfo.toJSON(e) : undefined - ); - } else { - obj.syncedSstables = []; - } - message.workerId !== undefined && (obj.workerId = Math.round(message.workerId)); - return obj; - }, - - fromPartial, I>>(object: I): BarrierCompleteResponse { - const message = createBaseBarrierCompleteResponse(); - message.requestId = object.requestId ?? ""; - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.createMviewProgress = - object.createMviewProgress?.map((e) => BarrierCompleteResponse_CreateMviewProgress.fromPartial(e)) || []; - message.syncedSstables = - object.syncedSstables?.map((e) => BarrierCompleteResponse_GroupedSstableInfo.fromPartial(e)) || []; - message.workerId = object.workerId ?? 0; - return message; - }, -}; - -function createBaseBarrierCompleteResponse_CreateMviewProgress(): BarrierCompleteResponse_CreateMviewProgress { - return { chainActorId: 0, done: false, consumedEpoch: 0, consumedRows: 0 }; -} - -export const BarrierCompleteResponse_CreateMviewProgress = { - fromJSON(object: any): BarrierCompleteResponse_CreateMviewProgress { - return { - chainActorId: isSet(object.chainActorId) ? Number(object.chainActorId) : 0, - done: isSet(object.done) ? Boolean(object.done) : false, - consumedEpoch: isSet(object.consumedEpoch) ? Number(object.consumedEpoch) : 0, - consumedRows: isSet(object.consumedRows) ? Number(object.consumedRows) : 0, - }; - }, - - toJSON(message: BarrierCompleteResponse_CreateMviewProgress): unknown { - const obj: any = {}; - message.chainActorId !== undefined && (obj.chainActorId = Math.round(message.chainActorId)); - message.done !== undefined && (obj.done = message.done); - message.consumedEpoch !== undefined && (obj.consumedEpoch = Math.round(message.consumedEpoch)); - message.consumedRows !== undefined && (obj.consumedRows = Math.round(message.consumedRows)); - return obj; - }, - - fromPartial, I>>( - object: I, - ): BarrierCompleteResponse_CreateMviewProgress { - const message = createBaseBarrierCompleteResponse_CreateMviewProgress(); - message.chainActorId = object.chainActorId ?? 0; - message.done = object.done ?? false; - message.consumedEpoch = object.consumedEpoch ?? 0; - message.consumedRows = object.consumedRows ?? 0; - return message; - }, -}; - -function createBaseBarrierCompleteResponse_GroupedSstableInfo(): BarrierCompleteResponse_GroupedSstableInfo { - return { compactionGroupId: 0, sst: undefined, tableStatsMap: {} }; -} - -export const BarrierCompleteResponse_GroupedSstableInfo = { - fromJSON(object: any): BarrierCompleteResponse_GroupedSstableInfo { - return { - compactionGroupId: isSet(object.compactionGroupId) ? Number(object.compactionGroupId) : 0, - sst: isSet(object.sst) ? SstableInfo.fromJSON(object.sst) : undefined, - tableStatsMap: isObject(object.tableStatsMap) - ? 
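
As a usage note, fromPartial is what makes these bindings convenient to construct by hand, since omitted fields take proto3 defaults instead of staying undefined. A hedged sketch (the import path and all field values are invented for illustration; only the message shape comes from InjectBarrierRequest above):

import { InjectBarrierRequest } from "./proto/gen/stream_service";

// `barrier` is a message field, so it stays undefined when omitted; the
// scalar and repeated fields are filled in as given.
const req = InjectBarrierRequest.fromPartial({
  requestId: "example-request",
  actorIdsToSend: [1, 2],
  actorIdsToCollect: [1, 2],
});
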
Object.entries(object.tableStatsMap).reduce<{ [key: number]: TableStats }>((acc, [key, value]) => { - acc[Number(key)] = TableStats.fromJSON(value); - return acc; - }, {}) - : {}, - }; - }, - - toJSON(message: BarrierCompleteResponse_GroupedSstableInfo): unknown { - const obj: any = {}; - message.compactionGroupId !== undefined && (obj.compactionGroupId = Math.round(message.compactionGroupId)); - message.sst !== undefined && (obj.sst = message.sst ? SstableInfo.toJSON(message.sst) : undefined); - obj.tableStatsMap = {}; - if (message.tableStatsMap) { - Object.entries(message.tableStatsMap).forEach(([k, v]) => { - obj.tableStatsMap[k] = TableStats.toJSON(v); - }); - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): BarrierCompleteResponse_GroupedSstableInfo { - const message = createBaseBarrierCompleteResponse_GroupedSstableInfo(); - message.compactionGroupId = object.compactionGroupId ?? 0; - message.sst = (object.sst !== undefined && object.sst !== null) ? SstableInfo.fromPartial(object.sst) : undefined; - message.tableStatsMap = Object.entries(object.tableStatsMap ?? {}).reduce<{ [key: number]: TableStats }>( - (acc, [key, value]) => { - if (value !== undefined) { - acc[Number(key)] = TableStats.fromPartial(value); - } - return acc; - }, - {}, - ); - return message; - }, -}; - -function createBaseBarrierCompleteResponse_GroupedSstableInfo_TableStatsMapEntry(): BarrierCompleteResponse_GroupedSstableInfo_TableStatsMapEntry { - return { key: 0, value: undefined }; -} - -export const BarrierCompleteResponse_GroupedSstableInfo_TableStatsMapEntry = { - fromJSON(object: any): BarrierCompleteResponse_GroupedSstableInfo_TableStatsMapEntry { - return { - key: isSet(object.key) ? Number(object.key) : 0, - value: isSet(object.value) ? TableStats.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: BarrierCompleteResponse_GroupedSstableInfo_TableStatsMapEntry): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = Math.round(message.key)); - message.value !== undefined && (obj.value = message.value ? TableStats.toJSON(message.value) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): BarrierCompleteResponse_GroupedSstableInfo_TableStatsMapEntry { - const message = createBaseBarrierCompleteResponse_GroupedSstableInfo_TableStatsMapEntry(); - message.key = object.key ?? 0; - message.value = (object.value !== undefined && object.value !== null) - ? TableStats.fromPartial(object.value) - : undefined; - return message; - }, -}; - -function createBaseBroadcastActorInfoTableResponse(): BroadcastActorInfoTableResponse { - return { status: undefined }; -} - -export const BroadcastActorInfoTableResponse = { - fromJSON(object: any): BroadcastActorInfoTableResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: BroadcastActorInfoTableResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): BroadcastActorInfoTableResponse { - const message = createBaseBroadcastActorInfoTableResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? 
Status.fromPartial(object.status)
-      : undefined;
-    return message;
-  },
-};
-
-function createBaseWaitEpochCommitRequest(): WaitEpochCommitRequest {
-  return { epoch: 0 };
-}
-
-export const WaitEpochCommitRequest = {
-  fromJSON(object: any): WaitEpochCommitRequest {
-    return { epoch: isSet(object.epoch) ? Number(object.epoch) : 0 };
-  },
-
-  toJSON(message: WaitEpochCommitRequest): unknown {
-    const obj: any = {};
-    message.epoch !== undefined && (obj.epoch = Math.round(message.epoch));
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<WaitEpochCommitRequest>, I>>(object: I): WaitEpochCommitRequest {
-    const message = createBaseWaitEpochCommitRequest();
-    message.epoch = object.epoch ?? 0;
-    return message;
-  },
-};
-
-function createBaseWaitEpochCommitResponse(): WaitEpochCommitResponse {
-  return { status: undefined };
-}
-
-export const WaitEpochCommitResponse = {
-  fromJSON(object: any): WaitEpochCommitResponse {
-    return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined };
-  },
-
-  toJSON(message: WaitEpochCommitResponse): unknown {
-    const obj: any = {};
-    message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined);
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<WaitEpochCommitResponse>, I>>(object: I): WaitEpochCommitResponse {
-    const message = createBaseWaitEpochCommitResponse();
-    message.status = (object.status !== undefined && object.status !== null)
-      ? Status.fromPartial(object.status)
-      : undefined;
-    return message;
-  },
-};
-
-type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
-
-export type DeepPartial<T> = T extends Builtin ? T
-  : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>>
-  : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] }
-  : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> }
-  : Partial<T>;
-
-type KeysOfUnion<T> = T extends T ? keyof T : never;
-export type Exact<P, I extends P> = P extends Builtin ? P
-  : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never };
-
-function isObject(value: any): boolean {
-  return typeof value === "object" && value !== null;
-}
-
-function isSet(value: any): boolean {
-  return value !== null && value !== undefined;
-}
diff --git a/dashboard/proto/gen/task_service.ts b/dashboard/proto/gen/task_service.ts
deleted file mode 100644
index 2cfcb1d49bf04..0000000000000
--- a/dashboard/proto/gen/task_service.ts
+++ /dev/null
@@ -1,544 +0,0 @@
-/* eslint-disable */
-import { PlanFragment, TaskId as TaskId1, TaskOutputId } from "./batch_plan";
-import { BatchQueryEpoch, Status } from "./common";
-import { DataChunk } from "./data";
-import { StreamMessage } from "./stream_plan";
-
-export const protobufPackage = "task_service";
-
-/** Task is a running instance of Stage. */
-export interface TaskId {
-  queryId: string;
-  stageId: number;
-  taskId: number;
-}
-
-export interface TaskInfoResponse {
-  taskId: TaskId1 | undefined;
-  taskStatus: TaskInfoResponse_TaskStatus;
-  /** Optional error message for failed task. */
-  errorMessage: string;
-}
-
-export const TaskInfoResponse_TaskStatus = {
-  /** UNSPECIFIED - Note: Requirement of proto3: first enum must be 0.
*/ - UNSPECIFIED: "UNSPECIFIED", - PENDING: "PENDING", - RUNNING: "RUNNING", - FINISHED: "FINISHED", - FAILED: "FAILED", - ABORTED: "ABORTED", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type TaskInfoResponse_TaskStatus = typeof TaskInfoResponse_TaskStatus[keyof typeof TaskInfoResponse_TaskStatus]; - -export function taskInfoResponse_TaskStatusFromJSON(object: any): TaskInfoResponse_TaskStatus { - switch (object) { - case 0: - case "UNSPECIFIED": - return TaskInfoResponse_TaskStatus.UNSPECIFIED; - case 2: - case "PENDING": - return TaskInfoResponse_TaskStatus.PENDING; - case 3: - case "RUNNING": - return TaskInfoResponse_TaskStatus.RUNNING; - case 6: - case "FINISHED": - return TaskInfoResponse_TaskStatus.FINISHED; - case 7: - case "FAILED": - return TaskInfoResponse_TaskStatus.FAILED; - case 8: - case "ABORTED": - return TaskInfoResponse_TaskStatus.ABORTED; - case -1: - case "UNRECOGNIZED": - default: - return TaskInfoResponse_TaskStatus.UNRECOGNIZED; - } -} - -export function taskInfoResponse_TaskStatusToJSON(object: TaskInfoResponse_TaskStatus): string { - switch (object) { - case TaskInfoResponse_TaskStatus.UNSPECIFIED: - return "UNSPECIFIED"; - case TaskInfoResponse_TaskStatus.PENDING: - return "PENDING"; - case TaskInfoResponse_TaskStatus.RUNNING: - return "RUNNING"; - case TaskInfoResponse_TaskStatus.FINISHED: - return "FINISHED"; - case TaskInfoResponse_TaskStatus.FAILED: - return "FAILED"; - case TaskInfoResponse_TaskStatus.ABORTED: - return "ABORTED"; - case TaskInfoResponse_TaskStatus.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface CreateTaskRequest { - taskId: TaskId1 | undefined; - plan: PlanFragment | undefined; - epoch: BatchQueryEpoch | undefined; -} - -export interface AbortTaskRequest { - taskId: TaskId1 | undefined; -} - -export interface AbortTaskResponse { - status: Status | undefined; -} - -export interface GetTaskInfoRequest { - taskId: TaskId1 | undefined; -} - -export interface GetDataResponse { - recordBatch: DataChunk | undefined; -} - -export interface ExecuteRequest { - taskId: TaskId1 | undefined; - plan: PlanFragment | undefined; - epoch: BatchQueryEpoch | undefined; -} - -export interface GetDataRequest { - taskOutputId: TaskOutputId | undefined; -} - -export interface GetStreamRequest { - value?: { $case: "get"; get: GetStreamRequest_Get } | { - $case: "addPermits"; - addPermits: GetStreamRequest_AddPermits; - }; -} - -/** The first message, which tells the upstream which channel this exchange stream is for. */ -export interface GetStreamRequest_Get { - upActorId: number; - downActorId: number; - upFragmentId: number; - downFragmentId: number; -} - -/** The following messages, which adds the permits back to the upstream to achieve back-pressure. */ -export interface GetStreamRequest_AddPermits { - permits: number; -} - -export interface GetStreamResponse { - message: - | StreamMessage - | undefined; - /** The number of permits acquired for this message, which should be sent back to the upstream with `AddPermits`. */ - permits: number; -} - -function createBaseTaskId(): TaskId { - return { queryId: "", stageId: 0, taskId: 0 }; -} - -export const TaskId = { - fromJSON(object: any): TaskId { - return { - queryId: isSet(object.queryId) ? String(object.queryId) : "", - stageId: isSet(object.stageId) ? Number(object.stageId) : 0, - taskId: isSet(object.taskId) ? 
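
Note that ts-proto (in string-enum mode) models proto enums as a const object plus a string-literal union rather than a numeric TS enum, and the FromJSON helper above accepts both the wire's numeric tags and their string names. A condensed sketch of the same idea with a hypothetical three-value enum:

const JobStatus = {
  UNSPECIFIED: "UNSPECIFIED",
  RUNNING: "RUNNING",
  UNRECOGNIZED: "UNRECOGNIZED",
} as const;
type JobStatus = typeof JobStatus[keyof typeof JobStatus];

// Accept either the numeric tag or the string name, as
// taskInfoResponse_TaskStatusFromJSON does; anything else is UNRECOGNIZED.
function jobStatusFromJSON(object: any): JobStatus {
  switch (object) {
    case 0:
    case "UNSPECIFIED":
      return JobStatus.UNSPECIFIED;
    case 3:
    case "RUNNING":
      return JobStatus.RUNNING;
    default:
      return JobStatus.UNRECOGNIZED;
  }
}
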
Number(object.taskId) : 0, - }; - }, - - toJSON(message: TaskId): unknown { - const obj: any = {}; - message.queryId !== undefined && (obj.queryId = message.queryId); - message.stageId !== undefined && (obj.stageId = Math.round(message.stageId)); - message.taskId !== undefined && (obj.taskId = Math.round(message.taskId)); - return obj; - }, - - fromPartial, I>>(object: I): TaskId { - const message = createBaseTaskId(); - message.queryId = object.queryId ?? ""; - message.stageId = object.stageId ?? 0; - message.taskId = object.taskId ?? 0; - return message; - }, -}; - -function createBaseTaskInfoResponse(): TaskInfoResponse { - return { taskId: undefined, taskStatus: TaskInfoResponse_TaskStatus.UNSPECIFIED, errorMessage: "" }; -} - -export const TaskInfoResponse = { - fromJSON(object: any): TaskInfoResponse { - return { - taskId: isSet(object.taskId) ? TaskId1.fromJSON(object.taskId) : undefined, - taskStatus: isSet(object.taskStatus) - ? taskInfoResponse_TaskStatusFromJSON(object.taskStatus) - : TaskInfoResponse_TaskStatus.UNSPECIFIED, - errorMessage: isSet(object.errorMessage) ? String(object.errorMessage) : "", - }; - }, - - toJSON(message: TaskInfoResponse): unknown { - const obj: any = {}; - message.taskId !== undefined && (obj.taskId = message.taskId ? TaskId1.toJSON(message.taskId) : undefined); - message.taskStatus !== undefined && (obj.taskStatus = taskInfoResponse_TaskStatusToJSON(message.taskStatus)); - message.errorMessage !== undefined && (obj.errorMessage = message.errorMessage); - return obj; - }, - - fromPartial, I>>(object: I): TaskInfoResponse { - const message = createBaseTaskInfoResponse(); - message.taskId = (object.taskId !== undefined && object.taskId !== null) - ? TaskId1.fromPartial(object.taskId) - : undefined; - message.taskStatus = object.taskStatus ?? TaskInfoResponse_TaskStatus.UNSPECIFIED; - message.errorMessage = object.errorMessage ?? ""; - return message; - }, -}; - -function createBaseCreateTaskRequest(): CreateTaskRequest { - return { taskId: undefined, plan: undefined, epoch: undefined }; -} - -export const CreateTaskRequest = { - fromJSON(object: any): CreateTaskRequest { - return { - taskId: isSet(object.taskId) ? TaskId1.fromJSON(object.taskId) : undefined, - plan: isSet(object.plan) ? PlanFragment.fromJSON(object.plan) : undefined, - epoch: isSet(object.epoch) ? BatchQueryEpoch.fromJSON(object.epoch) : undefined, - }; - }, - - toJSON(message: CreateTaskRequest): unknown { - const obj: any = {}; - message.taskId !== undefined && (obj.taskId = message.taskId ? TaskId1.toJSON(message.taskId) : undefined); - message.plan !== undefined && (obj.plan = message.plan ? PlanFragment.toJSON(message.plan) : undefined); - message.epoch !== undefined && (obj.epoch = message.epoch ? BatchQueryEpoch.toJSON(message.epoch) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CreateTaskRequest { - const message = createBaseCreateTaskRequest(); - message.taskId = (object.taskId !== undefined && object.taskId !== null) - ? TaskId1.fromPartial(object.taskId) - : undefined; - message.plan = (object.plan !== undefined && object.plan !== null) - ? PlanFragment.fromPartial(object.plan) - : undefined; - message.epoch = (object.epoch !== undefined && object.epoch !== null) - ? 
BatchQueryEpoch.fromPartial(object.epoch) - : undefined; - return message; - }, -}; - -function createBaseAbortTaskRequest(): AbortTaskRequest { - return { taskId: undefined }; -} - -export const AbortTaskRequest = { - fromJSON(object: any): AbortTaskRequest { - return { taskId: isSet(object.taskId) ? TaskId1.fromJSON(object.taskId) : undefined }; - }, - - toJSON(message: AbortTaskRequest): unknown { - const obj: any = {}; - message.taskId !== undefined && (obj.taskId = message.taskId ? TaskId1.toJSON(message.taskId) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): AbortTaskRequest { - const message = createBaseAbortTaskRequest(); - message.taskId = (object.taskId !== undefined && object.taskId !== null) - ? TaskId1.fromPartial(object.taskId) - : undefined; - return message; - }, -}; - -function createBaseAbortTaskResponse(): AbortTaskResponse { - return { status: undefined }; -} - -export const AbortTaskResponse = { - fromJSON(object: any): AbortTaskResponse { - return { status: isSet(object.status) ? Status.fromJSON(object.status) : undefined }; - }, - - toJSON(message: AbortTaskResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): AbortTaskResponse { - const message = createBaseAbortTaskResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - return message; - }, -}; - -function createBaseGetTaskInfoRequest(): GetTaskInfoRequest { - return { taskId: undefined }; -} - -export const GetTaskInfoRequest = { - fromJSON(object: any): GetTaskInfoRequest { - return { taskId: isSet(object.taskId) ? TaskId1.fromJSON(object.taskId) : undefined }; - }, - - toJSON(message: GetTaskInfoRequest): unknown { - const obj: any = {}; - message.taskId !== undefined && (obj.taskId = message.taskId ? TaskId1.toJSON(message.taskId) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): GetTaskInfoRequest { - const message = createBaseGetTaskInfoRequest(); - message.taskId = (object.taskId !== undefined && object.taskId !== null) - ? TaskId1.fromPartial(object.taskId) - : undefined; - return message; - }, -}; - -function createBaseGetDataResponse(): GetDataResponse { - return { recordBatch: undefined }; -} - -export const GetDataResponse = { - fromJSON(object: any): GetDataResponse { - return { recordBatch: isSet(object.recordBatch) ? DataChunk.fromJSON(object.recordBatch) : undefined }; - }, - - toJSON(message: GetDataResponse): unknown { - const obj: any = {}; - message.recordBatch !== undefined && - (obj.recordBatch = message.recordBatch ? DataChunk.toJSON(message.recordBatch) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): GetDataResponse { - const message = createBaseGetDataResponse(); - message.recordBatch = (object.recordBatch !== undefined && object.recordBatch !== null) - ? DataChunk.fromPartial(object.recordBatch) - : undefined; - return message; - }, -}; - -function createBaseExecuteRequest(): ExecuteRequest { - return { taskId: undefined, plan: undefined, epoch: undefined }; -} - -export const ExecuteRequest = { - fromJSON(object: any): ExecuteRequest { - return { - taskId: isSet(object.taskId) ? TaskId1.fromJSON(object.taskId) : undefined, - plan: isSet(object.plan) ? PlanFragment.fromJSON(object.plan) : undefined, - epoch: isSet(object.epoch) ? 
BatchQueryEpoch.fromJSON(object.epoch) : undefined, - }; - }, - - toJSON(message: ExecuteRequest): unknown { - const obj: any = {}; - message.taskId !== undefined && (obj.taskId = message.taskId ? TaskId1.toJSON(message.taskId) : undefined); - message.plan !== undefined && (obj.plan = message.plan ? PlanFragment.toJSON(message.plan) : undefined); - message.epoch !== undefined && (obj.epoch = message.epoch ? BatchQueryEpoch.toJSON(message.epoch) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): ExecuteRequest { - const message = createBaseExecuteRequest(); - message.taskId = (object.taskId !== undefined && object.taskId !== null) - ? TaskId1.fromPartial(object.taskId) - : undefined; - message.plan = (object.plan !== undefined && object.plan !== null) - ? PlanFragment.fromPartial(object.plan) - : undefined; - message.epoch = (object.epoch !== undefined && object.epoch !== null) - ? BatchQueryEpoch.fromPartial(object.epoch) - : undefined; - return message; - }, -}; - -function createBaseGetDataRequest(): GetDataRequest { - return { taskOutputId: undefined }; -} - -export const GetDataRequest = { - fromJSON(object: any): GetDataRequest { - return { taskOutputId: isSet(object.taskOutputId) ? TaskOutputId.fromJSON(object.taskOutputId) : undefined }; - }, - - toJSON(message: GetDataRequest): unknown { - const obj: any = {}; - message.taskOutputId !== undefined && - (obj.taskOutputId = message.taskOutputId ? TaskOutputId.toJSON(message.taskOutputId) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): GetDataRequest { - const message = createBaseGetDataRequest(); - message.taskOutputId = (object.taskOutputId !== undefined && object.taskOutputId !== null) - ? TaskOutputId.fromPartial(object.taskOutputId) - : undefined; - return message; - }, -}; - -function createBaseGetStreamRequest(): GetStreamRequest { - return { value: undefined }; -} - -export const GetStreamRequest = { - fromJSON(object: any): GetStreamRequest { - return { - value: isSet(object.get) - ? { $case: "get", get: GetStreamRequest_Get.fromJSON(object.get) } - : isSet(object.addPermits) - ? { $case: "addPermits", addPermits: GetStreamRequest_AddPermits.fromJSON(object.addPermits) } - : undefined, - }; - }, - - toJSON(message: GetStreamRequest): unknown { - const obj: any = {}; - message.value?.$case === "get" && - (obj.get = message.value?.get ? GetStreamRequest_Get.toJSON(message.value?.get) : undefined); - message.value?.$case === "addPermits" && (obj.addPermits = message.value?.addPermits - ? GetStreamRequest_AddPermits.toJSON(message.value?.addPermits) - : undefined); - return obj; - }, - - fromPartial, I>>(object: I): GetStreamRequest { - const message = createBaseGetStreamRequest(); - if (object.value?.$case === "get" && object.value?.get !== undefined && object.value?.get !== null) { - message.value = { $case: "get", get: GetStreamRequest_Get.fromPartial(object.value.get) }; - } - if ( - object.value?.$case === "addPermits" && - object.value?.addPermits !== undefined && - object.value?.addPermits !== null - ) { - message.value = { - $case: "addPermits", - addPermits: GetStreamRequest_AddPermits.fromPartial(object.value.addPermits), - }; - } - return message; - }, -}; - -function createBaseGetStreamRequest_Get(): GetStreamRequest_Get { - return { upActorId: 0, downActorId: 0, upFragmentId: 0, downFragmentId: 0 }; -} - -export const GetStreamRequest_Get = { - fromJSON(object: any): GetStreamRequest_Get { - return { - upActorId: isSet(object.upActorId) ? 
Number(object.upActorId) : 0, - downActorId: isSet(object.downActorId) ? Number(object.downActorId) : 0, - upFragmentId: isSet(object.upFragmentId) ? Number(object.upFragmentId) : 0, - downFragmentId: isSet(object.downFragmentId) ? Number(object.downFragmentId) : 0, - }; - }, - - toJSON(message: GetStreamRequest_Get): unknown { - const obj: any = {}; - message.upActorId !== undefined && (obj.upActorId = Math.round(message.upActorId)); - message.downActorId !== undefined && (obj.downActorId = Math.round(message.downActorId)); - message.upFragmentId !== undefined && (obj.upFragmentId = Math.round(message.upFragmentId)); - message.downFragmentId !== undefined && (obj.downFragmentId = Math.round(message.downFragmentId)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<GetStreamRequest_Get>, I>>(object: I): GetStreamRequest_Get { - const message = createBaseGetStreamRequest_Get(); - message.upActorId = object.upActorId ?? 0; - message.downActorId = object.downActorId ?? 0; - message.upFragmentId = object.upFragmentId ?? 0; - message.downFragmentId = object.downFragmentId ?? 0; - return message; - }, -}; - -function createBaseGetStreamRequest_AddPermits(): GetStreamRequest_AddPermits { - return { permits: 0 }; -} - -export const GetStreamRequest_AddPermits = { - fromJSON(object: any): GetStreamRequest_AddPermits { - return { permits: isSet(object.permits) ? Number(object.permits) : 0 }; - }, - - toJSON(message: GetStreamRequest_AddPermits): unknown { - const obj: any = {}; - message.permits !== undefined && (obj.permits = Math.round(message.permits)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<GetStreamRequest_AddPermits>, I>>(object: I): GetStreamRequest_AddPermits { - const message = createBaseGetStreamRequest_AddPermits(); - message.permits = object.permits ?? 0; - return message; - }, -}; - -function createBaseGetStreamResponse(): GetStreamResponse { - return { message: undefined, permits: 0 }; -} - -export const GetStreamResponse = { - fromJSON(object: any): GetStreamResponse { - return { - message: isSet(object.message) ? StreamMessage.fromJSON(object.message) : undefined, - permits: isSet(object.permits) ? Number(object.permits) : 0, - }; - }, - - toJSON(message: GetStreamResponse): unknown { - const obj: any = {}; - message.message !== undefined && - (obj.message = message.message ? StreamMessage.toJSON(message.message) : undefined); - message.permits !== undefined && (obj.permits = Math.round(message.permits)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<GetStreamResponse>, I>>(object: I): GetStreamResponse { - const message = createBaseGetStreamResponse(); - message.message = (object.message !== undefined && object.message !== null) - ? StreamMessage.fromPartial(object.message) - : undefined; - message.permits = object.permits ?? 0; - return message; - }, -}; - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial<T> = T extends Builtin ? T - : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> - : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> } - : Partial<T>; - -type KeysOfUnion<T> = T extends T ? keyof T : never; -export type Exact<P, I extends P> = P extends Builtin ?
P - : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/proto/gen/user.ts b/dashboard/proto/gen/user.ts deleted file mode 100644 index cdf9070d84258..0000000000000 --- a/dashboard/proto/gen/user.ts +++ /dev/null @@ -1,889 +0,0 @@ -/* eslint-disable */ -import { Status } from "./common"; - -export const protobufPackage = "user"; - -/** AuthInfo is the information required to login to a server. */ -export interface AuthInfo { - encryptionType: AuthInfo_EncryptionType; - encryptedValue: Uint8Array; -} - -export const AuthInfo_EncryptionType = { - UNSPECIFIED: "UNSPECIFIED", - PLAINTEXT: "PLAINTEXT", - SHA256: "SHA256", - MD5: "MD5", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type AuthInfo_EncryptionType = typeof AuthInfo_EncryptionType[keyof typeof AuthInfo_EncryptionType]; - -export function authInfo_EncryptionTypeFromJSON(object: any): AuthInfo_EncryptionType { - switch (object) { - case 0: - case "UNSPECIFIED": - return AuthInfo_EncryptionType.UNSPECIFIED; - case 1: - case "PLAINTEXT": - return AuthInfo_EncryptionType.PLAINTEXT; - case 2: - case "SHA256": - return AuthInfo_EncryptionType.SHA256; - case 3: - case "MD5": - return AuthInfo_EncryptionType.MD5; - case -1: - case "UNRECOGNIZED": - default: - return AuthInfo_EncryptionType.UNRECOGNIZED; - } -} - -export function authInfo_EncryptionTypeToJSON(object: AuthInfo_EncryptionType): string { - switch (object) { - case AuthInfo_EncryptionType.UNSPECIFIED: - return "UNSPECIFIED"; - case AuthInfo_EncryptionType.PLAINTEXT: - return "PLAINTEXT"; - case AuthInfo_EncryptionType.SHA256: - return "SHA256"; - case AuthInfo_EncryptionType.MD5: - return "MD5"; - case AuthInfo_EncryptionType.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** User defines a user in the system. */ -export interface UserInfo { - id: number; - name: string; - isSuper: boolean; - canCreateDb: boolean; - canCreateUser: boolean; - canLogin: boolean; - authInfo: - | AuthInfo - | undefined; - /** / Granted privileges will be only updated through the command of GRANT/REVOKE. */ - grantPrivileges: GrantPrivilege[]; -} - -/** GrantPrivilege defines a privilege granted to a user. 
*/ -export interface GrantPrivilege { - object?: - | { $case: "databaseId"; databaseId: number } - | { $case: "schemaId"; schemaId: number } - | { $case: "tableId"; tableId: number } - | { $case: "sourceId"; sourceId: number } - | { $case: "sinkId"; sinkId: number } - | { $case: "viewId"; viewId: number } - | { $case: "functionId"; functionId: number } - | { $case: "allTablesSchemaId"; allTablesSchemaId: number } - | { $case: "allSourcesSchemaId"; allSourcesSchemaId: number }; - actionWithOpts: GrantPrivilege_ActionWithGrantOption[]; -} - -export const GrantPrivilege_Action = { - UNSPECIFIED: "UNSPECIFIED", - SELECT: "SELECT", - INSERT: "INSERT", - UPDATE: "UPDATE", - DELETE: "DELETE", - CREATE: "CREATE", - CONNECT: "CONNECT", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type GrantPrivilege_Action = typeof GrantPrivilege_Action[keyof typeof GrantPrivilege_Action]; - -export function grantPrivilege_ActionFromJSON(object: any): GrantPrivilege_Action { - switch (object) { - case 0: - case "UNSPECIFIED": - return GrantPrivilege_Action.UNSPECIFIED; - case 1: - case "SELECT": - return GrantPrivilege_Action.SELECT; - case 2: - case "INSERT": - return GrantPrivilege_Action.INSERT; - case 3: - case "UPDATE": - return GrantPrivilege_Action.UPDATE; - case 4: - case "DELETE": - return GrantPrivilege_Action.DELETE; - case 5: - case "CREATE": - return GrantPrivilege_Action.CREATE; - case 6: - case "CONNECT": - return GrantPrivilege_Action.CONNECT; - case -1: - case "UNRECOGNIZED": - default: - return GrantPrivilege_Action.UNRECOGNIZED; - } -} - -export function grantPrivilege_ActionToJSON(object: GrantPrivilege_Action): string { - switch (object) { - case GrantPrivilege_Action.UNSPECIFIED: - return "UNSPECIFIED"; - case GrantPrivilege_Action.SELECT: - return "SELECT"; - case GrantPrivilege_Action.INSERT: - return "INSERT"; - case GrantPrivilege_Action.UPDATE: - return "UPDATE"; - case GrantPrivilege_Action.DELETE: - return "DELETE"; - case GrantPrivilege_Action.CREATE: - return "CREATE"; - case GrantPrivilege_Action.CONNECT: - return "CONNECT"; - case GrantPrivilege_Action.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface GrantPrivilege_ActionWithGrantOption { - action: GrantPrivilege_Action; - withGrantOption: boolean; - grantedBy: number; -} - -export interface CreateUserRequest { - user: UserInfo | undefined; -} - -export interface CreateUserResponse { - status: Status | undefined; - version: number; -} - -export interface DropUserRequest { - userId: number; -} - -export interface DropUserResponse { - status: Status | undefined; - version: number; -} - -export interface UpdateUserRequest { - user: UserInfo | undefined; - updateFields: UpdateUserRequest_UpdateField[]; -} - -export const UpdateUserRequest_UpdateField = { - UNSPECIFIED: "UNSPECIFIED", - SUPER: "SUPER", - LOGIN: "LOGIN", - CREATE_DB: "CREATE_DB", - AUTH_INFO: "AUTH_INFO", - RENAME: "RENAME", - CREATE_USER: "CREATE_USER", - UNRECOGNIZED: "UNRECOGNIZED", -} as const; - -export type UpdateUserRequest_UpdateField = - typeof UpdateUserRequest_UpdateField[keyof typeof UpdateUserRequest_UpdateField]; - -export function updateUserRequest_UpdateFieldFromJSON(object: any): UpdateUserRequest_UpdateField { - switch (object) { - case 0: - case "UNSPECIFIED": - return UpdateUserRequest_UpdateField.UNSPECIFIED; - case 1: - case "SUPER": - return UpdateUserRequest_UpdateField.SUPER; - case 2: - case "LOGIN": - return UpdateUserRequest_UpdateField.LOGIN; - case 3: - case "CREATE_DB": - return 
UpdateUserRequest_UpdateField.CREATE_DB; - case 4: - case "AUTH_INFO": - return UpdateUserRequest_UpdateField.AUTH_INFO; - case 5: - case "RENAME": - return UpdateUserRequest_UpdateField.RENAME; - case 6: - case "CREATE_USER": - return UpdateUserRequest_UpdateField.CREATE_USER; - case -1: - case "UNRECOGNIZED": - default: - return UpdateUserRequest_UpdateField.UNRECOGNIZED; - } -} - -export function updateUserRequest_UpdateFieldToJSON(object: UpdateUserRequest_UpdateField): string { - switch (object) { - case UpdateUserRequest_UpdateField.UNSPECIFIED: - return "UNSPECIFIED"; - case UpdateUserRequest_UpdateField.SUPER: - return "SUPER"; - case UpdateUserRequest_UpdateField.LOGIN: - return "LOGIN"; - case UpdateUserRequest_UpdateField.CREATE_DB: - return "CREATE_DB"; - case UpdateUserRequest_UpdateField.AUTH_INFO: - return "AUTH_INFO"; - case UpdateUserRequest_UpdateField.RENAME: - return "RENAME"; - case UpdateUserRequest_UpdateField.CREATE_USER: - return "CREATE_USER"; - case UpdateUserRequest_UpdateField.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -export interface UpdateUserResponse { - status: Status | undefined; - version: number; -} - -export interface GrantPrivilegeRequest { - userIds: number[]; - privileges: GrantPrivilege[]; - withGrantOption: boolean; - grantedBy: number; -} - -export interface GrantPrivilegeResponse { - status: Status | undefined; - version: number; -} - -export interface RevokePrivilegeRequest { - userIds: number[]; - privileges: GrantPrivilege[]; - grantedBy: number; - revokeBy: number; - revokeGrantOption: boolean; - cascade: boolean; -} - -export interface RevokePrivilegeResponse { - status: Status | undefined; - version: number; -} - -function createBaseAuthInfo(): AuthInfo { - return { encryptionType: AuthInfo_EncryptionType.UNSPECIFIED, encryptedValue: new Uint8Array() }; -} - -export const AuthInfo = { - fromJSON(object: any): AuthInfo { - return { - encryptionType: isSet(object.encryptionType) - ? authInfo_EncryptionTypeFromJSON(object.encryptionType) - : AuthInfo_EncryptionType.UNSPECIFIED, - encryptedValue: isSet(object.encryptedValue) ? bytesFromBase64(object.encryptedValue) : new Uint8Array(), - }; - }, - - toJSON(message: AuthInfo): unknown { - const obj: any = {}; - message.encryptionType !== undefined && - (obj.encryptionType = authInfo_EncryptionTypeToJSON(message.encryptionType)); - message.encryptedValue !== undefined && - (obj.encryptedValue = base64FromBytes( - message.encryptedValue !== undefined ? message.encryptedValue : new Uint8Array(), - )); - return obj; - }, - - fromPartial, I>>(object: I): AuthInfo { - const message = createBaseAuthInfo(); - message.encryptionType = object.encryptionType ?? AuthInfo_EncryptionType.UNSPECIFIED; - message.encryptedValue = object.encryptedValue ?? new Uint8Array(); - return message; - }, -}; - -function createBaseUserInfo(): UserInfo { - return { - id: 0, - name: "", - isSuper: false, - canCreateDb: false, - canCreateUser: false, - canLogin: false, - authInfo: undefined, - grantPrivileges: [], - }; -} - -export const UserInfo = { - fromJSON(object: any): UserInfo { - return { - id: isSet(object.id) ? Number(object.id) : 0, - name: isSet(object.name) ? String(object.name) : "", - isSuper: isSet(object.isSuper) ? Boolean(object.isSuper) : false, - canCreateDb: isSet(object.canCreateDb) ? Boolean(object.canCreateDb) : false, - canCreateUser: isSet(object.canCreateUser) ? Boolean(object.canCreateUser) : false, - canLogin: isSet(object.canLogin) ? 
Boolean(object.canLogin) : false, - authInfo: isSet(object.authInfo) ? AuthInfo.fromJSON(object.authInfo) : undefined, - grantPrivileges: Array.isArray(object?.grantPrivileges) - ? object.grantPrivileges.map((e: any) => GrantPrivilege.fromJSON(e)) - : [], - }; - }, - - toJSON(message: UserInfo): unknown { - const obj: any = {}; - message.id !== undefined && (obj.id = Math.round(message.id)); - message.name !== undefined && (obj.name = message.name); - message.isSuper !== undefined && (obj.isSuper = message.isSuper); - message.canCreateDb !== undefined && (obj.canCreateDb = message.canCreateDb); - message.canCreateUser !== undefined && (obj.canCreateUser = message.canCreateUser); - message.canLogin !== undefined && (obj.canLogin = message.canLogin); - message.authInfo !== undefined && (obj.authInfo = message.authInfo ? AuthInfo.toJSON(message.authInfo) : undefined); - if (message.grantPrivileges) { - obj.grantPrivileges = message.grantPrivileges.map((e) => e ? GrantPrivilege.toJSON(e) : undefined); - } else { - obj.grantPrivileges = []; - } - return obj; - }, - - fromPartial, I>>(object: I): UserInfo { - const message = createBaseUserInfo(); - message.id = object.id ?? 0; - message.name = object.name ?? ""; - message.isSuper = object.isSuper ?? false; - message.canCreateDb = object.canCreateDb ?? false; - message.canCreateUser = object.canCreateUser ?? false; - message.canLogin = object.canLogin ?? false; - message.authInfo = (object.authInfo !== undefined && object.authInfo !== null) - ? AuthInfo.fromPartial(object.authInfo) - : undefined; - message.grantPrivileges = object.grantPrivileges?.map((e) => GrantPrivilege.fromPartial(e)) || []; - return message; - }, -}; - -function createBaseGrantPrivilege(): GrantPrivilege { - return { object: undefined, actionWithOpts: [] }; -} - -export const GrantPrivilege = { - fromJSON(object: any): GrantPrivilege { - return { - object: isSet(object.databaseId) - ? { $case: "databaseId", databaseId: Number(object.databaseId) } - : isSet(object.schemaId) - ? { $case: "schemaId", schemaId: Number(object.schemaId) } - : isSet(object.tableId) - ? { $case: "tableId", tableId: Number(object.tableId) } - : isSet(object.sourceId) - ? { $case: "sourceId", sourceId: Number(object.sourceId) } - : isSet(object.sinkId) - ? { $case: "sinkId", sinkId: Number(object.sinkId) } - : isSet(object.viewId) - ? { $case: "viewId", viewId: Number(object.viewId) } - : isSet(object.functionId) - ? { $case: "functionId", functionId: Number(object.functionId) } - : isSet(object.allTablesSchemaId) - ? { $case: "allTablesSchemaId", allTablesSchemaId: Number(object.allTablesSchemaId) } - : isSet(object.allSourcesSchemaId) - ? { $case: "allSourcesSchemaId", allSourcesSchemaId: Number(object.allSourcesSchemaId) } - : undefined, - actionWithOpts: Array.isArray(object?.actionWithOpts) - ? 
object.actionWithOpts.map((e: any) => GrantPrivilege_ActionWithGrantOption.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GrantPrivilege): unknown { - const obj: any = {}; - message.object?.$case === "databaseId" && (obj.databaseId = Math.round(message.object?.databaseId)); - message.object?.$case === "schemaId" && (obj.schemaId = Math.round(message.object?.schemaId)); - message.object?.$case === "tableId" && (obj.tableId = Math.round(message.object?.tableId)); - message.object?.$case === "sourceId" && (obj.sourceId = Math.round(message.object?.sourceId)); - message.object?.$case === "sinkId" && (obj.sinkId = Math.round(message.object?.sinkId)); - message.object?.$case === "viewId" && (obj.viewId = Math.round(message.object?.viewId)); - message.object?.$case === "functionId" && (obj.functionId = Math.round(message.object?.functionId)); - message.object?.$case === "allTablesSchemaId" && - (obj.allTablesSchemaId = Math.round(message.object?.allTablesSchemaId)); - message.object?.$case === "allSourcesSchemaId" && - (obj.allSourcesSchemaId = Math.round(message.object?.allSourcesSchemaId)); - if (message.actionWithOpts) { - obj.actionWithOpts = message.actionWithOpts.map((e) => - e ? GrantPrivilege_ActionWithGrantOption.toJSON(e) : undefined - ); - } else { - obj.actionWithOpts = []; - } - return obj; - }, - - fromPartial, I>>(object: I): GrantPrivilege { - const message = createBaseGrantPrivilege(); - if ( - object.object?.$case === "databaseId" && - object.object?.databaseId !== undefined && - object.object?.databaseId !== null - ) { - message.object = { $case: "databaseId", databaseId: object.object.databaseId }; - } - if ( - object.object?.$case === "schemaId" && object.object?.schemaId !== undefined && object.object?.schemaId !== null - ) { - message.object = { $case: "schemaId", schemaId: object.object.schemaId }; - } - if (object.object?.$case === "tableId" && object.object?.tableId !== undefined && object.object?.tableId !== null) { - message.object = { $case: "tableId", tableId: object.object.tableId }; - } - if ( - object.object?.$case === "sourceId" && object.object?.sourceId !== undefined && object.object?.sourceId !== null - ) { - message.object = { $case: "sourceId", sourceId: object.object.sourceId }; - } - if (object.object?.$case === "sinkId" && object.object?.sinkId !== undefined && object.object?.sinkId !== null) { - message.object = { $case: "sinkId", sinkId: object.object.sinkId }; - } - if (object.object?.$case === "viewId" && object.object?.viewId !== undefined && object.object?.viewId !== null) { - message.object = { $case: "viewId", viewId: object.object.viewId }; - } - if ( - object.object?.$case === "functionId" && - object.object?.functionId !== undefined && - object.object?.functionId !== null - ) { - message.object = { $case: "functionId", functionId: object.object.functionId }; - } - if ( - object.object?.$case === "allTablesSchemaId" && - object.object?.allTablesSchemaId !== undefined && - object.object?.allTablesSchemaId !== null - ) { - message.object = { $case: "allTablesSchemaId", allTablesSchemaId: object.object.allTablesSchemaId }; - } - if ( - object.object?.$case === "allSourcesSchemaId" && - object.object?.allSourcesSchemaId !== undefined && - object.object?.allSourcesSchemaId !== null - ) { - message.object = { $case: "allSourcesSchemaId", allSourcesSchemaId: object.object.allSourcesSchemaId }; - } - message.actionWithOpts = object.actionWithOpts?.map((e) => GrantPrivilege_ActionWithGrantOption.fromPartial(e)) || - []; - return message; - }, -}; - 
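(For context on the generated helpers being deleted here: below is a minimal TypeScript sketch, not part of this patch, of how a dashboard caller would round-trip the GrantPrivilege oneof through them. It assumes the generated module were still importable; the import path is hypothetical.)

// Sketch only: exercising the generated GrantPrivilege helpers defined above.
import { GrantPrivilege } from "../proto/gen/user"; // hypothetical path

// fromJSON maps the protobuf oneof onto a $case-discriminated union.
const priv = GrantPrivilege.fromJSON({
  tableId: 42,
  actionWithOpts: [{ action: "SELECT", withGrantOption: true, grantedBy: 1 }],
});

if (priv.object?.$case === "tableId") {
  // Narrowing on $case is type-safe; tableId is a number in this branch.
  console.log(priv.object.tableId); // 42
}

// toJSON flattens the union back to plain JSON keys.
console.log(GrantPrivilege.toJSON(priv));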
-function createBaseGrantPrivilege_ActionWithGrantOption(): GrantPrivilege_ActionWithGrantOption { - return { action: GrantPrivilege_Action.UNSPECIFIED, withGrantOption: false, grantedBy: 0 }; -} - -export const GrantPrivilege_ActionWithGrantOption = { - fromJSON(object: any): GrantPrivilege_ActionWithGrantOption { - return { - action: isSet(object.action) ? grantPrivilege_ActionFromJSON(object.action) : GrantPrivilege_Action.UNSPECIFIED, - withGrantOption: isSet(object.withGrantOption) ? Boolean(object.withGrantOption) : false, - grantedBy: isSet(object.grantedBy) ? Number(object.grantedBy) : 0, - }; - }, - - toJSON(message: GrantPrivilege_ActionWithGrantOption): unknown { - const obj: any = {}; - message.action !== undefined && (obj.action = grantPrivilege_ActionToJSON(message.action)); - message.withGrantOption !== undefined && (obj.withGrantOption = message.withGrantOption); - message.grantedBy !== undefined && (obj.grantedBy = Math.round(message.grantedBy)); - return obj; - }, - - fromPartial, I>>( - object: I, - ): GrantPrivilege_ActionWithGrantOption { - const message = createBaseGrantPrivilege_ActionWithGrantOption(); - message.action = object.action ?? GrantPrivilege_Action.UNSPECIFIED; - message.withGrantOption = object.withGrantOption ?? false; - message.grantedBy = object.grantedBy ?? 0; - return message; - }, -}; - -function createBaseCreateUserRequest(): CreateUserRequest { - return { user: undefined }; -} - -export const CreateUserRequest = { - fromJSON(object: any): CreateUserRequest { - return { user: isSet(object.user) ? UserInfo.fromJSON(object.user) : undefined }; - }, - - toJSON(message: CreateUserRequest): unknown { - const obj: any = {}; - message.user !== undefined && (obj.user = message.user ? UserInfo.toJSON(message.user) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): CreateUserRequest { - const message = createBaseCreateUserRequest(); - message.user = (object.user !== undefined && object.user !== null) ? UserInfo.fromPartial(object.user) : undefined; - return message; - }, -}; - -function createBaseCreateUserResponse(): CreateUserResponse { - return { status: undefined, version: 0 }; -} - -export const CreateUserResponse = { - fromJSON(object: any): CreateUserResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: CreateUserResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): CreateUserResponse { - const message = createBaseCreateUserResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseDropUserRequest(): DropUserRequest { - return { userId: 0 }; -} - -export const DropUserRequest = { - fromJSON(object: any): DropUserRequest { - return { userId: isSet(object.userId) ? Number(object.userId) : 0 }; - }, - - toJSON(message: DropUserRequest): unknown { - const obj: any = {}; - message.userId !== undefined && (obj.userId = Math.round(message.userId)); - return obj; - }, - - fromPartial, I>>(object: I): DropUserRequest { - const message = createBaseDropUserRequest(); - message.userId = object.userId ?? 
0; - return message; - }, -}; - -function createBaseDropUserResponse(): DropUserResponse { - return { status: undefined, version: 0 }; -} - -export const DropUserResponse = { - fromJSON(object: any): DropUserResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: DropUserResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): DropUserResponse { - const message = createBaseDropUserResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseUpdateUserRequest(): UpdateUserRequest { - return { user: undefined, updateFields: [] }; -} - -export const UpdateUserRequest = { - fromJSON(object: any): UpdateUserRequest { - return { - user: isSet(object.user) ? UserInfo.fromJSON(object.user) : undefined, - updateFields: Array.isArray(object?.updateFields) - ? object.updateFields.map((e: any) => updateUserRequest_UpdateFieldFromJSON(e)) - : [], - }; - }, - - toJSON(message: UpdateUserRequest): unknown { - const obj: any = {}; - message.user !== undefined && (obj.user = message.user ? UserInfo.toJSON(message.user) : undefined); - if (message.updateFields) { - obj.updateFields = message.updateFields.map((e) => updateUserRequest_UpdateFieldToJSON(e)); - } else { - obj.updateFields = []; - } - return obj; - }, - - fromPartial, I>>(object: I): UpdateUserRequest { - const message = createBaseUpdateUserRequest(); - message.user = (object.user !== undefined && object.user !== null) ? UserInfo.fromPartial(object.user) : undefined; - message.updateFields = object.updateFields?.map((e) => e) || []; - return message; - }, -}; - -function createBaseUpdateUserResponse(): UpdateUserResponse { - return { status: undefined, version: 0 }; -} - -export const UpdateUserResponse = { - fromJSON(object: any): UpdateUserResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: UpdateUserResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): UpdateUserResponse { - const message = createBaseUpdateUserResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseGrantPrivilegeRequest(): GrantPrivilegeRequest { - return { userIds: [], privileges: [], withGrantOption: false, grantedBy: 0 }; -} - -export const GrantPrivilegeRequest = { - fromJSON(object: any): GrantPrivilegeRequest { - return { - userIds: Array.isArray(object?.userIds) ? object.userIds.map((e: any) => Number(e)) : [], - privileges: Array.isArray(object?.privileges) - ? object.privileges.map((e: any) => GrantPrivilege.fromJSON(e)) - : [], - withGrantOption: isSet(object.withGrantOption) ? 
Boolean(object.withGrantOption) : false, - grantedBy: isSet(object.grantedBy) ? Number(object.grantedBy) : 0, - }; - }, - - toJSON(message: GrantPrivilegeRequest): unknown { - const obj: any = {}; - if (message.userIds) { - obj.userIds = message.userIds.map((e) => Math.round(e)); - } else { - obj.userIds = []; - } - if (message.privileges) { - obj.privileges = message.privileges.map((e) => e ? GrantPrivilege.toJSON(e) : undefined); - } else { - obj.privileges = []; - } - message.withGrantOption !== undefined && (obj.withGrantOption = message.withGrantOption); - message.grantedBy !== undefined && (obj.grantedBy = Math.round(message.grantedBy)); - return obj; - }, - - fromPartial, I>>(object: I): GrantPrivilegeRequest { - const message = createBaseGrantPrivilegeRequest(); - message.userIds = object.userIds?.map((e) => e) || []; - message.privileges = object.privileges?.map((e) => GrantPrivilege.fromPartial(e)) || []; - message.withGrantOption = object.withGrantOption ?? false; - message.grantedBy = object.grantedBy ?? 0; - return message; - }, -}; - -function createBaseGrantPrivilegeResponse(): GrantPrivilegeResponse { - return { status: undefined, version: 0 }; -} - -export const GrantPrivilegeResponse = { - fromJSON(object: any): GrantPrivilegeResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: GrantPrivilegeResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial, I>>(object: I): GrantPrivilegeResponse { - const message = createBaseGrantPrivilegeResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -function createBaseRevokePrivilegeRequest(): RevokePrivilegeRequest { - return { userIds: [], privileges: [], grantedBy: 0, revokeBy: 0, revokeGrantOption: false, cascade: false }; -} - -export const RevokePrivilegeRequest = { - fromJSON(object: any): RevokePrivilegeRequest { - return { - userIds: Array.isArray(object?.userIds) ? object.userIds.map((e: any) => Number(e)) : [], - privileges: Array.isArray(object?.privileges) - ? object.privileges.map((e: any) => GrantPrivilege.fromJSON(e)) - : [], - grantedBy: isSet(object.grantedBy) ? Number(object.grantedBy) : 0, - revokeBy: isSet(object.revokeBy) ? Number(object.revokeBy) : 0, - revokeGrantOption: isSet(object.revokeGrantOption) ? Boolean(object.revokeGrantOption) : false, - cascade: isSet(object.cascade) ? Boolean(object.cascade) : false, - }; - }, - - toJSON(message: RevokePrivilegeRequest): unknown { - const obj: any = {}; - if (message.userIds) { - obj.userIds = message.userIds.map((e) => Math.round(e)); - } else { - obj.userIds = []; - } - if (message.privileges) { - obj.privileges = message.privileges.map((e) => e ? 
GrantPrivilege.toJSON(e) : undefined); - } else { - obj.privileges = []; - } - message.grantedBy !== undefined && (obj.grantedBy = Math.round(message.grantedBy)); - message.revokeBy !== undefined && (obj.revokeBy = Math.round(message.revokeBy)); - message.revokeGrantOption !== undefined && (obj.revokeGrantOption = message.revokeGrantOption); - message.cascade !== undefined && (obj.cascade = message.cascade); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<RevokePrivilegeRequest>, I>>(object: I): RevokePrivilegeRequest { - const message = createBaseRevokePrivilegeRequest(); - message.userIds = object.userIds?.map((e) => e) || []; - message.privileges = object.privileges?.map((e) => GrantPrivilege.fromPartial(e)) || []; - message.grantedBy = object.grantedBy ?? 0; - message.revokeBy = object.revokeBy ?? 0; - message.revokeGrantOption = object.revokeGrantOption ?? false; - message.cascade = object.cascade ?? false; - return message; - }, -}; - -function createBaseRevokePrivilegeResponse(): RevokePrivilegeResponse { - return { status: undefined, version: 0 }; -} - -export const RevokePrivilegeResponse = { - fromJSON(object: any): RevokePrivilegeResponse { - return { - status: isSet(object.status) ? Status.fromJSON(object.status) : undefined, - version: isSet(object.version) ? Number(object.version) : 0, - }; - }, - - toJSON(message: RevokePrivilegeResponse): unknown { - const obj: any = {}; - message.status !== undefined && (obj.status = message.status ? Status.toJSON(message.status) : undefined); - message.version !== undefined && (obj.version = Math.round(message.version)); - return obj; - }, - - fromPartial<I extends Exact<DeepPartial<RevokePrivilegeResponse>, I>>(object: I): RevokePrivilegeResponse { - const message = createBaseRevokePrivilegeResponse(); - message.status = (object.status !== undefined && object.status !== null) - ? Status.fromPartial(object.status) - : undefined; - message.version = object.version ?? 0; - return message; - }, -}; - -declare var self: any | undefined; -declare var window: any | undefined; -declare var global: any | undefined; -var globalThis: any = (() => { - if (typeof globalThis !== "undefined") { - return globalThis; - } - if (typeof self !== "undefined") { - return self; - } - if (typeof window !== "undefined") { - return window; - } - if (typeof global !== "undefined") { - return global; - } - throw "Unable to locate global object"; -})(); - -function bytesFromBase64(b64: string): Uint8Array { - if (globalThis.Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if (globalThis.Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; - -export type DeepPartial<T> = T extends Builtin ? T - : T extends Array<infer U> ? Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> - : T extends { $case: string } ? { [K in keyof Omit<T, "$case">]?: DeepPartial<T[K]> } & { $case: T["$case"] } - : T extends {} ? { [K in keyof T]?: DeepPartial<T[K]> } - : Partial<T>; - -type KeysOfUnion<T> = T extends T ? keyof T : never; -export type Exact<P, I extends P> = P extends Builtin ?
P - : P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never }; - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/dashboard/scripts/generate_proto.sh b/dashboard/scripts/generate_proto.sh index 860820d56fea1..0ce74084a42aa 100755 --- a/dashboard/scripts/generate_proto.sh +++ b/dashboard/scripts/generate_proto.sh @@ -8,11 +8,13 @@ cp -a ../proto/*.proto tmp_gen # Array in proto will conflict with JavaScript's Array, so we replace it with RwArray. if [[ "$OSTYPE" == "darwin"* ]]; then - sed -i "" -e "s/Array/RwArray/" "tmp_gen/data.proto" + sed -i "" -e "s/Array/RwArray/" "tmp_gen/data.proto" else - sed -i -e "s/Array/RwArray/" "tmp_gen/data.proto" + sed -i -e "s/Array/RwArray/" "tmp_gen/data.proto" fi +mkdir -p proto/gen + protoc --plugin=./node_modules/.bin/protoc-gen-ts_proto \ --experimental_allow_proto3_optional \ --ts_proto_out=proto/gen/ \ diff --git a/docker/Dockerfile b/docker/Dockerfile index eeb2e8a746185..ed5f0c9e27fcc 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -34,8 +34,9 @@ RUN rustup self update \ RUN cargo fetch -RUN cargo build -p risingwave_cmd_all --release --features "static-link static-log-level" && \ +RUN cargo build -p risingwave_cmd_all -p risingwave_java_binding --release --features "static-link static-log-level" && \ mkdir -p /risingwave/bin && mv /risingwave/target/release/risingwave /risingwave/bin/ && \ + mkdir -p /risingwave/lib && mv /risingwave/target/release/librisingwave_java_binding.so /risingwave/lib && \ cargo clean RUN cd /risingwave/java && mvn -B package -Dmaven.test.skip=true && \ @@ -47,10 +48,13 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install ca-certi FROM image-base as risingwave LABEL org.opencontainers.image.source https://github.com/risingwavelabs/risingwave -RUN mkdir -p /risingwave/bin/connector-node +RUN mkdir -p /risingwave/bin/connector-node && mkdir -p /risingwave/lib COPY --from=builder /risingwave/bin/risingwave /risingwave/bin/risingwave COPY --from=builder /risingwave/bin/connector-node /risingwave/bin/connector-node COPY --from=builder /risingwave/ui /risingwave/ui +COPY --from=builder /risingwave/lib/librisingwave_java_binding.so /risingwave/lib/librisingwave_java_binding.so +# Set java.library.path env to /risingwave/lib +ENV RW_JAVA_BINDING_LIB_PATH /risingwave/lib # Set default playground mode to docker-playground profile ENV PLAYGROUND_PROFILE docker-playground # Set default dashboard UI to local path instead of github proxy diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 48e38a1ac591e..3a422809599fc 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -7,12 +7,12 @@ services: - compactor-node - "--listen-addr" - "0.0.0.0:6660" + - "--advertise-addr" + - "compactor-0:6660" - "--prometheus-listener-addr" - "0.0.0.0:1260" - "--metrics-level" - "1" - - "--state-store" - - "hummock+minio://hummockadmin:hummockadmin@minio-0:9301/hummock001" - "--meta-address" - "http://meta-node-0:5690" - "--config-path" @@ -28,6 +28,8 @@ services: - "./risingwave.toml:/risingwave.toml" environment: RUST_BACKTRACE: "1" + # If ENABLE_TELEMETRY is not set, telemetry will start by default + ENABLE_TELEMETRY: ${ENABLE_TELEMETRY} container_name: compactor-0 healthcheck: test: @@ -44,16 +46,16 @@ services: - compute-node - "--listen-addr" - "0.0.0.0:5688" - - "--prometheus-listener-addr" - - "0.0.0.0:1222" - "--advertise-addr" - "compute-node-0:5688" + - "--prometheus-listener-addr" + - "0.0.0.0:1222" - 
"--metrics-level" - "1" - - "--state-store" - - "hummock+minio://hummockadmin:hummockadmin@minio-0:9301/hummock001" - "--meta-address" - "http://meta-node-0:5690" + - "--connector-rpc-endpoint" + - "connector-node:50051" - "--config-path" - /risingwave.toml expose: @@ -67,6 +69,8 @@ services: - "./risingwave.toml:/risingwave.toml" environment: RUST_BACKTRACE: "1" + # If ENABLE_TELEMETRY is not set, telemetry will start by default + ENABLE_TELEMETRY: ${ENABLE_TELEMETRY} container_name: compute-node-0 healthcheck: test: @@ -128,8 +132,14 @@ services: - "0.0.0.0:4566" - "--meta-addr" - "http://meta-node-0:5690" + - "--advertise-addr" + - "frontend-node-0:4566" - "--config-path" - /risingwave.toml + - "--prometheus-listener-addr" + - "0.0.0.0:2222" + - "--metrics-level" + - "1" expose: - "4566" ports: @@ -140,6 +150,8 @@ services: - "./risingwave.toml:/risingwave.toml" environment: RUST_BACKTRACE: "1" + # If ENABLE_TELEMETRY is not set, telemetry will start by default + ENABLE_TELEMETRY: ${ENABLE_TELEMETRY} container_name: frontend-node-0 healthcheck: test: @@ -191,6 +203,10 @@ services: - etcd - "--etcd-endpoints" - "etcd-0:2388" + - "--connector-rpc-endpoint" + - "connector-node:50051" + - "--state-store" + - "hummock+minio://hummockadmin:hummockadmin@minio-0:9301/hummock001" - "--config-path" - /risingwave.toml expose: @@ -206,6 +222,8 @@ services: - "./risingwave.toml:/risingwave.toml" environment: RUST_BACKTRACE: "1" + # If ENABLE_TELEMETRY is not set, telemetry will start by default + ENABLE_TELEMETRY: ${ENABLE_TELEMETRY} container_name: meta-node-0 healthcheck: test: @@ -234,7 +252,17 @@ services: depends_on: [] volumes: - "minio-0:/data" - entrypoint: "\n/bin/sh -c '\nset -e\nmkdir -p \"/data/hummock001\"\n/usr/bin/docker-entrypoint.sh \"$$0\" \"$$@\"\n'" + entrypoint: " + + /bin/sh -c ' + + set -e + + mkdir -p \"/data/hummock001\" + + /usr/bin/docker-entrypoint.sh \"$$0\" \"$$@\" + + '" environment: MINIO_CI_CD: "1" MINIO_PROMETHEUS_AUTH_TYPE: public @@ -279,13 +307,20 @@ services: interval: 1s timeout: 5s retries: 5 - redpanda: + connector-node: + image: ghcr.io/risingwavelabs/risingwave:latest + entrypoint: "/risingwave/bin/connector-node/start-service.sh" + ports: + - 50051 + - 50052 + container_name: connector-node + message_queue: image: "docker.vectorized.io/vectorized/redpanda:latest" command: - redpanda - start - "--smp" - - "4" + - "1" - "--reserve-memory" - 0M - "--memory" @@ -297,18 +332,20 @@ services: - "--kafka-addr" - "PLAINTEXT://0.0.0.0:29092,OUTSIDE://0.0.0.0:9092" - "--advertise-kafka-addr" - - "PLAINTEXT://redpanda:29092,OUTSIDE://localhost:9092" + - "PLAINTEXT://message_queue:29092,OUTSIDE://localhost:9092" expose: - "29092" - "9092" + - "9644" ports: + - "29092:29092" - "9092:9092" - "9644:9644" depends_on: [] volumes: - - "redpanda:/var/lib/redpanda/data" + - "message_queue:/var/lib/redpanda/data" environment: {} - container_name: redpanda + container_name: message_queue healthcheck: test: - CMD @@ -327,6 +364,6 @@ volumes: external: false prometheus-0: external: false - redpanda: + message_queue: external: false name: risingwave-compose diff --git a/docker/prometheus.yaml b/docker/prometheus.yaml index eb75b54d02bc7..f919847828839 100644 --- a/docker/prometheus.yaml +++ b/docker/prometheus.yaml @@ -15,11 +15,11 @@ scrape_configs: - job_name: meta static_configs: - targets: ["meta-node-0:1250"] - + - job_name: minio metrics_path: /minio/v2/metrics/cluster static_configs: - - targets: ["minio-0:9301"] + - targets: ["minio-0:9301"] - job_name: compactor 
static_configs: @@ -29,6 +29,14 @@ scrape_configs: static_configs: - targets: ["etcd-0:2379"] + - job_name: frontend + static_configs: + - targets: ["frontend-node-0:2222"] + - job_name: redpanda static_configs: - - targets: ["redpanda:9644"] + - targets: ["message_queue:9644"] + + - job_name: connector-node + static_configs: + - targets: ["connector-node:50052"] \ No newline at end of file diff --git a/docs/developer-guide.md b/docs/developer-guide.md index ba144ea04d983..2d517092682c8 100644 --- a/docs/developer-guide.md +++ b/docs/developer-guide.md @@ -72,7 +72,7 @@ RiseDev is the development mode of RisingWave. To develop RisingWave, you need t * OpenSSL * PostgreSQL (psql) (>= 14.1) * Tmux (>= v3.2a) -* LLVM 15 (To workaround some bugs in macOS toolchain, see https://github.com/risingwavelabs/risingwave/issues/6205). +* LLVM 15 (For macOS only, to workaround some bugs in macOS toolchain. See https://github.com/risingwavelabs/risingwave/issues/6205). To install the dependencies on macOS, run: @@ -90,6 +90,10 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh Then you'll be able to compile and start RiseDev! +> **Note** +> +> `.cargo/config.toml` contains `rustflags` configurations like `-Clink-arg` and `-Ctarget-feature`. Since it will be [merged](https://doc.rust-lang.org/cargo/reference/config.html#hierarchical-structure) with `$HOME/.cargo/config.toml`, check the config files and make sure they don't conflict if you have global `rustflags` configurations for e.g. linker there. + ## Start and monitor a dev cluster You can now build RiseDev and start a dev cluster. It is as simple as: diff --git a/e2e_test/batch/aggregate/array_agg.slt.part b/e2e_test/batch/aggregate/array_agg.slt.part index 4403d8089aa56..537af2eab3dc0 100644 --- a/e2e_test/batch/aggregate/array_agg.slt.part +++ b/e2e_test/batch/aggregate/array_agg.slt.part @@ -2,7 +2,7 @@ statement ok SET RW_IMPLICIT_FLUSH TO true; statement ok -create table t(v1 varchar, v2 int, v3 int) +create table t(v1 varchar, v2 int, v3 int); query T select array_agg(v1) from t; @@ -18,10 +18,7 @@ select array_agg(v1) from t; {NULL} statement ok -delete from t; - -statement ok -insert into t values ('aaa', 1, 1), ('bbb', 0, 2), ('ccc', 0, 5), ('ddd', 1, 4) +insert into t values ('aaa', 1, 1), ('bbb', 0, 2), ('ccc', 0, 5), ('ddd', 1, 4); query T select b from (select unnest(a) from (select array_agg(v3) as v3_arr from t) g(a)) p(b) order by b; @@ -30,16 +27,22 @@ select b from (select unnest(a) from (select array_agg(v3) as v3_arr from t) g(a 2 4 5 +NULL + +query T +select array_agg(v1 order by v3 asc nulls first) from t; +---- +{NULL,aaa,bbb,ddd,ccc} query T -select array_agg(v1 order by v3 desc) from t +select array_agg(v1 order by v3 desc) from t; ---- -{ccc,ddd,bbb,aaa} +{NULL,ccc,ddd,bbb,aaa} query T -select array_agg(v1 order by v2 asc, v3 desc) from t +select array_agg(v1 order by v2 asc nulls last, v3 desc) from t; ---- -{ccc,bbb,ddd,aaa} +{ccc,bbb,ddd,aaa,NULL} statement ok -drop table t +drop table t; diff --git a/e2e_test/batch/aggregate/sum.slt.part b/e2e_test/batch/aggregate/sum.slt.part index 7d847e4a2f81a..dd5529e3f8bfa 100644 --- a/e2e_test/batch/aggregate/sum.slt.part +++ b/e2e_test/batch/aggregate/sum.slt.part @@ -77,7 +77,7 @@ select sum(d) from t; statement ok insert into t values (9000000000000000000000000000); -statement error QueryError: Expr error: Numeric out of range +statement error Expr error: Numeric out of range select sum(d) from t; statement ok diff --git a/e2e_test/batch/basic/dml.slt.part 
b/e2e_test/batch/basic/dml.slt.part index 8b387b56e570a..1b4ad5e459c2f 100644 --- a/e2e_test/batch/basic/dml.slt.part +++ b/e2e_test/batch/basic/dml.slt.part @@ -70,3 +70,24 @@ select count(*) from t; statement ok drop table t; + +statement ok +create table t (v1 int, v2 int primary key, v3 varchar); + +statement ok +insert into t values (0, 1, 'a'), (1, 2, 'b'); + +statement ok +update t set (v1, v3) = (v1+v2, v3||v3); + +query IIT +select * from t order by v1; +---- +1 1 aa +3 2 bb + +statement error QueryError: Bind error: update modifying the PK column is banned +update t set (v3, v2) = (v3||v3, v1+v2); + +statement ok +drop table t; diff --git a/e2e_test/batch/basic/generate_series.slt.part b/e2e_test/batch/basic/generate_series.slt.part index c65272bdbd44c..28420214d37c7 100644 --- a/e2e_test/batch/basic/generate_series.slt.part +++ b/e2e_test/batch/basic/generate_series.slt.part @@ -87,3 +87,7 @@ SELECT * FROM generate_series('2'::INT,'10'::INT,'0'::INT); query I SELECT * FROM generate_series('2'::INT,'10'::INT,'-2'::INT); ---- + +query I +SELECT * FROM generate_series(1, 100000000, 1) where 1=0; +---- diff --git a/e2e_test/batch/basic/index.slt.part b/e2e_test/batch/basic/index.slt.part index 0077867023710..a45cd724bb451 100644 --- a/e2e_test/batch/basic/index.slt.part +++ b/e2e_test/batch/basic/index.slt.part @@ -11,23 +11,23 @@ statement ok create index idx2 on t1(v2); statement ok -insert into t1 values(1, 2),(3,4),(5,6); +insert into t1 values (1, 2), (3, 4), (5, 6); statement ok -explain select v1,v2 from t1 where v1 = 1; +explain select v1, v2 from t1 where v1 = 1; query II -select v1,v2 from t1 where v1 = 1; +select v1, v2 from t1 where v1 = 1; ---- 1 2 query II -select v1,v2 from t1 where v2 = 4; +select v1, v2 from t1 where v2 = 4; ---- 3 4 query II -select v1,v2 from t1 where v1 = 1 or v2 = 4 order by v1, v2; +select v1, v2 from t1 where v1 = 1 or v2 = 4 order by v1, v2; ---- 1 2 3 4 @@ -36,10 +36,57 @@ statement ok delete from t1 where v1 = 1; query II -select v1,v2 from t1 order by v1, v2; +select v1, v2 from t1 order by v1, v2; ---- 3 4 5 6 +statement ok +insert into t1 values (NULL, 5); + +statement ok +create index idx3 on t1(v1 desc); + +statement ok +create index idx4 on t1(v1 nulls first); + +statement ok +create index idx5 on t1(v1 desc nulls last); + +query II +select v1, v2 from t1 order by v1; +---- +3 4 +5 6 +NULL 5 + +query II +select v1, v2 from t1 order by v1 desc; +---- +NULL 5 +5 6 +3 4 + +query II +select v1, v2 from t1 order by v1 asc nulls first; +---- +NULL 5 +3 4 +5 6 + +query II +select v1, v2 from t1 order by v1 desc nulls last; +---- +5 6 +3 4 +NULL 5 + +query II +select v1, v2 from t1 order by v1 desc nulls first; +---- +NULL 5 +5 6 +3 4 + statement ok drop table t1; diff --git a/e2e_test/batch/basic/join.slt.part b/e2e_test/batch/basic/join.slt.part index 1f7daba0f46c5..cf2ae46dd0772 100644 --- a/e2e_test/batch/basic/join.slt.part +++ b/e2e_test/batch/basic/join.slt.part @@ -32,6 +32,25 @@ select * from t1 join t2 using(v1) join t3 using(v2); ---- 2 1 3 3 +statement ok +set batch_parallelism = 1; + +query IIIIII +select * from t1 join t2 using(v1) join t3 using(v2); +---- +2 1 3 3 + +statement ok +set batch_parallelism = 1000; + +query IIIIII +select * from t1 join t2 using(v1) join t3 using(v2); +---- +2 1 3 3 + +statement ok +set batch_parallelism = 0; + statement ok create index i1 on t1(v1) include(v2); @@ -73,7 +92,7 @@ statement ok insert into t values (1),(2),(3),(4),(5); query I rowsort -Select * from t join i using(x) +select * from t 
join i using(x) ---- 1 2 @@ -81,6 +100,11 @@ Select * from t join i using(x) 4 5 +query I +select * from t natural join (select * from t where 1=0); +---- + + statement ok drop index i; diff --git a/e2e_test/batch/basic/order_by.slt.part b/e2e_test/batch/basic/order_by.slt.part index 670280e8f5fff..3fae22b6386d2 100644 --- a/e2e_test/batch/basic/order_by.slt.part +++ b/e2e_test/batch/basic/order_by.slt.part @@ -5,10 +5,10 @@ statement ok create table t (v1 int, v2 int, v3 int); statement ok -insert into t values (1,4,2), (2,3,3), (3,4,4), (4,3,5) +insert into t values (1,4,2), (2,3,3), (3,4,4), (4,3,5); query III rowsort -select * from t +select * from t; ---- 1 4 2 2 3 3 @@ -16,7 +16,7 @@ select * from t 4 3 5 query III -select * from t order by v1 desc +select * from t order by v1 desc; ---- 4 3 5 3 4 4 @@ -48,17 +48,17 @@ select * from t order by v1 + v2, v1; 4 3 5 query III -select * from t order by v1 desc limit 1 +select * from t order by v1 desc limit 1; ---- 4 3 5 query III -select * from t order by v1 desc limit 1 offset 1 +select * from t order by v1 desc limit 1 offset 1; ---- 3 4 4 query III -select * from t order by v2, v1 +select * from t order by v2, v1; ---- 2 3 3 4 3 5 @@ -66,13 +66,13 @@ select * from t order by v2, v1 3 4 4 query III -select * from t order by v2, v1 limit 2 +select * from t order by v2, v1 limit 2; ---- 2 3 3 4 3 5 query III -select * from t order by v2, v1 limit 10 +select * from t order by v2, v1 limit 10; ---- 2 3 3 4 3 5 @@ -80,7 +80,7 @@ select * from t order by v2, v1 limit 10 3 4 4 query III -select * from t order by v2 desc, v1 limit 2 +select * from t order by v2 desc, v1 limit 2; ---- 1 4 2 3 4 4 @@ -94,6 +94,24 @@ select * from t order by v1 limit 2; 1 4 2 2 3 3 +query III +select * from t order by v1 asc limit 2; +---- +1 4 2 +2 3 3 + +query III +select * from t order by v1 nulls first limit 2; +---- +NULL 7 NULL +1 4 2 + +query III +select * from t order by v1 asc nulls last limit 2; +---- +1 4 2 +2 3 3 + query III select * from t order by v1 desc limit 7; ---- @@ -103,5 +121,23 @@ NULL 7 NULL 2 3 3 1 4 2 +query III +select * from t order by v1 desc nulls first limit 7; +---- +NULL 7 NULL +4 3 5 +3 4 4 +2 3 3 +1 4 2 + +query III +select * from t order by v1 desc nulls last limit 7; +---- +4 3 5 +3 4 4 +2 3 3 +1 4 2 +NULL 7 NULL + statement ok drop table t; diff --git a/e2e_test/batch/basic/time_window.slt.part b/e2e_test/batch/basic/time_window.slt.part index ec7187da29319..11b70352f3113 100644 --- a/e2e_test/batch/basic/time_window.slt.part +++ b/e2e_test/batch/basic/time_window.slt.part @@ -28,6 +28,21 @@ from tumble(t1, created_at, interval '30' minute) order by row_id, window_start; 7 1 2022-01-01 10:51:00 2022-01-01 10:30:00 2022-01-01 11:00:00 8 3 2022-01-01 11:02:00 2022-01-01 11:00:00 2022-01-01 11:30:00 + +query IITTT +select row_id, uid, created_at, window_start, window_end +from tumble(t1, created_at, interval '30' minute, interval '13' minute) order by row_id, window_start; +---- +1 1 2022-01-01 10:00:00 2022-01-01 09:43:00 2022-01-01 10:13:00 +2 3 2022-01-01 10:05:00 2022-01-01 09:43:00 2022-01-01 10:13:00 +3 2 2022-01-01 10:14:00 2022-01-01 10:13:00 2022-01-01 10:43:00 +4 1 2022-01-01 10:22:00 2022-01-01 10:13:00 2022-01-01 10:43:00 +5 3 2022-01-01 10:33:00 2022-01-01 10:13:00 2022-01-01 10:43:00 +6 2 2022-01-01 10:42:00 2022-01-01 10:13:00 2022-01-01 10:43:00 +7 1 2022-01-01 10:51:00 2022-01-01 10:43:00 2022-01-01 11:13:00 +8 3 2022-01-01 11:02:00 2022-01-01 10:43:00 2022-01-01 11:13:00 + + query IITTT select row_id, uid, 
created_at, window_start, window_end from hop(t1, created_at, interval '15' minute, interval '30' minute) order by row_id, window_start; @@ -49,6 +64,27 @@ from hop(t1, created_at, interval '15' minute, interval '30' minute) order by ro 8 3 2022-01-01 11:02:00 2022-01-01 10:45:00 2022-01-01 11:15:00 8 3 2022-01-01 11:02:00 2022-01-01 11:00:00 2022-01-01 11:30:00 +query IITTT +select row_id, uid, created_at, window_start, window_end +from hop(t1, created_at, interval '15' minute, interval '30' minute, interval '13' minute) order by row_id, window_start; +---- +1 1 2022-01-01 10:00:00 2022-01-01 09:43:00 2022-01-01 10:13:00 +1 1 2022-01-01 10:00:00 2022-01-01 09:58:00 2022-01-01 10:28:00 +2 3 2022-01-01 10:05:00 2022-01-01 09:43:00 2022-01-01 10:13:00 +2 3 2022-01-01 10:05:00 2022-01-01 09:58:00 2022-01-01 10:28:00 +3 2 2022-01-01 10:14:00 2022-01-01 09:58:00 2022-01-01 10:28:00 +3 2 2022-01-01 10:14:00 2022-01-01 10:13:00 2022-01-01 10:43:00 +4 1 2022-01-01 10:22:00 2022-01-01 09:58:00 2022-01-01 10:28:00 +4 1 2022-01-01 10:22:00 2022-01-01 10:13:00 2022-01-01 10:43:00 +5 3 2022-01-01 10:33:00 2022-01-01 10:13:00 2022-01-01 10:43:00 +5 3 2022-01-01 10:33:00 2022-01-01 10:28:00 2022-01-01 10:58:00 +6 2 2022-01-01 10:42:00 2022-01-01 10:13:00 2022-01-01 10:43:00 +6 2 2022-01-01 10:42:00 2022-01-01 10:28:00 2022-01-01 10:58:00 +7 1 2022-01-01 10:51:00 2022-01-01 10:28:00 2022-01-01 10:58:00 +7 1 2022-01-01 10:51:00 2022-01-01 10:43:00 2022-01-01 11:13:00 +8 3 2022-01-01 11:02:00 2022-01-01 10:43:00 2022-01-01 11:13:00 +8 3 2022-01-01 11:02:00 2022-01-01 10:58:00 2022-01-01 11:28:00 + query IIT rowsort select row_id, uid, created_at from hop(t1, created_at, interval '15' minute, interval '30' minute); @@ -70,6 +106,29 @@ from hop(t1, created_at, interval '15' minute, interval '30' minute); 8 3 2022-01-01 11:02:00 8 3 2022-01-01 11:02:00 + +query IIT rowsort +select row_id, uid, created_at +from hop(t1, created_at, interval '15' minute, interval '30' minute, interval '13' minute); +---- +1 1 2022-01-01 10:00:00 +1 1 2022-01-01 10:00:00 +2 3 2022-01-01 10:05:00 +2 3 2022-01-01 10:05:00 +3 2 2022-01-01 10:14:00 +3 2 2022-01-01 10:14:00 +4 1 2022-01-01 10:22:00 +4 1 2022-01-01 10:22:00 +5 3 2022-01-01 10:33:00 +5 3 2022-01-01 10:33:00 +6 2 2022-01-01 10:42:00 +6 2 2022-01-01 10:42:00 +7 1 2022-01-01 10:51:00 +7 1 2022-01-01 10:51:00 +8 3 2022-01-01 11:02:00 +8 3 2022-01-01 11:02:00 + + query IT select sum(v), window_start from tumble(t1, created_at, interval '30' minute) @@ -79,6 +138,15 @@ group by window_start order by window_start; 18 2022-01-01 10:30:00 8 2022-01-01 11:00:00 +query IT +select sum(v), window_start +from tumble(t1, created_at, interval '30' minute, interval '13' minute) +group by window_start order by window_start; +---- +7 2022-01-01 09:43:00 +15 2022-01-01 10:13:00 +14 2022-01-01 10:43:00 + query IIT select uid, sum(v), window_start from tumble(t1, created_at, interval '30' minute) @@ -92,6 +160,20 @@ group by window_start, uid order by window_start, uid; 3 5 2022-01-01 10:30:00 3 8 2022-01-01 11:00:00 +query IIT +select uid, sum(v), window_start +from tumble(t1, created_at, interval '30' minute, interval '13' minute) +group by window_start, uid order by window_start, uid; +---- +1 4 2022-01-01 09:43:00 +3 3 2022-01-01 09:43:00 +1 1 2022-01-01 10:13:00 +2 9 2022-01-01 10:13:00 +3 5 2022-01-01 10:13:00 +1 6 2022-01-01 10:43:00 +3 8 2022-01-01 10:43:00 + + query IT select sum(v), window_start from hop(t1, created_at, interval '15' minute, interval '30' minute) @@ -104,6 +186,19 
@@ group by window_start order by window_start; 14 2022-01-01 10:45:00 8 2022-01-01 11:00:00 + +query IT +select sum(v), window_start +from hop(t1, created_at, interval '15' minute, interval '30' minute, interval '13' minute) +group by window_start order by window_start; +---- +7 2022-01-01 09:43:00 +10 2022-01-01 09:58:00 +15 2022-01-01 10:13:00 +18 2022-01-01 10:28:00 +14 2022-01-01 10:43:00 +8 2022-01-01 10:58:00 + query IIT select uid, sum(v), window_start from hop(t1, created_at, interval '15' minute, interval '30' minute) @@ -125,6 +220,28 @@ group by window_start, uid order by window_start, uid; 3 8 2022-01-01 10:45:00 3 8 2022-01-01 11:00:00 + + +query IIT +select uid, sum(v), window_start +from hop(t1, created_at, interval '15' minute, interval '30' minute, interval '13' minute) +group by window_start, uid order by window_start, uid; +---- +1 4 2022-01-01 09:43:00 +3 3 2022-01-01 09:43:00 +1 5 2022-01-01 09:58:00 +2 2 2022-01-01 09:58:00 +3 3 2022-01-01 09:58:00 +1 1 2022-01-01 10:13:00 +2 9 2022-01-01 10:13:00 +3 5 2022-01-01 10:13:00 +1 6 2022-01-01 10:28:00 +2 7 2022-01-01 10:28:00 +3 5 2022-01-01 10:28:00 +1 6 2022-01-01 10:43:00 +3 8 2022-01-01 10:43:00 +3 8 2022-01-01 10:58:00 + statement error select * from hop(t1, created_at, interval '0', interval '1'); diff --git a/e2e_test/batch/explain.slt b/e2e_test/batch/explain.slt index 00e735d360aa0..cff6af31f2470 100644 --- a/e2e_test/batch/explain.slt +++ b/e2e_test/batch/explain.slt @@ -5,7 +5,7 @@ statement ok explain create index i on t(v); statement ok -explain create sink sink_t from t with ( connector = 'kafka', format = 'append_only' ) +explain create sink sink_t from t with ( connector = 'kafka', type = 'append-only' ) statement ok drop table t; diff --git a/e2e_test/batch/functions/pi.slt.part b/e2e_test/batch/functions/pi.slt.part new file mode 100644 index 0000000000000..2f19b5132420e --- /dev/null +++ b/e2e_test/batch/functions/pi.slt.part @@ -0,0 +1,32 @@ +statement ok +create table f64_table (a double); + +statement ok +insert into f64_table values(pi()); + +statement ok +insert into f64_table values(1.618033); + +statement ok +create table f32_table (a real); + +statement ok +insert into f32_table values(pi()); + +query I +SELECT pi() +---- +3.141592653589793 + +query I rowsort +SELECT pi(), a from f64_table +---- +3.141592653589793 1.618033 +3.141592653589793 3.141592653589793 + + +statement ok +drop table f64_table + +statement ok +drop table f32_table \ No newline at end of file diff --git a/e2e_test/batch/types/interval.slt.part b/e2e_test/batch/types/interval.slt.part index b0d5b59d9bc59..84e436a838087 100644 --- a/e2e_test/batch/types/interval.slt.part +++ b/e2e_test/batch/types/interval.slt.part @@ -74,20 +74,20 @@ SELECT INTERVAL '3 mins' * 1.5; 00:04:30 # https://github.com/risingwavelabs/risingwave/issues/3873 -query TTTTT +query T select distinct * from (values (interval '1' month), (interval '30' day)) as t; ---- -30 days +1 mon -query TTTTT +query T select distinct * from (values (interval '30' day), (interval '1' month)) as t; ---- -30 days +1 mon -query TTTTT +query T select distinct * from (values (interval '720' hour), (interval '1' month)) as t; ---- -30 days +1 mon query TTTTTT select interval '1 year 1 month 1 day 1'; @@ -109,8 +109,59 @@ select interval '1 year 1 month 1 day 1:1:1.009'; ---- 1 year 1 mon 1 day 01:01:01.009 -# issue#7059, if we improve precision, then this should be removed. 
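The tumble/hop cases above exercise the new optional window-offset argument. A minimal Python sketch of the assignment rule those expected rows imply (the alignment formula and the epoch grid anchor are inferred from the output above, not taken from the actual implementation):

from datetime import datetime, timedelta

def tumble_window(ts: datetime, size: timedelta, offset: timedelta) -> tuple:
    """Tumbling window containing `ts`, on a grid shifted by `offset`."""
    epoch = datetime(1970, 1, 1)  # illustrative grid anchor
    # start = floor((ts - offset) / size) * size + offset
    k = (ts - epoch - offset) // size  # timedelta // timedelta -> int
    start = epoch + offset + k * size
    return start, start + size

# Row 1 above: 2022-01-01 10:00:00 with a 30-minute window and a
# 13-minute offset lands in [09:43:00, 10:13:00).
got = tumble_window(datetime(2022, 1, 1, 10, 0),
                    timedelta(minutes=30), timedelta(minutes=13))
assert got == (datetime(2022, 1, 1, 9, 43), datetime(2022, 1, 1, 10, 13))

Hop windows follow the same shifted grid, except that a row belongs to every window on the slide grid that covers it, which is why each row appears size/slide = 30/15 = 2 times in the hop output above.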
-query TTTTTT +# issue #7059 +query T select '1 mons 1 days 00:00:00.000001'::INTERVAL; ---- +1 mon 1 day 00:00:00.000001 + +query T +select '1 mons 1 days 00:00:00.0000001'::INTERVAL; +---- 1 mon 1 day + +# parsing large values + +query T +select '2562047788:00:54.775807'::interval; +---- +2562047788:00:54.775807 + +statement error +select '2562047788:00:54.775808'::interval; + +query T +select '4 years 2147483599 mon'::interval; +---- +178956970 years 7 mons + +statement error +select '4 years 2147483600 mon'::interval; + +query T +select '-2562047788:00:54.775807'::interval; +---- +-2562047788:00:54.775807 + +query T +select '-2562047788:00:54.775808'::interval; +---- +-2562047788:00:54.775808 + +statement error +select '-2562047788:00:54.775809'::interval; + +query T +select interval '3 mons -3 days' / 2; +---- +1 mon 14 days -12:00:00 + +# The following is an overflow bug present in PostgreSQL 15.2 +# Their `days` overflows to a negative value, leading to the latter smaller +# than the former. We report an error in this case. + +statement ok +select interval '2147483647 mons 2147483647 days' * 0.999999991; + +statement error out of range +select interval '2147483647 mons 2147483647 days' * 0.999999992; diff --git a/e2e_test/batch/types/temporal_arithmetic.slt.part b/e2e_test/batch/types/temporal_arithmetic.slt.part index a07c029ae31b7..80ef05e1310ab 100644 --- a/e2e_test/batch/types/temporal_arithmetic.slt.part +++ b/e2e_test/batch/types/temporal_arithmetic.slt.part @@ -66,7 +66,7 @@ select interval '20' / float '12.5'; query T select interval '12 days' / 4.2; ---- -2 days 20:34:17.143 +2 days 20:34:17.142857 query T SELECT interval '1 month' / 2000; @@ -136,7 +136,7 @@ select real '6.1' * interval '1' second; query T select real '61.1' * interval '1' second; ---- -00:01:01.1 +00:01:01.099998 query T select real '0.0' * interval '1' second; @@ -151,7 +151,7 @@ select real '0' * interval '1' second; query T select real '86' * interval '849884'; ---- -2 years 4 mons 5 days 22:47:04 +20302:47:04 query T select interval '1' second * real '6.1'; @@ -161,7 +161,7 @@ select interval '1' second * real '6.1'; query T select interval '1' second * real '61.1'; ---- -00:01:01.1 +00:01:01.099998 query T select interval '1' second * real '0.0'; @@ -176,7 +176,7 @@ select interval '1' second * real '0'; query T select interval '849884' * real '86'; ---- -2 years 4 mons 5 days 22:47:04 +20302:47:04 query T select '12:30:00'::time * 2; diff --git a/e2e_test/ddl/alter_rename_relation.slt b/e2e_test/ddl/alter_rename_relation.slt new file mode 100644 index 0000000000000..a14bd2db3eb0f --- /dev/null +++ b/e2e_test/ddl/alter_rename_relation.slt @@ -0,0 +1,148 @@ +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +statement ok +CREATE TABLE t (v1 INT primary key, v2 STRUCT>); + +statement ok +CREATE TABLE t_as AS ( WITH source_data AS ( SELECT 1 AS id) SELECT * FROM source_data); + +statement ok +CREATE MATERIALIZED VIEW mv AS SELECT v1, (t.v2).v1 AS v21 FROM t; + +statement ok +CREATE SINK sink AS SELECT mv3.v1 AS v1, mv3.v21 AS v2 FROM mv AS mv3 WITH ( + connector = 'blackhole' +); + +statement ok +CREATE VIEW v1 AS ( SELECT * FROM t_as WHERE id = 1); + +statement ok +CREATE VIEW v2 AS (SELECT COUNT(*) FROM t, t AS t2 WHERE t.v1 = t2.v1); + +statement ok +CREATE VIEW v3 AS (SELECT MAX((t.v2).v1) FROM t AS t); + +statement ok +CREATE VIEW v4 AS (SELECT * FROM t join t as t2 on (t.v1 = t2.v1) ORDER BY t.v1, t2.v1); + +statement ok +CREATE index idx ON t(v1); + +query TT +SHOW CREATE TABLE t; +---- +public.t 
CREATE TABLE t (v1 INT PRIMARY KEY, v2 STRUCT>) + +# alter table rename with alias conflict +statement ok +ALTER TABLE t RENAME TO t2; + +query TT +SHOW CREATE TABLE t2; +---- +public.t2 CREATE TABLE t2 (v1 INT PRIMARY KEY, v2 STRUCT>) + +query TT +SHOW CREATE VIEW v2; +---- +public.v2 CREATE VIEW v2 AS (SELECT COUNT(*) FROM t2 AS t, t2 AS t2 WHERE t.v1 = t2.v1) + +query TT +SHOW CREATE VIEW v3; +---- +public.v3 CREATE VIEW v3 AS (SELECT MAX((t.v2).v1) FROM t2 AS t) + +query TT +SHOW CREATE VIEW v4; +---- +public.v4 CREATE VIEW v4 AS (SELECT * FROM t2 AS t JOIN t2 AS t2 ON (t.v1 = t2.v1) ORDER BY t.v1, t2.v1) + +query TT +SHOW CREATE MATERIALIZED VIEW mv; +---- +public.mv CREATE MATERIALIZED VIEW mv AS SELECT v1, (t.v2).v1 AS v21 FROM t2 AS t + +# alter mview rename +statement ok +ALTER MATERIALIZED VIEW mv RENAME TO mv2; + +query TT +SHOW CREATE MATERIALIZED VIEW mv2; +---- +public.mv2 CREATE MATERIALIZED VIEW mv2 AS SELECT v1, (t.v2).v1 AS v21 FROM t2 AS t + +statement ok +ALTER SINK sink RENAME TO sink1; + +# alter mview rename with alias conflict, used by sink1 +statement ok +ALTER MATERIALIZED VIEW mv2 RENAME TO mv3; + +statement ok +ALTER TABLE t_as RENAME TO t_as_1; + +# alter view rename +statement ok +ALTER VIEW v1 RENAME TO v5; + +query TT +SHOW CREATE VIEW v5; +---- +public.v5 CREATE VIEW v5 AS (SELECT * FROM t_as_1 AS t_as WHERE id = 1) + +statement ok +ALTER INDEX idx RENAME TO idx1; + +statement ok +INSERT INTO t2 VALUES(1,(1,(1,2))); + +statement ok +INSERT INTO t2 VALUES(2,(2,(2,4))); + +query II rowsort +SELECT * from mv3 +---- +1 1 +2 2 + +query I +SELECT * from v2 +---- +2 + +query I +SELECT * from v3 +---- +2 + +query IIII rowsort +SELECT * from v4 +---- +1 (1,(1,2)) 1 (1,(1,2)) +2 (2,(2,4)) 2 (2,(2,4)) + +statement ok +DROP SINK sink1; + +statement ok +DROP VIEW v5; + +statement ok +DROP VIEW v4; + +statement ok +DROP VIEW v3; + +statement ok +DROP VIEW v2; + +statement ok +DROP MATERIALIZED VIEW mv3; + +statement ok +DROP TABLE t2; + +statement ok +DROP TABLE t_as_1; diff --git a/e2e_test/ddl/alter_table_column.slt b/e2e_test/ddl/alter_table_column.slt index 35ffcf205453c..fc142e1510191 100644 --- a/e2e_test/ddl/alter_table_column.slt +++ b/e2e_test/ddl/alter_table_column.slt @@ -127,7 +127,6 @@ select * from mv3; 3 3.3 3-3 # Drop column -# TODO(#4529): create mview on partial columns and test whether dropping the unrefereced column works. statement error being referenced alter table t drop column s; @@ -137,6 +136,10 @@ drop materialized view mv2; statement ok drop materialized view mv3; +statement ok +create materialized view mv5 as select v, s from t; + +# This should succeed as there's no materialized view referencing the column, including `mv5`. (#4529) statement ok alter table t drop column r; @@ -152,6 +155,13 @@ select v, s from t; 2 NULL 3 3-3 +query IR rowsort +select v, s from mv5; +---- +1 1-1 +2 NULL +3 3-3 + # Add column after dropping column, to test that the column ID is not reused. 
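The comment above states the invariant that the add-after-drop statements below check: column IDs come from a monotonic allocator and are never recycled, so a column re-added under an old name cannot alias data written for the dropped one. A toy Python illustration (hypothetical names, not the actual catalog code):

class TableCatalog:
    """Toy model of fresh-ID allocation for table columns."""

    def __init__(self):
        self._next_id = 0
        self._columns = {}  # column name -> column id

    def add_column(self, name: str) -> int:
        # Always hand out a fresh ID, even if `name` existed before.
        cid = self._next_id
        self._next_id += 1
        self._columns[name] = cid
        return cid

    def drop_column(self, name: str) -> None:
        # Dropping removes the binding but never frees the ID for reuse.
        del self._columns[name]

cat = TableCatalog()
old_id = cat.add_column("r")
cat.drop_column("r")
assert cat.add_column("r") != old_id  # re-added "r" gets a fresh ID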
statement ok alter table t add column r real; @@ -191,8 +201,43 @@ select v, s, r from t; 4 4-4 4.4 # Clean up +statement ok +drop materialized view mv5; + statement ok drop materialized view mv; statement ok drop table t; + +# Test the consistency of tables and indexes #https://github.com/risingwavelabs/risingwave/issues/8649 +statement ok +create table t(id int primary key, a int, b varchar); + +statement ok +create index idx on t(a); + +statement ok +alter table t add column c int; + +query IITI rowsort +select * from t; +---- + +statement ok +drop table t; + +statement ok +create table t(id int primary key, a int, b varchar); + +statement ok +create index idx on t(b) include(b); + +statement ok +alter table t drop column a; + +query II rowsort +select * from t where b = '1'; + +statement ok +drop table t; \ No newline at end of file diff --git a/e2e_test/ddl/invalid_operation.slt b/e2e_test/ddl/invalid_operation.slt index e236acd071a2b..e1a7ad90db12e 100644 --- a/e2e_test/ddl/invalid_operation.slt +++ b/e2e_test/ddl/invalid_operation.slt @@ -264,7 +264,7 @@ SELECT * from v limit 0; statement ok insert into t values (1); -statement ok +statement error QueryError: Bind error: update modifying the PK column is banned update t set v = 2; statement ok diff --git a/e2e_test/ddl/show.slt b/e2e_test/ddl/show.slt index 5c1a69d2c9930..f70dfb853c5f4 100644 --- a/e2e_test/ddl/show.slt +++ b/e2e_test/ddl/show.slt @@ -32,7 +32,7 @@ v1 Int32 v2 Int32 v3 Int32 primary key _row_id -idx1 index(v1, v2) include(v3) distributed by(v1, v2) +idx1 index(v1 ASC, v2 ASC) include(v3) distributed by(v1, v2) statement ok drop index idx1; diff --git a/e2e_test/ddl/table.slt b/e2e_test/ddl/table.slt index dacebfe493b36..4c076b979fc25 100644 --- a/e2e_test/ddl/table.slt +++ b/e2e_test/ddl/table.slt @@ -1,258 +1 @@ -# Create a table. -statement ok -create table ddl_t (v1 int); - -statement ok -explain select v1 from ddl_t; - -# Create another table with duplicated name. -statement error -create table ddl_t (v2 int); - -# Create a table using a empty string. -statement error -create table ""(v2 int); - -statement ok -create table if not exists ddl_t (v2 int); - -# Drop the table. -statement ok -drop table ddl_t; - -# Drop it again. -statement error -drop table ddl_t; - -# Create another table with the same name. -statement ok -create table ddl_t (v2 int); - -statement ok -explain select v2 from ddl_t; - -# Create a mview on top of it. -statement ok -create materialized view ddl_mv as select v2 from ddl_t; - -statement ok -explain select v2 from ddl_t; - -statement ok -explain create sink sink_t from ddl_t with ( connector = 'kafka', format = 'append_only', force_append_only = 'true' ); - -statement ok -explain create sink sink_as as select sum(v2) as sum from ddl_t with ( connector = 'kafka', format = 'append_only', force_append_only = 'true' ); - -# Create a mview with duplicated name. -statement error -create materialized view ddl_mv as select v2 from ddl_t; - -# Drop the table before dropping the mview. -statement error -drop table ddl_t; - -# We're not allowed to drop the mview using `DROP TABLE`. -statement error -drop table ddl_mv; - -# Drop the mview. -statement ok -drop materialized view ddl_mv; - -# Drop it again. -statement error -drop materialized view ddl_mv; - -# We're not allowed to drop the table using `DROP MATERIALIZED VIEW`. -statement error -drop materialized view ddl_t; - -# Now, we can drop the base table. -statement ok -drop table ddl_t; - -# Create table concludes struct column. 
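Among the hunks above, invalid_operation.slt now expects `update t set v = 2` to fail with a bind error when `v` is the primary key. A plausible way to picture the restriction (an illustration of the rationale, not the binder's code): a streaming UPDATE is encoded as a retract/insert pair on one key, so an update that moves a row to a different key has no faithful encoding.

def encode_update(pk_old, pk_new, old_row, new_row):
    """Encode an UPDATE as a (U-, U+) changelog pair on a single key."""
    if pk_old != pk_new:
        # Would silently become a DELETE plus an INSERT on two keys,
        # so the statement is rejected at bind time instead.
        raise ValueError("update modifying the PK column is banned")
    return [("U-", pk_old, old_row), ("U+", pk_new, new_row)]

assert encode_update(1, 1, (1, "a"), (1, "b"))  # same key: representable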
-statement ok -create table st (v1 int, v2 struct>); - -statement ok -drop table st - -# We test the case sensitivity of table name and column name. -statement ok -create table t1 (v1 int); - -statement ok -drop table T1; - -statement ok -create table T1 (v1 int); - -statement ok -drop table t1; - -statement ok -create table "T1" (v1 int); - -# Since we have not really bound the columns in the insert statement -# this test case cannot be enabled. -# statement error -# insert into "T1" ("V1") values (1); - -statement error -drop table t1; - -statement error -drop table T1; - -statement ok -drop table "T1"; - -statement ok -create table "T2" ("V1" int); - -# Since we have not really bound the columns in the insert statement -# this test case cannot be enabled. -# statement error -# insert into "T2" (V1) values (1); - -statement ok -insert into "T2" ("V1") values (1); - -statement error -create table C1 (c1 varchar(5)); - -statement error -create table t (v1 int not null); - -statement error -create table t (v1 varchar collate "en_US"); - -# Test create-table-as -statement ok -create table t as select 1; - -statement ok -drop table t; - -statement error -create table t as select 1,2; - -statement ok -create table t as select 1 as a, 2 as b; - -statement ok -drop table t; - -statement ok -create table t(v1) as select 1; - -statement ok -drop table t; - -statement ok -create table t (v1 int,v2 int); - -statement ok -insert into t values (1,1); - -statement ok -insert into t values (1,1); - -statement ok -insert into t values (1,1); - -statement ok -flush - -statement ok -create table t1 as select * from t; - -statement ok -flush; - -query I -select * from t1; ----- -1 1 -1 1 -1 1 - -statement ok -drop table t1; - -statement ok -drop table t; - -statement ok -create table t AS SELECT * FROM generate_series(0, 5,1) tbl(i); - -statement ok -flush; - -query I -select * from t order by i; ----- -0 -1 -2 -3 -4 -5 - -statement ok -drop table t; - -statement ok -create table t (v1 int); - -statement ok -insert into t values (1); - -statement ok -insert into t values (2); - -statement ok -create table n1 as select sum(v1) from t; - -statement ok -flush; - -query I -select * from n1; ----- -3 - -statement error -create table n1 (v2 int); - -statement error -create table n1 as select * from t; - -statement ok -create table if not exists n1 (v2 int); - -statement ok -drop table n1; - -statement ok -drop table t; - -statement ok -create table t (v1 int,v2 int); - -statement ok -create table t1(a,b) as select v1,v2 from t; - -statement ok -create table t2(a) as select v1,v2 from t; - -statement ok -drop table t; - -statement ok -drop table t1; - -statement ok -drop table t2; +include ./table/*.slt.part diff --git a/e2e_test/ddl/table/generated_columns.slt.part b/e2e_test/ddl/table/generated_columns.slt.part new file mode 100644 index 0000000000000..f6c0a18067838 --- /dev/null +++ b/e2e_test/ddl/table/generated_columns.slt.part @@ -0,0 +1,41 @@ +# Create a table with generated columns. +statement ok +create table t1 (v1 int as v2-1, v2 int, v3 int as v2+1); + +statement ok +insert into t1 (v2) values (1), (2); + +statement ok +flush; + +query III +select * from t1; +---- +0 1 2 +1 2 3 + +statement ok +drop table t1; + +# Create a table with generated columns. 
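Before the second table below, it is worth spelling out what the t1 case above checks: only the plain column is supplied on insert, and the generated columns are evaluated from it. A small sketch of that evaluation, with ordinary Python standing in for the expression evaluator (`materialize_t1` is a name invented here):

def materialize_t1(v2: int) -> tuple:
    # t1: v1 int as v2 - 1, v2 int, v3 int as v2 + 1
    return (v2 - 1, v2, v2 + 1)

# Reproduces the expected rows for `insert into t1 (v2) values (1), (2)`.
assert [materialize_t1(v) for v in (1, 2)] == [(0, 1, 2), (1, 2, 3)]

The error case at the end of this file fits the same model: a generated column may only reference non-generated columns, so `v1 int as v2+1` combined with `v3 int as v1-1` is rejected.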
+statement ok +create table t2 (v1 int, v2 int as v1+1); + +statement ok +insert into t2 values (1), (2); + +statement ok +flush; + +query II +select * from t2; +---- +1 2 +2 3 + +statement ok +drop table t2; + +# Generated column references another generated column +statement error +create table t2 (v1 int as v2+1, v2 int, v3 int as v1-1); diff --git a/e2e_test/ddl/table/table.slt.part b/e2e_test/ddl/table/table.slt.part new file mode 100644 index 0000000000000..525982f2c579d --- /dev/null +++ b/e2e_test/ddl/table/table.slt.part @@ -0,0 +1,261 @@ +# Create a table. +statement ok +create table ddl_t (v1 int); + +statement ok +explain select v1 from ddl_t; + +# Create another table with duplicated name. +statement error +create table ddl_t (v2 int); + +# Create a table using an empty string. +statement error +create table ""(v2 int); + +statement ok +create table if not exists ddl_t (v2 int); + +# Drop the table. +statement ok +drop table ddl_t; + +# Drop it again. +statement error +drop table ddl_t; + +# Create another table with the same name. +statement ok +create table ddl_t (v2 int); + +statement ok +explain select v2 from ddl_t; + +# Create a mview on top of it. +statement ok +create materialized view ddl_mv as select v2 from ddl_t; + +statement ok +explain select v2 from ddl_t; + +statement ok +explain create sink sink_t from ddl_t with ( connector = 'kafka', type = 'append-only', force_append_only = 'true' ); + +statement ok +explain create sink sink_as as select sum(v2) as sum from ddl_t with ( connector = 'kafka', type = 'append-only', force_append_only = 'true' ); + +# Create a mview with duplicated name. +statement error +create materialized view ddl_mv as select v2 from ddl_t; + +# Drop the table before dropping the mview. +statement error +drop table ddl_t; + +# We're not allowed to drop the mview using `DROP TABLE`. +statement error +drop table ddl_mv; + +# Drop the mview. +statement ok +drop materialized view ddl_mv; + +# Drop it again. +statement error +drop materialized view ddl_mv; + +# We're not allowed to drop the table using `DROP MATERIALIZED VIEW`. +statement error +drop materialized view ddl_t; + +# Now, we can drop the base table. +statement ok +drop table ddl_t; + +# Create a table with a struct column. +statement ok +create table st (v1 int, v2 struct>); + +statement ok +drop table st + +# We test the case sensitivity of table name and column name. +statement ok +create table t1 (v1 int); + +statement ok +drop table T1; + +statement ok +create table T1 (v1 int); + +statement ok +drop table t1; + +statement ok +create table "T1" (v1 int); + +# Since we have not really bound the columns in the insert statement +# this test case cannot be enabled. +# statement error +# insert into "T1" ("V1") values (1); + +statement error +drop table t1; + +statement error +drop table T1; + +statement ok +drop table "T1"; + +statement ok +create table "T2" ("V1" int); + +# Since we have not really bound the columns in the insert statement +# this test case cannot be enabled.
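The case-sensitivity block above relies on PostgreSQL-style identifier folding: unquoted names fold to lowercase, while quoted names are kept verbatim. A minimal model of the matching rule (illustrative only; `normalize_ident` is not a function from the codebase):

def normalize_ident(ident: str) -> str:
    """Fold an SQL identifier the way the tests above expect."""
    if ident.startswith('"') and ident.endswith('"'):
        return ident[1:-1]  # quoted: case preserved
    return ident.lower()    # unquoted: folded to lowercase

# `drop table T1` matches a table created as `t1` ...
assert normalize_ident("T1") == normalize_ident("t1")
# ... but not one created as "T1", which must be dropped as "T1".
assert normalize_ident('"T1"') != normalize_ident("t1")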
+# statement error +# insert into "T2" (V1) values (1); + +statement ok +insert into "T2" ("V1") values (1); + +statement ok +drop table "T2" + +statement error +create table C1 (c1 varchar(5)); + +statement error +create table t (v1 int not null); + +statement error +create table t (v1 varchar collate "en_US"); + +# Test create-table-as +statement ok +create table t as select 1; + +statement ok +drop table t; + +statement error +create table t as select 1,2; + +statement ok +create table t as select 1 as a, 2 as b; + +statement ok +drop table t; + +statement ok +create table t(v1) as select 1; + +statement ok +drop table t; + +statement ok +create table t (v1 int,v2 int); + +statement ok +insert into t values (1,1); + +statement ok +insert into t values (1,1); + +statement ok +insert into t values (1,1); + +statement ok +flush + +statement ok +create table t1 as select * from t; + +statement ok +flush; + +query I +select * from t1; +---- +1 1 +1 1 +1 1 + +statement ok +drop table t1; + +statement ok +drop table t; + +statement ok +create table t AS SELECT * FROM generate_series(0, 5,1) tbl(i); + +statement ok +flush; + +query I +select * from t order by i; +---- +0 +1 +2 +3 +4 +5 + +statement ok +drop table t; + +statement ok +create table t (v1 int); + +statement ok +insert into t values (1); + +statement ok +insert into t values (2); + +statement ok +create table n1 as select sum(v1) from t; + +statement ok +flush; + +query I +select * from n1; +---- +3 + +statement error +create table n1 (v2 int); + +statement error +create table n1 as select * from t; + +statement ok +create table if not exists n1 (v2 int); + +statement ok +drop table n1; + +statement ok +drop table t; + +statement ok +create table t (v1 int,v2 int); + +statement ok +create table t1(a,b) as select v1,v2 from t; + +statement ok +create table t2(a) as select v1,v2 from t; + +statement ok +drop table t; + +statement ok +drop table t1; + +statement ok +drop table t2; diff --git a/e2e_test/extended_query/basic.slt b/e2e_test/extended_mode/basic.slt similarity index 54% rename from e2e_test/extended_query/basic.slt rename to e2e_test/extended_mode/basic.slt index c2cf66d73295d..b54fa84d6b4b2 100644 --- a/e2e_test/extended_query/basic.slt +++ b/e2e_test/extended_mode/basic.slt @@ -1,14 +1,4 @@ -# The basic.slt is to cover the path of pgwire. -# -# There are two kinds of statement, they run different path of pgwire: -# 1. un-query statement: SET,CREATE,INSERT,FLUSH,EXPLAIN,DROP.. -# 2. query statement: SELECT,WITH,VALUES,SHOW,DESCRIBE.. -# -# We also need to test different type in extended query mode: -# smallint,int,bigint -# real,double precision,numeric -# time,date,timestamp - +# Test different statements(DDL,DQL,DML) in extended mode. statement ok SET RW_IMPLICIT_FLUSH TO true; @@ -78,18 +68,3 @@ with t as (select generate_series(1,3,1)) select * from t; 1 2 3 - -query III -select 42::smallint, 42::int, 42::bigint; ----- -42 42 42 - -query III -select 42::real,42::double precision,42::decimal; ----- -42 42 42 - -query TTT -select '20:55:12'::time,'2022-07-12'::date,'2022-07-12 20:55:12'::timestamp; ----- -20:55:12 2022-07-12 2022-07-12 20:55:12 diff --git a/e2e_test/extended_mode/type.slt b/e2e_test/extended_mode/type.slt new file mode 100644 index 0000000000000..2271ecb51c5c9 --- /dev/null +++ b/e2e_test/extended_mode/type.slt @@ -0,0 +1,28 @@ +# Test binary format of different type. 
(sqllogictest returns binary format in extended mode) + +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +# RisingWave can't support list and struct now so we skip them. +# include ../batch/types/array.slt.part +# include ../batch/types/struct.slt.part +# include ../batch/types/list.slt.part + +# Sqllogictest can't support binary format bytea type so we skip it. +# include ../batch/types/bytea.slt.part + +# Can't support inf,-inf binary format now so we skip it. +# include ../batch/types/decimal.slt.part + +# Sqllogictest can't support binary format jsonb type so we skip it. +# include ../batch/types/jsonb_ord.slt.part +# include ../batch/types/jsonb.slt.part + +include ../batch/types/boolean.slt.part +include ../batch/types/cast.slt.part +include ../batch/types/date.slt +include ../batch/types/intercal.slt.part +include ../batch/types/number_arithmetic.slt.part +include ../batch/types/temporal_arithmetic.slt.part +include ../batch/types/time.slt.part +include ../batch/types/timestamptz_utc.slt.part diff --git a/e2e_test/s3/run_csv.py b/e2e_test/s3/run_csv.py new file mode 100644 index 0000000000000..c5412c1e57d07 --- /dev/null +++ b/e2e_test/s3/run_csv.py @@ -0,0 +1,155 @@ +import os +import string +import json +import string +from time import sleep +from minio import Minio +import psycopg2 +import random + + +def do_test(config, N, n, prefix): + conn = psycopg2.connect( + host="localhost", + port="4566", + user="root", + database="dev" + ) + + # Open a cursor to execute SQL statements + cur = conn.cursor() + cur.execute(f'''CREATE TABLE s3_test_csv_without_headers( + a int, + b int, + c int, + ) WITH ( + connector = 's3', + match_pattern = '{prefix}_data_without_headers.csv', + s3.region_name = '{config['S3_REGION']}', + s3.bucket_name = '{config['S3_BUCKET']}', + s3.credentials.access = '{config['S3_ACCESS_KEY']}', + s3.credentials.secret = '{config['S3_SECRET_KEY']}', + s3.endpoint_url = 'https://{config['S3_ENDPOINT']}' + ) ROW FORMAT CSV WITHOUT HEADER DELIMITED BY ',';''') + + cur.execute(f'''CREATE TABLE s3_test_csv_with_headers( + a int, + b int, + c int, + ) WITH ( + connector = 's3', + match_pattern = '{prefix}_data_with_headers.csv', + s3.region_name = '{config['S3_REGION']}', + s3.bucket_name = '{config['S3_BUCKET']}', + s3.credentials.access = '{config['S3_ACCESS_KEY']}', + s3.credentials.secret = '{config['S3_SECRET_KEY']}', + s3.endpoint_url = 'https://{config['S3_ENDPOINT']}' + ) ROW FORMAT CSV DELIMITED BY ',';''') + + total_row = int(N * n) + sleep(60) + while True: + sleep(60) + cur.execute('select count(*) from s3_test_csv_with_headers') + result_with_headers = cur.fetchone() + cur.execute('select count(*) from s3_test_csv_without_headers') + result_without_headers = cur.fetchone() + if result_with_headers[0] == total_row and result_without_headers[0] == total_row: + break + print( + f"Now got {result_with_headers[0]} rows in table, {total_row} expected, wait 60s") + + cur.execute( + 'select count(*), sum(a), sum(b), sum(c) from s3_test_csv_with_headers') + result_with_headers = cur.fetchone() + + cur.execute( + 'select count(*), sum(a), sum(b), sum(c) from s3_test_csv_without_headers') + s3_test_csv_without_headers = cur.fetchone() + + print(result_with_headers, s3_test_csv_without_headers, + int(((N - 1) * N / 2) * n), int(N*n / 2)) + + assert s3_test_csv_without_headers[0] == total_row + assert s3_test_csv_without_headers[1] == int(((N - 1) * N / 2) * n) + assert s3_test_csv_without_headers[2] == int(N*n / 2) + assert s3_test_csv_without_headers[3] == 0 + + assert
result_with_headers[0] == total_row + assert result_with_headers[1] == 0 + assert result_with_headers[2] == int(N*n / 2) + assert result_with_headers[3] == int(((N - 1) * N / 2) * n) + + cur.execute('drop table s3_test_csv_with_headers') + cur.execute('drop table s3_test_csv_without_headers') + + cur.close() + conn.close() + + +if __name__ == "__main__": + config = json.loads(os.environ["S3_SOURCE_TEST_CONF"]) + run_id = str(random.randint(1000, 9999)) + N = 10000 + # do_test(config, N, 0, run_id) + items = [",".join([str(j), str(j % 2), str(-1 if j % 2 else 1)]) + for j in range(N) + ] + + data = "\n".join(items) + "\n" + n = 10 + with open("data_without_headers.csv", "w") as f: + for _ in range(10): + f.write(data) + os.fsync(f.fileno()) + + with open("data_with_headers.csv", "w") as f: + f.write("c,b,a\n") + for _ in range(10): + f.write(data) + os.fsync(f.fileno()) + + client = Minio( + config["S3_ENDPOINT"], + access_key=config["S3_ACCESS_KEY"], + secret_key=config["S3_SECRET_KEY"], + secure=True + ) + + try: + client.fput_object( + config["S3_BUCKET"], + f"{run_id}_data_without_headers.csv", + f"data_without_headers.csv" + + ) + client.fput_object( + config["S3_BUCKET"], + f"{run_id}_data_with_headers.csv", + f"data_with_headers.csv" + ) + print( + f"Uploaded {run_id}_data_with_headers.csv & {run_id}_data_with_headers.csv to S3") + os.remove(f"data_with_headers.csv") + os.remove(f"data_without_headers.csv") + except Exception as e: + print(f"Error uploading test files") + + return_code = 0 + try: + do_test(config, N, n, run_id) + except Exception as e: + print("Test failed", e) + return_code = 1 + + # Clean up + for i in range(20): + try: + client.remove_object( + config["S3_BUCKET"], f"{run_id}_data_with_headers.csv") + client.remove_object( + config["S3_BUCKET"], f"{run_id}_data_without_headers.csv") + except Exception as e: + print(f"Error removing testing files {e}") + + exit(return_code) diff --git a/e2e_test/sink/append_only_sink.slt b/e2e_test/sink/append_only_sink.slt index cf4e185d1b28f..57f194f033f4f 100644 --- a/e2e_test/sink/append_only_sink.slt +++ b/e2e_test/sink/append_only_sink.slt @@ -1,35 +1,20 @@ statement ok -create table t1 (v1 int, v2 int); - -statement error No primary key for the upsert sink -create sink s1 from t1 with (connector = 'console'); - -statement ok -create sink s1 as select v1, v2, _row_id from t1 with (connector = 'console'); - -statement ok -create table t2 (v1 int, v2 int primary key); - -statement ok -create sink s2 from t2 with (connector = 'console'); - -statement error No primary key for the upsert sink -create sink s3 as select avg(v1) from t2 with (connector = 'console'); +create table t (v1 int, v2 int); statement ok -create sink s3 as select avg(v1) from t2 with (connector = 'console', format = 'append_only', force_append_only = 'true'); +create sink s1 from t with (connector = 'console'); statement ok -create sink s4 as select avg(v1), v2 from t2 group by v2 with (connector = 'console'); +create sink s2 as select avg(v1), v2 from t group by v2 with (connector = 'console'); statement error The sink cannot be append-only -create sink s5 from t2 with (connector = 'console', format = 'append_only'); +create sink s3 from t with (connector = 'console', type = 'append-only'); statement ok -create sink s5 from t2 with (connector = 'console', format = 'append_only', force_append_only = 'true'); +create sink s3 from t with (connector = 'console', type = 'append-only', force_append_only = 'true'); statement error Cannot force the sink to be 
append-only -create sink s6 from t2 with (connector = 'console', format = 'upsert', force_append_only = 'true'); +create sink s4 from t with (connector = 'console', type = 'upsert', force_append_only = 'true'); statement ok drop sink s1 @@ -41,13 +26,4 @@ statement ok drop sink s3 statement ok -drop sink s4 - -statement ok -drop sink s5 - -statement ok -drop table t1 - -statement ok -drop table t2 +drop table t diff --git a/e2e_test/sink/iceberg_sink.slt b/e2e_test/sink/iceberg_sink.slt index 9b9e51c095cdd..2a214c0710ddd 100644 --- a/e2e_test/sink/iceberg_sink.slt +++ b/e2e_test/sink/iceberg_sink.slt @@ -1,21 +1,33 @@ statement ok -CREATE TABLE t6 (v1 int primary key, v2 int); +CREATE TABLE t6 (v1 int primary key, v2 bigint, v3 varchar); statement ok CREATE MATERIALIZED VIEW mv6 AS SELECT * FROM t6; statement ok -CREATE SINK s6 AS select mv6.v1 as v1, mv6.v2 as v2 from mv6 WITH ( +CREATE SINK s6 AS select mv6.v1 as v1, mv6.v2 as v2, mv6.v3 as v3 from mv6 WITH ( connector = 'iceberg', - sink.mode='append-only', - location.type='minio', - warehouse.path='minio://hummockadmin:hummockadmin@127.0.0.1:9301/iceberg', + type = 'upsert', + primary_key = 'v1', + warehouse.path = 's3://iceberg', + s3.endpoint = 'http://127.0.0.1:9301', + s3.access.key = 'hummockadmin', + s3.secret.key = 'hummockadmin', database.name='demo_db', table.name='demo_table' ); statement ok -INSERT INTO t6 VALUES (1, 2), (2, 2), (3, 2), (5, 2), (8, 2), (13, 2), (21, 2); +INSERT INTO t6 VALUES (1, 2, '1-2'), (2, 2, '2-2'), (3, 2, '3-2'), (5, 2, '5-2'), (8, 2, '8-2'), (13, 2, '13-2'), (21, 2, '21-2'); + +statement ok +FLUSH; + +statement ok +INSERT INTO t6 VALUES (1, 50, '1-50'); + +statement ok +FLUSH; statement ok DROP SINK s6; @@ -25,6 +37,3 @@ DROP MATERIALIZED VIEW mv6; statement ok DROP TABLE t6; - -statement ok -FLUSH; diff --git a/e2e_test/sink/remote/jdbc.check.pg.slt b/e2e_test/sink/remote/jdbc.check.pg.slt index 601a0b29e57b7..bd00a7938a898 100644 --- a/e2e_test/sink/remote/jdbc.check.pg.slt +++ b/e2e_test/sink/remote/jdbc.check.pg.slt @@ -3,8 +3,8 @@ query I select * from t_remote order by id; ---- -1 Alex -3 Carl -4 Doris -5 Eve -6 Frank \ No newline at end of file +1 Alex 28208 281620391 4986480304337356800 28162.0391 2.03 28162.0391 2023-03-20 10:18:30 +3 Carl 18300 1702307129 7878292368468104192 17023.07129 23.07 17023.07129 2023-03-20 10:18:32 +4 Doris 17250 151951802 3946135584462581760 1519518.02 18.02 1519518.02 2023-03-21 10:18:30 +5 Eve 9725 698160808 524334216698825600 69.8160808 69.81 69.8160808 2023-03-21 10:18:31 +6 Frank 28131 1233587627 8492820454814063616 123358.7627 58.76 123358.7627 2023-03-21 10:18:32 diff --git a/e2e_test/sink/remote/jdbc.load.slt b/e2e_test/sink/remote/jdbc.load.slt index 88bec6048187a..9adbb40a1b972 100644 --- a/e2e_test/sink/remote/jdbc.load.slt +++ b/e2e_test/sink/remote/jdbc.load.slt @@ -1,5 +1,15 @@ statement ok -create table t_remote (id integer primary key, name varchar); +create table t_remote ( + id integer primary key, + v_varchar varchar, + v_smallint smallint, + v_integer integer, + v_bigint bigint, + v_decimal decimal, + v_float float, + v_double double, + v_timestamp timestamp +); statement ok create materialized view mv_remote as select * from t_remote; @@ -19,16 +29,22 @@ CREATE SINK s_mysql FROM mv_remote WITH ( ); statement ok -INSERT INTO t_remote VALUES (1, 'Alice'), (2, 'Bob'), (3, 'Carl'); +INSERT INTO t_remote VALUES + (1, 'Alice', 28208, 281620391, 4986480304337356659, 28162.0391, 2.03, 28162.0391, '2023-03-20 10:18:30'), + (2, 'Bob', 10580, 2131030003, 
3074255027698877876, 21310.30003, 10.3, 21310.30003, '2023-03-20 10:18:31'), + (3, 'Carl', 18300, 1702307129, 7878292368468104216, 17023.07129, 23.07, 17023.07129, '2023-03-20 10:18:32'); statement ok -INSERT INTO t_remote VALUES (4, 'Doris'), (5, 'Eve'), (6, 'Frank'); +INSERT INTO t_remote VALUES + (4, 'Doris', 17250, 151951802, 3946135584462581863, 1519518.02, 18.02, 1519518.02, '2023-03-21 10:18:30'), + (5, 'Eve', 9725, 698160808, 524334216698825611, 69.8160808, 69.81, 69.8160808, '2023-03-21 10:18:31'), + (6, 'Frank', 28131, 1233587627, 8492820454814063326, 123358.7627, 58.76, 123358.7627, '2023-03-21 10:18:32'); statement ok FLUSH; statement ok -UPDATE t_remote SET name = 'Alex' WHERE id = 1; +UPDATE t_remote SET v_varchar = 'Alex' WHERE id = 1; statement ok DELETE FROM t_remote WHERE id = 2; diff --git a/e2e_test/sink/remote/mysql_create_table.sql b/e2e_test/sink/remote/mysql_create_table.sql new file mode 100644 index 0000000000000..491072eb59d63 --- /dev/null +++ b/e2e_test/sink/remote/mysql_create_table.sql @@ -0,0 +1,11 @@ +CREATE TABLE t_remote ( + id integer PRIMARY KEY, + v_varchar varchar(100), + v_smallint smallint, + v_integer integer, + v_bigint bigint, + v_decimal decimal, + v_float float, + v_double double, + v_timestamp timestamp +); \ No newline at end of file diff --git a/e2e_test/sink/remote/mysql_expected_result.tsv b/e2e_test/sink/remote/mysql_expected_result.tsv new file mode 100644 index 0000000000000..8e738579032d2 --- /dev/null +++ b/e2e_test/sink/remote/mysql_expected_result.tsv @@ -0,0 +1,5 @@ +1 Alex 28208 281620391 4986480304337356800 28162 2.03 28162.0391 2023-03-20 10:18:30 +3 Carl 18300 1702307129 7878292368468104192 17023 23.07 17023.07129 2023-03-20 10:18:32 +4 Doris 17250 151951802 3946135584462581760 1519518 18.02 1519518.02 2023-03-21 10:18:30 +5 Eve 9725 698160808 524334216698825600 70 69.81 69.8160808 2023-03-21 10:18:31 +6 Frank 28131 1233587627 8492820454814063616 123359 58.76 123358.7627 2023-03-21 10:18:32 diff --git a/e2e_test/sink/remote/pg_create_table.sql b/e2e_test/sink/remote/pg_create_table.sql new file mode 100644 index 0000000000000..c20e3386e8d06 --- /dev/null +++ b/e2e_test/sink/remote/pg_create_table.sql @@ -0,0 +1,11 @@ +CREATE TABLE t_remote ( + id integer PRIMARY KEY, + v_varchar varchar(100), + v_smallint smallint, + v_integer integer, + v_bigint bigint, + v_decimal decimal, + v_float real, + v_double double precision, + v_timestamp timestamp +); \ No newline at end of file diff --git a/e2e_test/source/basic/kafka.slt b/e2e_test/source/basic/kafka.slt index 0e6f7e6bca00b..5563e01dd15b9 100644 --- a/e2e_test/source/basic/kafka.slt +++ b/e2e_test/source/basic/kafka.slt @@ -79,7 +79,7 @@ from s5 with ( properties.bootstrap.server = '127.0.0.1:29092', topic = 'sink_target', - format = 'append_only', + type = 'append-only', connector = 'kafka' ) @@ -384,7 +384,7 @@ select id, first_name, last_name, email from s8; query IITFFBTT select id, sequence_id, name, score, avg_score, is_lasted, entrance_date, birthday, passed from s9; ---- -32 64 str_value 32 64 t 1970-01-01 1970-01-01 00:00:00 1 mon 1 day 00:00:01 +32 64 str_value 32 64 t 1970-01-01 1970-01-01 00:00:00+00:00 1 mon 1 day 00:00:01 query ITITT select id, code, timestamp, xfas, contacts, sex from s10; diff --git a/e2e_test/source/cdc/cdc.load.slt b/e2e_test/source/cdc/cdc.load.slt index 4258636e804ac..48a5b9197927f 100644 --- a/e2e_test/source/cdc/cdc.load.slt +++ b/e2e_test/source/cdc/cdc.load.slt @@ -57,7 +57,6 @@ create table shipments ( username = 'postgres', password = 
'postgres', database.name = 'cdc_test', - schema.name = 'public', table.name = 'shipments', slot.name = 'shipments' ); diff --git a/e2e_test/source/cdc/cdc.validate.postgres.slt b/e2e_test/source/cdc/cdc.validate.postgres.slt index ddc46fe92c377..15fce0b554dc9 100644 --- a/e2e_test/source/cdc/cdc.validate.postgres.slt +++ b/e2e_test/source/cdc/cdc.validate.postgres.slt @@ -16,7 +16,6 @@ create table shipments ( username = 'posres', password = 'postgres', database.name = 'cdc_test', - schema.name = 'public', table.name = 'shipments', slot.name = 'shipments' ); @@ -38,7 +37,6 @@ create table shipments ( username = 'postgres', password = 'otgres', database.name = 'cdc_test', - schema.name = 'public', table.name = 'shipments', slot.name = 'shipments' ); @@ -59,7 +57,6 @@ create table shipments ( username = 'postgres', password = 'postgres', database.name = 'cdc_test', - schema.name = 'public', table.name = 'shipment', slot.name = 'shipments' ); diff --git a/e2e_test/streaming/array_agg.slt b/e2e_test/streaming/array_agg.slt index 42c02a05c47ab..cf76386e3866e 100644 --- a/e2e_test/streaming/array_agg.slt +++ b/e2e_test/streaming/array_agg.slt @@ -13,9 +13,6 @@ create materialized view mv1 as select array_agg(c) as res from t; statement ok create materialized view mv2 as select array_agg(a order by b asc, a desc) as res from t; -statement ok -flush; - query T select u from (select unnest(res) from mv1) p(u) order by u; ---- @@ -47,11 +44,25 @@ select * from mv2; ---- {ccc,bbb,x,ddd,aaa,y} +statement ok +create materialized view mv3 as select array_agg(a order by b nulls first, a nulls last) as res from t; + +statement ok +insert into t values (NULL, NULL, 2), ('z', NULL, 6); + +query T +select * from mv3; +---- +{z,NULL,bbb,ccc,aaa,ddd,x,y} + statement ok drop materialized view mv1; statement ok drop materialized view mv2; +statement ok +drop materialized view mv3; + statement ok drop table t; diff --git a/e2e_test/streaming/bug_fixes/issue_8084.slt b/e2e_test/streaming/bug_fixes/issue_8084.slt new file mode 100644 index 0000000000000..446620cd57c4b --- /dev/null +++ b/e2e_test/streaming/bug_fixes/issue_8084.slt @@ -0,0 +1,24 @@ +# https://github.com/risingwavelabs/risingwave/issues/8084 + +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +statement ok +create table t (a int primary key); + +statement ok +create materialized view mv as select t1.* from t as t1 full join t as t2 on t1.a = t2.a; + +statement ok +insert into t values(null); + +# TODO: https://github.com/risingwavelabs/risingwave/issues/8084 +query I +select * from mv; +---- + +statement ok +drop materialized view mv; + +statement ok +drop table t; diff --git a/e2e_test/streaming/bug_fixes/issue_8570.slt b/e2e_test/streaming/bug_fixes/issue_8570.slt new file mode 100644 index 0000000000000..5a61fdc322c95 --- /dev/null +++ b/e2e_test/streaming/bug_fixes/issue_8570.slt @@ -0,0 +1,54 @@ +# https://github.com/risingwavelabs/risingwave/issues/8570 +# TopN cache invalidation issue + +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +statement ok +create table t(x int); + +statement ok +create materialized view t_singleton as select * from t order by x limit 100; + +statement ok +create materialized view mv as select * from t_singleton order by x limit 1; + +statement ok +insert into t values (1), (2), (3), (4); + +statement ok +delete from t where x = 2; + +statement ok +insert into t values (5); + +statement ok +delete from t where x = 1; + +statement ok +insert into t values (6); + +statement ok +delete from t where x = 3; + +# Shouldn't be 
5 +query I +select * from mv; +---- +4 + +statement ok +delete from t where x = 4; + +# Shouldn't panic +statement ok +insert into t values (1); + +statement ok +drop materialized view mv; + +statement ok +drop materialized view t_singleton; + +statement ok +drop table t; diff --git a/e2e_test/streaming/order_by.slt b/e2e_test/streaming/order_by.slt index 4a6e44c9a8191..c87b937b3d718 100644 --- a/e2e_test/streaming/order_by.slt +++ b/e2e_test/streaming/order_by.slt @@ -16,9 +16,6 @@ create materialized view mv2 as select * from t1 order by v1 limit 3; statement ok create materialized view mv3 as select * from t1 order by v1 limit 3 offset 1; -statement ok -flush; - query III rowsort select v1, v2, v3 from mv1; ---- @@ -43,13 +40,60 @@ select v1, v2, v3 from mv3; 5 1 4 statement ok -drop materialized view mv1 +insert into t1 values (NULL,0,0); + +statement ok +create materialized view mv4 as select * from t1 order by v1 desc limit 1; + +statement ok +create materialized view mv5 as select * from t1 order by v1 nulls first limit 1; + +statement ok +create materialized view mv6 as select * from t1 order by v1 nulls last limit 1; + +statement ok +create materialized view mv7 as select * from t1 order by v1 desc nulls last limit 1; + +query III +select v1, v2, v3 from mv4; +---- +NULL 0 0 + +query III +select v1, v2, v3 from mv5; +---- +NULL 0 0 + +query III +select v1, v2, v3 from mv6; +---- +0 2 3 + +query III +select v1, v2, v3 from mv7; +---- +9 8 1 + +statement ok +drop materialized view mv1; + +statement ok +drop materialized view mv2; + +statement ok +drop materialized view mv3; + +statement ok +drop materialized view mv4; + +statement ok +drop materialized view mv5; statement ok -drop materialized view mv2 +drop materialized view mv6; statement ok -drop materialized view mv3 +drop materialized view mv7; statement ok -drop table t1 +drop table t1; diff --git a/e2e_test/streaming/temporal_join.slt b/e2e_test/streaming/temporal_join.slt new file mode 100644 index 0000000000000..a3d594985757f --- /dev/null +++ b/e2e_test/streaming/temporal_join.slt @@ -0,0 +1,64 @@ +statement ok +SET RW_IMPLICIT_FLUSH TO true; + +statement ok +create table stream(id1 int, a1 int, b1 int) APPEND ONLY; + +statement ok +create table version(id2 int, a2 int, b2 int, primary key (id2)); + +statement ok +create materialized view v as select id1, a1, id2, a2 from stream left join version FOR SYSTEM_TIME AS OF NOW() on id1 = id2 + +statement ok +insert into stream values(1, 11, 111); + +statement ok +insert into version values(1, 11, 111); + +statement ok +insert into stream values(1, 11, 111); + +statement ok +delete from version; + +query IIII rowsort +select * from v; +---- +1 11 1 11 +1 11 NULL NULL + +statement ok +insert into version values(2, 22, 222); + +statement ok +insert into stream values(2, 22, 222); + +query IIII rowsort +select * from v; +---- +1 11 1 11 +1 11 NULL NULL +2 22 2 22 + +statement ok +drop materialized view v; + +statement ok +create materialized view v as select id1, a1, id2, a2 from stream left join version FOR SYSTEM_TIME AS OF NOW() on id1 = id2 + +query IIII rowsort +select * from v; +---- +1 11 NULL NULL +1 11 NULL NULL +2 22 2 22 + +statement ok +drop materialized view v; + +statement ok +drop table stream; + +statement ok +drop table version; diff --git a/e2e_test/udf/python.slt b/e2e_test/udf/python.slt index 56683b6b6d9ee..560116e2e05fd 100644 --- a/e2e_test/udf/python.slt +++ b/e2e_test/udf/python.slt @@ -24,6 +24,13 @@ create function gcd(int, int, int) returns int language 
python as gcd3 using lin statement error exists create function gcd(int, int) returns int language python as gcd using link 'http://localhost:8815'; +# Create a table function. +statement ok +create function series(int) returns table (x int) language python as series using link 'http://localhost:8815'; + +statement ok +create function series2(int) returns table (x int, s varchar) language python as series2 using link 'http://localhost:8815'; + query I select int_42(); ---- @@ -39,6 +46,26 @@ select gcd(25, 15, 3); ---- 1 +query I +select series(5); +---- +0 +1 +2 +3 +4 + +# FIXME: support table function with multiple columns +# query IT +# select series2(5); +# ---- +# (0,0) +# (1,1) +# (2,2) +# (3,3) +# (4,4) + + # TODO: drop function without arguments # # Drop a function but ambiguous. diff --git a/e2e_test/udf/test.py b/e2e_test/udf/test.py index 4ef01fd9700dd..eabc10ad9290a 100644 --- a/e2e_test/udf/test.py +++ b/e2e_test/udf/test.py @@ -1,7 +1,8 @@ import sys +from typing import Iterator sys.path.append('src/udf/python') # noqa -from risingwave.udf import udf, UdfServer +from risingwave.udf import udf, udtf, UdfServer @udf(input_types=[], result_type='INT') @@ -21,9 +22,23 @@ def gcd3(x: int, y: int, z: int) -> int: return gcd(gcd(x, y), z) +@udtf(input_types='INT', result_types='INT') +def series(n: int) -> Iterator[int]: + for i in range(n): + yield i + + +@udtf(input_types=['INT'], result_types=['INT', 'VARCHAR']) +def series2(n: int) -> Iterator[tuple[int, str]]: + for i in range(n): + yield i, str(i) + + if __name__ == '__main__': server = UdfServer() server.add_function(int_42) server.add_function(gcd) server.add_function(gcd3) + server.add_function(series) + server.add_function(series2) server.serve() diff --git a/grafana/risingwave-dashboard.dashboard.py b/grafana/risingwave-dashboard.dashboard.py index e6593ad33a233..105f23f683ae0 100644 --- a/grafana/risingwave-dashboard.dashboard.py +++ b/grafana/risingwave-dashboard.dashboard.py @@ -520,10 +520,16 @@ def section_cluster_node(panels): [ panels.target( f"sum(rate({metric('process_cpu_seconds_total')}[$__rate_interval])) by (job,instance)", - "{{job}} @ {{instance}}", - ) + "cpu - {{job}} @ {{instance}}", + ), + + panels.target( + f"sum(rate({metric('process_cpu_seconds_total')}[$__rate_interval])) by (job,instance) / avg({metric('process_cpu_core_num')}) by (job,instance)", + "cpu usage -{{job}} @ {{instance}}", + ), ], ), + panels.timeseries_count( "Meta Cluster", "", @@ -562,6 +568,16 @@ def section_compaction(outer_panels): ), ], ), + panels.timeseries_count( + "scale compactor core count", + "compactor core resource need to scale out", + [ + panels.target( + f"sum({metric('storage_compactor_suggest_core_count')})", + "suggest-core-count" + ), + ], + ), panels.timeseries_count( "Compaction Success & Failure Count", "num of compactions from each level to next level", @@ -768,7 +784,7 @@ def section_compaction(outer_panels): "Total bytes gotten from sstable_avg_key_size, for observing sstable_avg_key_size", [ panels.target( - f"sum by(le, job, instance)(rate({metric('compactor_sstable_avg_key_size_sum')}[$__rate_interval])) / sum by(le, job, instance)(rate({metric('state_store_sstable_avg_key_size_count')}[$__rate_interval]))", + f"sum by(le, job, instance)(rate({metric('compactor_sstable_avg_key_size_sum')}[$__rate_interval])) / sum by(le, job, instance)(rate({metric('compactor_sstable_avg_key_size_count')}[$__rate_interval]))", "avg_key_size - {{job}} @ {{instance}}", ), panels.target( @@ -778,6 +794,17 @@ def 
section_compaction(outer_panels): ], ), + panels.timeseries_count( + "Hummock Sstable Stat", + "Avg count gotten from sstable_distinct_epoch_count, for observing sstable_distinct_epoch_count", + [ + panels.target( + f"sum by(le, job, instance)(rate({metric('compactor_sstable_distinct_epoch_count_sum')}[$__rate_interval])) / sum by(le, job, instance)(rate({metric('compactor_sstable_distinct_epoch_count_count')}[$__rate_interval]))", + "avg_epoch_count - {{job}} @ {{instance}}", + ), + ], + ), + panels.timeseries_latency( "Hummock Remote Read Duration", "Total time of operations which read from remote storage when enable prefetch", @@ -1351,11 +1378,7 @@ def section_streaming_actors(outer_panels): ), panels.target( f"rate({metric('stream_join_insert_cache_miss_count')}[$__rate_interval])", - "cache miss when insert {{actor_id}} {{side}}", - ), - panels.target( - f"rate({metric('stream_join_may_exist_true_count')}[$__rate_interval])", - "may_exist true when insert {{actor_id}} {{side}}", + "cache miss when insert{{actor_id}} {{side}}", ), ], ), @@ -1553,7 +1576,6 @@ def section_batch_exchange(outer_panels): ), ] - def section_frontend(outer_panels): panels = outer_panels.sub_panel() return [ diff --git a/grafana/risingwave-dashboard.json b/grafana/risingwave-dashboard.json index 1a57c67903a1d..6df502856165c 100644 --- a/grafana/risingwave-dashboard.json +++ b/grafana/risingwave-dashboard.json @@ -1 +1 @@ -{"__inputs":[],"annotations":{"list":[]},"description":"RisingWave Dashboard","editable":true,"gnetId":null,"graphTooltip":0,"hideControls":false,"id":null,"links":[],"panels":[{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":1,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Cluster Node","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":2,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(worker_num) by (worker_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_type}}","metric":"","query":"sum(worker_num) by (worker_type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":1},"height":null,"hideTimeOverride":false,"id":3,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_resident_memory_bytes) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"avg(process_resident_memory_bytes) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Memory","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":9},"height":null,"hideTimeOverride":false,"id":4,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(process_cpu_seconds_total[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node 
CPU","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":9},"height":null,"hideTimeOverride":false,"id":5,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(meta_num) by (worker_addr,role)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_addr}} @ {{role}}","metric":"","query":"sum(meta_num) by (worker_addr,role)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Meta Cluster","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":17},"height":null,"hideTimeOverride":false,"id":6,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":18},"height":null,"hideTimeOverride":false,"id":7,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_output_rows_counts[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} {{source_id}} @ {{instance}}","metric":"","query":"rate(stream_source_output_rows_counts[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source 
Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":18},"height":null,"hideTimeOverride":false,"id":8,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(partition_input_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","query":"rate(partition_input_count[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows) Per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":26},"height":null,"hideTimeOverride":false,"id":9,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}}","metric":"","query":"(sum by (source_id)(rate(partition_input_bytes[$__rate_interval])))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source 
Throughput(bytes)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":26},"height":null,"hideTimeOverride":false,"id":10,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(rate(partition_input_bytes[$__rate_interval]))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","query":"(rate(partition_input_bytes[$__rate_interval]))/(1000*1000)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(bytes) Per Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":34},"height":null,"hideTimeOverride":false,"id":11,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_rows_per_barrier_counts[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} @ {{instance}}","metric":"","query":"rate(stream_source_rows_per_barrier_counts[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows) per 
barrier","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":34},"height":null,"hideTimeOverride":false,"id":12,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"all_barrier_nums","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all_barrier","metric":"","query":"all_barrier_nums","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"in_flight_barrier_nums","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"in_flight_barrier","metric":"","query":"in_flight_barrier_nums","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":42},"height":null,"hideTimeOverride":false,"id":13,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_send_duration_seconds_sum[$__rate_interval]) / rate(meta_barrier_send_duration_seconds_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_avg","metric":"","query":"rate(meta_barrier_send_duration_seconds_sum[$__rate_interval]) / rate(meta_barrier_send_duration_seconds_count[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Send Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":42},"height":null,"hideTimeOverride":false,"id":14,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_duration_seconds_sum[$__rate_interval]) / rate(meta_barrier_duration_seconds_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_avg","metric":"","query":"rate(meta_barrier_duration_seconds_sum[$__rate_interval]) / rate(meta_barrier_duration_seconds_count[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":50},"height":null,"hideTimeOverride":false,"id":15,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"max(sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_sum[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count[$__rate_interval])))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_avg","metric":"","query":"max(sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_sum[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count[$__rate_interval])))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier In-Flight 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":50},"height":null,"hideTimeOverride":false,"id":16,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p90 - {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p999 - {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_pmax - {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by 
(le,instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_avg - {{instance}}","metric":"","query":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Sync Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":58},"height":null,"hideTimeOverride":false,"id":17,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p50","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p90","metric":"","query":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p99","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p999","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_pmax","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_wait_commit_duration_seconds_sum[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_avg","metric":"","query":"rate(meta_barrier_wait_commit_duration_seconds_sum[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Wait Commit Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":66},"height":null,"hideTimeOverride":false,"id":18,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":19,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_executor_row_count[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}->{{executor_id}}","metric":"","query":"rate(stream_executor_row_count[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor 
Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":20,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_output_buffer_blocking_duration_ns[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_output_buffer_blocking_duration_ns[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Backpressure","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":21,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(actor_memory_usage[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(actor_memory_usage[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Memory 
Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":22,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_input_buffer_blocking_duration_ns[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}->{{upstream_fragment_id}}","metric":"","query":"rate(stream_actor_input_buffer_blocking_duration_ns[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input Blocking Time Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":23,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_barrier_time[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_barrier_time[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Barrier 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":24,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_processing_time[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_processing_time[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Processing Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":25,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_actor_execution_time[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_actor_execution_time[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Execution 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":26,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_in_record_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_in_record_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":27,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_out_record_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_out_record_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Output 
Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":28,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_duration[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":29,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":40},"height":null,"hideTimeOverride":false,"id":30,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration[$__rate_interval]) / rate(stream_actor_fast_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_fast_poll_duration[$__rate_interval]) / rate(stream_actor_fast_poll_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Avg Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":40},"height":null,"hideTimeOverride":false,"id":31,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_duration[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":32,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":48},"height":null,"hideTimeOverride":false,"id":33,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration[$__rate_interval]) / rate(stream_actor_slow_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_slow_poll_duration[$__rate_interval]) / rate(stream_actor_slow_poll_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":48},"height":null,"hideTimeOverride":false,"id":34,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_duration[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Total Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":35,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":56},"height":null,"hideTimeOverride":false,"id":36,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration[$__rate_interval]) / rate(stream_actor_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_poll_duration[$__rate_interval]) / rate(stream_actor_poll_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Avg Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":56},"height":null,"hideTimeOverride":false,"id":37,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_duration[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":38,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":64},"height":null,"hideTimeOverride":false,"id":39,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration[$__rate_interval]) / rate(stream_actor_idle_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_idle_duration[$__rate_interval]) / rate(stream_actor_idle_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":64},"height":null,"hideTimeOverride":false,"id":40,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_duration[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Total Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":41,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":72},"height":null,"hideTimeOverride":false,"id":42,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration[$__rate_interval]) / rate(stream_actor_scheduled_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_actor_scheduled_duration[$__rate_interval]) / rate(stream_actor_scheduled_cnt[$__rate_interval]) > 0","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Avg Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":43,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_miss_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss {{actor_id}} {{side}}","metric":"","query":"rate(stream_join_lookup_miss_count[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_total_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total lookups {{actor_id}} {{side}}","metric":"","query":"rate(stream_join_lookup_total_count[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_insert_cache_miss_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss when insert 
{{actor_id}} {{side}}","metric":"","query":"rate(stream_join_insert_cache_miss_count[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_may_exist_true_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"may_exist true when insert {{actor_id}} {{side}}","metric":"","query":"rate(stream_join_may_exist_true_count[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":44,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 {{actor_id}}.{{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 {{actor_id}}.{{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 {{actor_id}}.{{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax {{actor_id}}.{{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, actor_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum[$__rate_interval])) / sum by(le,actor_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg {{actor_id}}.{{wait_side}} - {{job}} @ {{instance}}","metric":"","query":"sum by(le, actor_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum[$__rate_interval])) / sum by(le,actor_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Barrier Align","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":45,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_actor_input_waiting_duration_ns[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"rate(stream_join_actor_input_waiting_duration_ns[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Input Blocking Time 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":46,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_match_duration_ns[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}.{{side}}","metric":"","query":"rate(stream_join_match_duration_ns[$__rate_interval]) / 1000000000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Match Duration Per Second","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":96},"height":null,"hideTimeOverride":false,"id":47,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_entries","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_entries","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached 
Entries","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":96},"height":null,"hideTimeOverride":false,"id":48,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_rows","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_rows","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Rows","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":104},"height":null,"hideTimeOverride":false,"id":49,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_estimated_size","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","query":"stream_join_cached_estimated_size","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Estimated 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":104},"height":null,"hideTimeOverride":false,"id":50,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_miss_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_miss_count[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_total_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total lookups {{actor_id}}","metric":"","query":"rate(stream_agg_lookup_total_count[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each Key/State","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":112},"height":null,"hideTimeOverride":false,"id":51,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_miss_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"chunk-level cache miss {{actor_id}}","metric":"","query":"rate(stream_agg_chunk_lookup_miss_count[$__rate_interval])","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_total_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"chunk-level total lookups 
{{actor_id}}","metric":"","query":"rate(stream_agg_chunk_lookup_total_count[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each StreamChunk","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":112},"height":null,"hideTimeOverride":false,"id":52,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_cached_keys","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","query":"stream_agg_cached_keys","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Cached Keys","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming Actors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":67},"height":null,"hideTimeOverride":false,"id":53,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":54,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_send_size[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","query":"rate(stream_exchange_frag_send_size[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-leve
l Remote Exchange Send Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":55,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_recv_size[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","query":"rate(stream_exchange_frag_recv_size[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Recv Throughput","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming Exchange","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":68},"height":null,"hideTimeOverride":false,"id":56,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":57,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_compute_error_count) by (error_type, error_msg, fragment_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: fragment_id={{fragment_id}})","metric":"","query":"sum(user_compute_error_count) by (error_type, error_msg, fragment_id, 
executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compute Errors by Type","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":58,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_error_count) by (error_type, error_msg, fragment_id, table_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: table_id={{table_id}}, fragment_id={{fragment_id}})","metric":"","query":"sum(user_source_error_count) by (error_type, error_msg, fragment_id, table_id, executor_name)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Errors by Type","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"User Streaming Errors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":69},"height":null,"hideTimeOverride":false,"id":59,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":60,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_task_exchange_recv_row_number","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{query_id}} : {{source_stage_id}}.{{source_task_id}} -> 
{{target_stage_id}}.{{target_task_id}}","metric":"","query":"batch_task_exchange_recv_row_number","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Exchange Recv Row Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":61,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_task_num","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_task_num","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Mpp Task Number","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Batch Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":70},"height":null,"hideTimeOverride":false,"id":62,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":71},"height":null,"hideTimeOverride":false,"id":63,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ 
{{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_duration_sum[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(state_store_sync_duration_sum[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Build and Sync Sstable Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":71},"height":null,"hideTimeOverride":false,"id":64,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts[$__rate_interval])) by (job, instance, table_id, 
type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{table_id}} @ {{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_sst_store_block_request_counts[$__rate_interval])) by (job, instance, table_id, type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_latency_count[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache {{op}} @ {{instance}}","metric":"","query":"sum(rate(file_cache_latency_count[$__rate_interval])) by (op, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_miss[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache miss @ {{instance}}","metric":"","query":"sum(rate(file_cache_miss[$__rate_interval])) by (instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":79},"height":null,"hideTimeOverride":false,"id":65,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_duration_count[$__rate_interval])) by (job,instanc,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_duration_count[$__rate_interval])) by (job,instanc,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_range_reverse_scan_duration_count[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"backward scan - {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_range_reverse_scan_duration_count[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_shared_buffer_hit_counts[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer hit - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_shared_buffer_hit_counts[$__rate_interval])) by 
(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_in_process_counts[$__rate_interval])) by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_iter_in_process_counts[$__rate_interval])) by(job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":79},"height":null,"hideTimeOverride":false,"id":66,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by 
(le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_get_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_get_duration_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":87},"height":null,"hideTimeOverride":false,"id":67,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p999 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_duration_sum[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_iter_duration_sum[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_duration_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p999 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_scan_iter_duration_sum[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, 
instance)(rate(state_store_scan_iter_duration_sum[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":87},"height":null,"hideTimeOverride":false,"id":68,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.9, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.9, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.999, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.999, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, 
table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":95},"height":null,"hideTimeOverride":false,"id":69,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by 
(le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":95},"height":null,"hideTimeOverride":false,"id":70,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.999, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Count - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":103},"height":null,"hideTimeOverride":false,"id":71,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_key_size_sum[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_get_key_size_sum[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum[$__rate_interval])) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":103},"height":null,"hideTimeOverride":false,"id":72,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_size_sum[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_iter_size_sum[$__rate_interval])) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":111},"height":null,"hideTimeOverride":false,"id":73,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_may_exist_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_may_exist_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{table_id}} {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, 
instance, table_id)(rate(state_store_may_exist_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_may_exist_duration_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - MayExist","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":111},"height":null,"hideTimeOverride":false,"id":74,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_bloom_filter_true_negative_counts[$__rate_interval])) by (job,instance,table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter true negative - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_bloom_filter_true_negative_counts[$__rate_interval])) by (job,instance,table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_positive_but_non_exist_counts[$__rate_interval])) by (job,instance,table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter false positive count - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_positive_but_non_exist_counts[$__rate_interval])) by (job,instance,table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_bloom_filter_positive_counts[$__rate_interval])) by (job,instance,table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read_req bloom filter positive - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_bloom_filter_positive_counts[$__rate_interval])) by (job,instance,table_id,type)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_check_bloom_filter_counts[$__rate_interval])) by (job,instance,table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read_req check bloom filter - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_read_req_check_bloom_filter_counts[$__rate_interval])) by (job,instance,table_id,type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Bloom 
Filter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":119},"height":null,"hideTimeOverride":false,"id":75,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_scan_key_counts[$__rate_interval])) by (instance, type, table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{table_id}} @ {{type}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_iter_scan_key_counts[$__rate_interval])) by (instance, type, table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Iter keys flow","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":119},"height":null,"hideTimeOverride":false,"id":76,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (sum(rate(state_store_bloom_filter_true_negative_counts[$__rate_interval])) by (job,instance,table_id,type)) / (sum(rate(state_bloom_filter_check_counts[$__rate_interval])) by (job,instance,table_id,type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter miss rate - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","query":"1 - (sum(rate(state_store_bloom_filter_true_negative_counts[$__rate_interval])) by (job,instance,table_id,type)) / (sum(rate(state_bloom_filter_check_counts[$__rate_interval])) by (job,instance,table_id,type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss'}[$__rate_interval])) by (job,instance,table_id)) / 
(sum(rate(state_store_sst_store_block_request_counts{type='meta_total'}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss'}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='meta_total'}[$__rate_interval])) by (job,instance,table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss'}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total'}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"block cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss'}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total'}[$__rate_interval])) by (job,instance,table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(file_cache_miss[$__rate_interval])) by (instance)) / (sum(rate(file_cache_latency_count{op='get'}[$__rate_interval])) by (instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache miss rate @ {{instance}}","metric":"","query":"(sum(rate(file_cache_miss[$__rate_interval])) by (instance)) / (sum(rate(file_cache_latency_count{op='get'}[$__rate_interval])) by (instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (((sum(rate(state_store_read_req_bloom_filter_positive_counts[$__rate_interval])) by (job,instance,table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts[$__rate_interval])) by (job,instance,table_id,type)))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read req bloom filter filter rate - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","query":"1 - (((sum(rate(state_store_read_req_bloom_filter_positive_counts[$__rate_interval])) by (job,instance,table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts[$__rate_interval])) by (job,instance,table_id,type)))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (((sum(rate(state_store_read_req_positive_but_non_exist_counts[$__rate_interval])) by (job,instance,table_id,type))) / (sum(rate(state_store_read_req_bloom_filter_positive_counts[$__rate_interval])) by (job,instance,table_id,type)))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read req bloom filter false positive rate - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","query":"1 - (((sum(rate(state_store_read_req_positive_but_non_exist_counts[$__rate_interval])) by (job,instance,table_id,type))) / (sum(rate(state_store_read_req_bloom_filter_positive_counts[$__rate_interval])) by (job,instance,table_id,type)))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":" Filter/Cache Miss 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":127},"height":null,"hideTimeOverride":false,"id":77,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_merge_sstable_counts_bucket[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p90 - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_iter_merge_sstable_counts_bucket[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p99 - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts pmax - {{table_id}} @ {{job}} @ {{type}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket[$__rate_interval])) by (le, job, table_id, type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Merged 
SSTs","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":127},"height":null,"hideTimeOverride":false,"id":78,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_duration_count[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write batch - {{table_id}} @ {{job}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_write_batch_duration_count[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sync_duration_count[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"l0 - {{job}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_sync_duration_count[$__rate_interval])) by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":135},"height":null,"hideTimeOverride":false,"id":79,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, 
table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":135},"height":null,"hideTimeOverride":false,"id":80,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_tuple_counts[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write_batch_kv_pair_count - {{table_id}} @ {{instance}} ","metric":"","query":"sum(rate(state_store_write_batch_tuple_counts[$__rate_interval])) by (job,instance,table_id)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Item 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":143},"height":null,"hideTimeOverride":false,"id":81,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_size_sum[$__rate_interval]))by(job,instance) / sum(rate(state_store_write_batch_size_count[$__rate_interval]))by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum(rate(state_store_write_batch_size_sum[$__rate_interval]))by(job,instance) / sum(rate(state_store_write_batch_size_count[$__rate_interval]))by(job,instance,table_id)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_shared_buffer_to_sstable_size[$__rate_interval]))by(job,instance) / sum(rate(state_store_shared_buffer_to_sstable_size_count[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sync - {{job}} @ {{instance}}","metric":"","query":"sum(rate(compactor_shared_buffer_to_sstable_size[$__rate_interval]))by(job,instance) / sum(rate(state_store_shared_buffer_to_sstable_size_count[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":143},"height":null,"hideTimeOverride":false,"id":82,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_size_sum[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(state_store_sync_size_sum[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Checkpoint Sync Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":151},"height":null,"hideTimeOverride":false,"id":83,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_meta_cache_size) by 
(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache - {{job}} @ {{instance}}","metric":"","query":"avg(state_store_meta_cache_size) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_block_cache_size) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"data cache - {{job}} @ {{instance}}","metric":"","query":"avg(state_store_block_cache_size) by (job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_limit_memory_size) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"uploading memory - {{job}}","metric":"","query":"sum(state_store_limit_memory_size) by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":151},"height":null,"hideTimeOverride":false,"id":84,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Row SeqScan Next Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":159},"height":null,"hideTimeOverride":false,"id":85,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by 
(le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fetch Meta Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":167},"height":null,"hideTimeOverride":false,"id":86,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"num of SSTs in each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":87,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_sst_num) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"sum(storage_level_sst_num) by (instance, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"SST 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"KBs total file bytes in each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":88,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"sum(storage_level_total_file_size) by (instance, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs level sst","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of compactions from each level to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":89,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_frequency) by (compactor, group, task_type, result)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task_type}} - {{result}} - group-{{group}} @ {{compactor}}","metric":"","query":"sum(storage_level_compact_frequency) by (compactor, group, task_type, result)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Success & Failure Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of compaction task which does not 
trigger","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":90,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_skip_compact_frequency[$__rate_interval])) by (level, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{level}}-{{type}}","metric":"","query":"sum(rate(storage_skip_compact_frequency[$__rate_interval])) by (level, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Skip Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of compactions from each level to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":91,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(storage_compact_task_pending_num) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor_task_split_count - {{job}} @ {{instance}}","metric":"","query":"avg(storage_compact_task_pending_num) by(job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Running Task Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"Total time of compact that have been issued to state 
store","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":92,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(compactor_compact_task_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(compactor_compact_task_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_compact_task_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_compact_task_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_compact_task_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_compact_task_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_compact_sst_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_compact_sst_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_compact_sst_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_compact_sst_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, 
sum(rate(compactor_get_table_id_total_time_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_get_table_id_total_time_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(compactor_compact_task_duration_sum[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task avg","metric":"","query":"sum by(le)(rate(compactor_compact_task_duration_sum[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(state_store_compact_sst_duration_sum[$__rate_interval])) / sum by(le)(rate(state_store_compact_sst_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range avg","metric":"","query":"sum by(le)(rate(state_store_compact_sst_duration_sum[$__rate_interval])) / sum by(le)(rate(state_store_compact_sst_duration_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"KBs read from next level during history compactions to next 
level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":93,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next[$__rate_interval])) by(job,instance) + sum(rate(storage_level_compact_read_curr[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}} @ {{instance}}","metric":"","query":"sum(rate(storage_level_compact_read_next[$__rate_interval])) by(job,instance) + sum(rate(storage_level_compact_read_curr[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(storage_level_compact_write[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_write_build_l0_bytes[$__rate_interval]))by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}} @ {{instance}}","metric":"","query":"sum(rate(compactor_write_build_l0_bytes[$__rate_interval]))by (job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of SSTs written into next level during history compactions to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":94,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write) by 
(job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","query":"sum(storage_level_compact_write) by (job)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(compactor_write_build_l0_bytes) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}}","metric":"","query":"sum(compactor_write_build_l0_bytes) by (job)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of SSTs written into next level during history compactions to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":95,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write) / sum(state_store_write_build_l0_bytes)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write amplification","metric":"","query":"sum(storage_level_compact_write) / sum(state_store_write_build_l0_bytes)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Amplification","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of SSTs to be merged to next level in each 
level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":96,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_level_compact_cnt","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","query":"storage_level_compact_cnt","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compacting SST Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":97,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} read","metric":"","query":"sum(rate(storage_level_compact_read_next[$__rate_interval])) by (le, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs Read from Next 
Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":98,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_curr[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} read","metric":"","query":"sum(rate(storage_level_compact_read_curr[$__rate_interval])) by (le, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs Read from Current Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":99,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_sstn_curr[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} read","metric":"","query":"sum(rate(storage_level_compact_read_sstn_curr[$__rate_interval])) by (le, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Count of SSTs Read from Current 
Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":100,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} write","metric":"","query":"sum(rate(storage_level_compact_write[$__rate_interval])) by (le, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs Written to Next Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":101,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write_sstn[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} write","metric":"","query":"sum(rate(storage_level_compact_write_sstn[$__rate_interval])) by (le, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Count of SSTs Written to Next Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of SSTs read from next level during history compactions to next 
level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":102,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_sstn_next[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} read","metric":"","query":"sum(rate(storage_level_compact_read_sstn_next[$__rate_interval])) by (le, level_index)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Count of SSTs Read from Next Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"Total bytes gotten from sstable_bloom_filter, for observing bloom_filter size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":103,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_meta - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_file_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_file_size_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_file - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, 
instance)(rate(compactor_sstable_file_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_file_size_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"Total bytes gotten from sstable_avg_key_size, for observing sstable_avg_key_size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":104,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_key_size - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_value_size - {{job}} @ {{instance}}","metric":"","query":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Item Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"Total time of operations which read from remote storage when enable
prefetch","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":105,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance, table_id))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Remote Read Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":106,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_iter_scan_key_counts[$__rate_interval])) by (instance, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{type}} @ {{instance}} ","metric":"","query":"sum(rate(compactor_iter_scan_key_counts[$__rate_interval])) by (instance, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Iter 
keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"bytes of Lsm tree needed to reach balance","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":107,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_pending_bytes) by (instance, group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact pending bytes - {{group}} @ {{instance}} ","metric":"","query":"sum(storage_compact_pending_bytes) by (instance, group)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Compact Pending Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"compression ratio of each level of the lsm tree","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":108,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_level_compression_ratio) by (instance, group, level, algorithm)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lsm compression ratio - cg{{group}} @ L{{level}} - {{algorithm}} {{instance}} ","metric":"","query":"sum(storage_compact_level_compression_ratio) by (instance, group, level, algorithm)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Level Compression 
Ratio","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Compaction","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":168},"height":null,"hideTimeOverride":false,"id":109,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":110,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_read_bytes[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_write_bytes[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":111,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} avg - {{job}} @ {{instance}}","metric":"","query":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation 
Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":112,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count[$__rate_interval])) by (le, type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count[$__rate_interval])) by (le, type, job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'upload|delete'}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-write - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type=~'upload|delete'}[$__rate_interval])) by (le, media_type, job, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata'}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-read - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata'}[$__rate_interval])) by (le, media_type, job, instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation 
Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":113,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":114,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_failure_count[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","query":"sum(rate(object_store_failure_count[$__rate_interval])) by (instance, job, type)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Failure Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":115,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_read_bytes) * 0.01 / 1000 / 1000 / 1000","format":"time_series","hide":true,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"(Cross Region) Data Transfer Cost","metric":"","query":"sum(object_store_read_bytes) * 0.01 / 1000 / 1000 / 1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete'}) * 0.0004 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GET, SELECT, and all other Requests Cost","metric":"","query":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete'}) * 0.0004 / 
1000","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list'}) * 0.005 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"PUT, COPY, POST, LIST Requests Cost","metric":"","query":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list'}) * 0.005 / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Realtime)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":116,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size) by (instance) * 0.023 / 1000 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Monthly Storage Cost","metric":"","query":"sum(storage_level_total_file_size) by (instance) * 0.023 / 1000 / 1000","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Monthly)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Object 
Storage","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":169},"height":null,"hideTimeOverride":false,"id":117,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":118,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_latency_count[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache {{op}} @ {{instance}}","metric":"","query":"sum(rate(file_cache_latency_count[$__rate_interval])) by (op, instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_miss[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache miss @ {{instance}}","metric":"","query":"sum(rate(file_cache_miss[$__rate_interval])) by (instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_disk_latency_count[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache disk {{op}} @ {{instance}}","metric":"","query":"sum(rate(file_cache_disk_latency_count[$__rate_interval])) by (op, 
instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":119,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - file cache - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - file cache - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - file cache - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - file cache - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - file cache disk - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - file cache disk - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - file cache disk - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - file cache disk - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":120,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_disk_bytes[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"disk {{op}} @ {{instance}}","metric":"","query":"sum(rate(file_cache_disk_bytes[$__rate_interval])) by (op, 
instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":121,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - file cache disk - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - file cache disk - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - file cache disk - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - file cache disk - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - file cache disk read entry - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, 
instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - file cache disk read entry - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - file cache disk read entry - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(0.99, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - file cache disk read entry - {{op}} @ {{instance}}","metric":"","query":"histogram_quantile(1.0, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Disk IO Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock Tiered Cache","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":170},"height":null,"hideTimeOverride":false,"id":122,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":123,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p50 - {{lock_type}} @ 
{{lock_name}}","metric":"","query":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p99 - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p999 - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(0.999, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time pmax - {{lock_type}} @ {{lock_name}}","metric":"","query":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lock Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":124,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p50 - {{method}}","metric":"","query":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, 
method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p99 - {{method}}","metric":"","query":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p999 - {{method}}","metric":"","query":"histogram_quantile(0.999, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time pmax - {{method}}","metric":"","query":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Real Process Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":125,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_size","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version size","metric":"","query":"storage_version_size","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":126,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_id","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"current version id","metric":"","query":"storage_current_version_id","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_checkpoint_version_id","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"checkpoint version id","metric":"","query":"storage_checkpoint_version_id","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_version_id","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned version id","metric":"","query":"storage_min_pinned_version_id","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_safepoint_version_id","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min safepoint version id","metric":"","query":"storage_min_safepoint_version_id","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Id","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":127,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_max_committed_epoch","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"max committed 
epoch","metric":"","query":"storage_max_committed_epoch","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_safe_epoch","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"safe epoch","metric":"","query":"storage_safe_epoch","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_epoch","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned epoch","metric":"","query":"storage_min_pinned_epoch","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Epoch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":128,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_size'}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_key_size'}/1024","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_value_size'}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_value_size'}/1024","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table KV 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":129,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_count'}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","query":"storage_version_stats{metric='total_key_count'}","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table KV Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"total number of SSTs that is no longer referenced by versions but is not yet deleted from storage","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":130,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_ssts_count","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stale SST total number","metric":"","query":"storage_stale_ssts_count","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Stale SST Total Number","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":171},"height":null,"hideTimeOverride":false,"id":131,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":132,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"backup_job_count","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"job count","metric":"","query":"backup_job_count","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":133,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p50 - {{state}}","metric":"","query":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p99 - {{state}}","metric":"","query":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, 
state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p999 - {{state}}","metric":"","query":"histogram_quantile(0.999, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time pmax - {{state}}","metric":"","query":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, state))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job Process Time","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Backup Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":172},"height":null,"hideTimeOverride":false,"id":134,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":135,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create'}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":136,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop'}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Drop latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":137,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetCatalog latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Catalog Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":173},"height":null,"hideTimeOverride":false,"id":138,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":139,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) 
by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"AddWorkerNode latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":140,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p99","metric":"","query":"histogram_quantile(0.99, 
sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ListAllNodes latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Cluster Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":174},"height":null,"hideTimeOverride":false,"id":141,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":142,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"CreateMaterializedView latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":143,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) by 
(le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"DropMaterializedView latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":144,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Flush latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Stream Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":175},"height":null,"hideTimeOverride":false,"id":145,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":146,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinVersionBefore latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":147,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinSnapshotBefore latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":148,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ReportCompactionTasks latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":149,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p50","metric":"","query":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) by 
(le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p90","metric":"","query":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p99","metric":"","query":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) by (le))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_avg","metric":"","query":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval]))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetNewSstIds latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Hummock Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":176},"height":null,"hideTimeOverride":false,"id":150,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":151,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_counts[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_counts - {{instance}} 
","metric":"","query":"sum(irate(state_store_report_compaction_task_counts[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":152,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_before_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_before_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_version_before_latency_sum[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_avg","metric":"","query":"sum(irate(state_store_unpin_version_before_latency_sum[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_version_before_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_unpin_version_before_latency_bucket[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"version_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":153,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_pin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_pin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latencyp90 - {{instance}} ","metric":"","query":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_latency_sum[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_avg","metric":"","query":"sum(irate(state_store_pin_snapshot_latency_sum[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_snapshot_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, 
sum(irate(state_store_unpin_version_snapshot_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_snapshot_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_snapshot_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_latency_sum[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_avg","metric":"","query":"sum(irate(state_store_unpin_snapshot_latency_sum[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_unpin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":154,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_counts[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_pin_snapshot_counts[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_counts[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_counts - {{instance}} 
","metric":"","query":"sum(irate(state_store_unpin_snapshot_counts[$__rate_interval])) by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":155,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p50 - {{instance}} ","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p99 - {{instance}} ","metric":"","query":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_sum[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_avg","metric":"","query":"sum(irate(state_store_get_new_sst_ids_latency_sum[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p90 - {{instance}} ","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket[$__rate_interval])) by (le, job, 
instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":156,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_counts[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_counts - {{instance}} ","metric":"","query":"sum(irate(state_store_get_new_sst_ids_latency_counts[$__rate_interval]))by(job,instance)","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":157,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p50 - {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_report_compaction_task_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p99 - {{instance}}","metric":"","query":"histogram_quantile(0.99, 
sum(irate(state_store_report_compaction_task_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_latency_sum[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_avg","metric":"","query":"sum(irate(state_store_report_compaction_task_latency_sum[$__rate_interval])) / sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p90 - {{instance}}","metric":"","query":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC: Hummock Meta Client","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":177},"height":null,"hideTimeOverride":false,"id":158,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":159,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(frontend_query_counter_local_execution[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(frontend_query_counter_local_execution[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per second in Local Execution 
Mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":160,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(distributed_completed_query_counter[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(distributed_completed_query_counter[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per second in Distributed Execution Mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":161,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_running_query_num","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of running query in distributed execution mode","metric":"","query":"distributed_running_query_num","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Running query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":162,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_rejected_query_counter","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of rejected query in distributed execution mode","metric":"","query":"distributed_rejected_query_counter","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Rejected query in distributed execution mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":163,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_completed_query_counter","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of completed query in distributed execution mode","metric":"","query":"distributed_completed_query_counter","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Completed query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":164,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Distributed Execution 
Mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":165,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","query":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket[$__rate_interval])) by (le, job, instance))","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Local Execution 
Mode","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Frontend","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":178},"height":null,"hideTimeOverride":false,"id":166,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":167,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(lru_runtime_loop_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"rate(lru_runtime_loop_count[$__rate_interval])","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager loop count per sec","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":168,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_watermark_step","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"lru_watermark_step","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager watermark steps","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"watermark_time is the current lower watermark of cached data. physical_now is the current time of the machine. 
The diff (physical_now - watermark_time) shows how much data is cached.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":169,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_physical_now_ms - lru_current_watermark_time_ms","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"lru_physical_now_ms - lru_current_watermark_time_ms","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager diff between watermark_time and now (ms)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":170,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jemalloc_allocated_bytes","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"jemalloc_allocated_bytes","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The memory allocated by 
jemalloc","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":171,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_total_mem_usage","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"stream_total_mem_usage","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The memory allocated by streaming","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":172,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_total_mem_usage","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","query":"batch_total_mem_usage","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The memory allocated by batch","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Memory manager","transformations":[],"transparent":false,"type":"row"}],"refresh":"10s","rows":[],"schemaVersion":12,"sharedCrosshair":true,"style":"dark","tags":["risingwave"],"templating":{"list":[]},"time":{"from":"now-30m","to":"now"},"timepicker":{"hidden":false,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"risingwave_dashboard","uid":"Ecy3uV1nz","version":0} +{"__inputs":[],"annotations":{"list":[]},"description":"RisingWave 
Dashboard","editable":true,"gnetId":null,"hideControls":false,"id":null,"links":[],"panels":[{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":1,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Cluster Node","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":1},"height":null,"hideTimeOverride":false,"id":2,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(worker_num) by (worker_type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_type}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":1},"height":null,"hideTimeOverride":false,"id":3,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(process_resident_memory_bytes) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node 
Memory","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":9},"height":null,"hideTimeOverride":false,"id":4,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cpu - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(process_cpu_seconds_total[$__rate_interval])) by (job,instance) / avg(process_cpu_core_num) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cpu usage -{{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Node CPU","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":9},"height":null,"hideTimeOverride":false,"id":5,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(meta_num) by (worker_addr,role)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{worker_addr}} @ {{role}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Meta 
Cluster","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":17},"height":null,"hideTimeOverride":false,"id":6,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":18},"height":null,"hideTimeOverride":false,"id":7,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_output_rows_counts[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_name}} {{source_id}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":18},"height":null,"hideTimeOverride":false,"id":8,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(partition_input_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows) Per 
Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":26},"height":null,"hideTimeOverride":false,"id":9,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum by (source_id)(rate(partition_input_bytes[$__rate_interval])))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"source={{source_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(bytes)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"MB/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":26},"height":null,"hideTimeOverride":false,"id":10,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(rate(partition_input_bytes[$__rate_interval]))/(1000*1000)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} partition={{partition}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(bytes) Per 
Partition","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":34},"height":null,"hideTimeOverride":false,"id":11,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_source_rows_per_barrier_counts[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"actor={{actor_id}} source={{source_id}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Throughput(rows) per barrier","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":34},"height":null,"hideTimeOverride":false,"id":12,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"all_barrier_nums","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"all_barrier","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"in_flight_barrier_nums","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"in_flight_barrier","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier 
Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":42},"height":null,"hideTimeOverride":false,"id":13,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_p999","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_send_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_pmax","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_send_duration_seconds_sum[$__rate_interval]) / rate(meta_barrier_send_duration_seconds_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_send_latency_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Send 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":42},"height":null,"hideTimeOverride":false,"id":14,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_p999","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_pmax","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_duration_seconds_sum[$__rate_interval]) / rate(meta_barrier_duration_seconds_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_latency_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":50},"height":null,"hideTimeOverride":false,"id":15,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_p999","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_inflight_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_pmax","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"max(sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_sum[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_inflight_duration_seconds_count[$__rate_interval])))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_inflight_latency_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier In-Flight 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":50},"height":null,"hideTimeOverride":false,"id":16,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p50 - {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p90 - {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p99 - {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_p999 - {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_barrier_sync_storage_duration_seconds_bucket[$__rate_interval])) by (le,instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_pmax - {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_sum[$__rate_interval])) / sum by(le, instance)(rate(stream_barrier_sync_storage_duration_seconds_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_sync_latency_avg - {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Sync 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":58},"height":null,"hideTimeOverride":false,"id":17,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_p999","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_barrier_wait_commit_duration_seconds_bucket[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_latency_pmax","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(meta_barrier_wait_commit_duration_seconds_sum[$__rate_interval]) / rate(meta_barrier_wait_commit_duration_seconds_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"barrier_wait_commit_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Barrier Wait Commit 
Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":66},"height":null,"hideTimeOverride":false,"id":18,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"rows/s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":19,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_executor_row_count[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}->{{executor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Executor Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":20,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_output_buffer_blocking_duration_ns[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor 
Backpressure","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":21,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(actor_memory_usage[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Memory Usage","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":22,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_input_buffer_blocking_duration_ns[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}->{{upstream_fragment_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input Blocking Time 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":23,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_barrier_time[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Barrier Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":24,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_processing_time[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Processing 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":25,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_actor_execution_time[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Execution Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":26,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_in_record_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Input 
Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":27,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_out_record_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Actor Output Row","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":28,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":29,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":40},"height":null,"hideTimeOverride":false,"id":30,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_fast_poll_duration[$__rate_interval]) / rate(stream_actor_fast_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Fast Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":40},"height":null,"hideTimeOverride":false,"id":31,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Total Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":32,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":48},"height":null,"hideTimeOverride":false,"id":33,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_slow_poll_duration[$__rate_interval]) / rate(stream_actor_slow_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Slow Poll Avg Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":48},"height":null,"hideTimeOverride":false,"id":34,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":35,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":56},"height":null,"hideTimeOverride":false,"id":36,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_poll_duration[$__rate_interval]) / rate(stream_actor_poll_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Poll Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":56},"height":null,"hideTimeOverride":false,"id":37,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Total Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":38,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":64},"height":null,"hideTimeOverride":false,"id":39,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_idle_duration[$__rate_interval]) / rate(stream_actor_idle_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Idle Avg Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":64},"height":null,"hideTimeOverride":false,"id":40,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Total 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":41,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":72},"height":null,"hideTimeOverride":false,"id":42,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_actor_scheduled_duration[$__rate_interval]) / rate(stream_actor_scheduled_cnt[$__rate_interval]) > 0","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Tokio: Actor Scheduled Avg 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":43,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_miss_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss {{actor_id}} {{side}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_lookup_total_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total lookups {{actor_id}} {{side}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_insert_cache_miss_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss when insert{{actor_id}} {{side}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Cache","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":44,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 {{actor_id}}.{{wait_side}} - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 {{actor_id}}.{{wait_side}} - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 {{actor_id}}.{{wait_side}} - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(stream_join_barrier_align_duration_bucket[$__rate_interval])) by (le, actor_id, wait_side, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax {{actor_id}}.{{wait_side}} - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, actor_id, wait_side, job, instance)(rate(stream_join_barrier_align_duration_sum[$__rate_interval])) / sum by(le,actor_id,wait_side,job,instance) (rate(stream_join_barrier_align_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg {{actor_id}}.{{wait_side}} - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Executor Barrier Align","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":45,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_actor_input_waiting_duration_ns[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Input Blocking Time 
Ratio","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":46,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_join_match_duration_ns[$__rate_interval]) / 1000000000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}.{{side}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Actor Match Duration Per Second","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":96},"height":null,"hideTimeOverride":false,"id":47,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_entries","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached 
Entries","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":96},"height":null,"hideTimeOverride":false,"id":48,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_rows","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Rows","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":104},"height":null,"hideTimeOverride":false,"id":49,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_join_cached_estimated_size","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}} {{side}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Join Cached Estimated 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":104},"height":null,"hideTimeOverride":false,"id":50,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_miss_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"cache miss {{actor_id}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_lookup_total_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"total lookups {{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each Key/State","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":0,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":112},"height":null,"hideTimeOverride":false,"id":51,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_miss_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"chunk-level cache miss {{actor_id}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_agg_chunk_lookup_total_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"chunk-level total lookups {{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Executor Cache Statistics For Each 
StreamChunk","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":112},"height":null,"hideTimeOverride":false,"id":52,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_agg_cached_keys","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{actor_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Aggregation Cached Keys","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming Actors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":67},"height":null,"hideTimeOverride":false,"id":53,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":54,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_send_size[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Send 
Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":55,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(stream_exchange_frag_recv_size[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{up_fragment_id}}->{{down_fragment_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fragment-level Remote Exchange Recv Throughput","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Streaming Exchange","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":68},"height":null,"hideTimeOverride":false,"id":56,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":57,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_compute_error_count) by (error_type, error_msg, fragment_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: fragment_id={{fragment_id}})","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compute Errors by 
Type","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":58,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(user_source_error_count) by (error_type, error_msg, fragment_id, table_id, executor_name)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{error_type}}: {{error_msg}} ({{executor_name}}: table_id={{table_id}}, fragment_id={{fragment_id}})","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Source Errors by Type","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"User Streaming Errors","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":69},"height":null,"hideTimeOverride":false,"id":59,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":60,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_task_exchange_recv_row_number","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{query_id}} : {{source_stage_id}}.{{source_task_id}} -> {{target_stage_id}}.{{target_task_id}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Exchange Recv Row 
Number","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"row"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":61,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_task_num","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Batch Mpp Task Number","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Batch Metrics","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":false,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":70},"height":null,"hideTimeOverride":false,"id":62,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":71},"height":null,"hideTimeOverride":false,"id":63,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_duration_sum[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Build and Sync Sstable Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":71},"height":null,"hideTimeOverride":false,"id":64,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sst_store_block_request_counts[$__rate_interval])) by (job, instance, table_id, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{table_id}} @ {{type}} - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_latency_count[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_miss[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache miss @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":79},"height":null,"hideTimeOverride":false,"id":65,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_duration_count[$__rate_interval])) by (job,instanc,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_range_reverse_scan_duration_count[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"backward scan - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_shared_buffer_hit_counts[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer hit - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_in_process_counts[$__rate_interval])) by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":79},"height":null,"hideTimeOverride":false,"id":66,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_get_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_get_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_get_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{table_id}} {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - 
Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":87},"height":null,"hideTimeOverride":false,"id":67,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time p999 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_duration_sum[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"create_iter_time avg - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, 
sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time p999 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_scan_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_iter_scan_duration_sum[$__rate_interval])) / sum by(le, job,instance) (rate(state_store_iter_scan_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pure_scan_time avg - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":87},"height":null,"hideTimeOverride":false,"id":68,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.9, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{table_id}} {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.99, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 
- {{table_id}} {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(0.999, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 - {{table_id}} {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_get_key_size_bucket[$__rate_interval])) by (le, job, instance, table_id)) + histogram_quantile(1.0, sum(rate(state_store_get_value_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":95},"height":null,"hideTimeOverride":false,"id":69,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_size_bucket[$__rate_interval])) by (le, job, instance, 
table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Size - Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":95},"height":null,"hideTimeOverride":false,"id":70,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p999 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_item_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Item Count - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":103},"height":null,"hideTimeOverride":false,"id":71,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_get_key_size_sum[$__rate_interval])) by(job, instance) + sum(rate(state_store_get_value_size_sum[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - Get","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":103},"height":null,"hideTimeOverride":false,"id":72,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_size_sum[$__rate_interval])) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Throughput - 
Iter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":111},"height":null,"hideTimeOverride":false,"id":73,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_may_exist_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_may_exist_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_may_exist_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{table_id}} {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Duration - 
MayExist","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":111},"height":null,"hideTimeOverride":false,"id":74,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_bloom_filter_true_negative_counts[$__rate_interval])) by (job,instance,table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter true negative - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_positive_but_non_exist_counts[$__rate_interval])) by (job,instance,table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter false positive count - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_bloom_filter_positive_counts[$__rate_interval])) by (job,instance,table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read_req bloom filter positive - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_read_req_check_bloom_filter_counts[$__rate_interval])) by (job,instance,table_id,type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read_req check bloom filter - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Bloom 
Filter","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":119},"height":null,"hideTimeOverride":false,"id":75,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_iter_scan_key_counts[$__rate_interval])) by (instance, type, table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{table_id}} @ {{type}} @ {{instance}} ","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Iter keys flow","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":119},"height":null,"hideTimeOverride":false,"id":76,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (sum(rate(state_store_bloom_filter_true_negative_counts[$__rate_interval])) by (job,instance,table_id,type)) / (sum(rate(state_bloom_filter_check_counts[$__rate_interval])) by (job,instance,table_id,type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"bloom filter miss rate - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='meta_miss'}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='meta_total'}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache miss rate - {{table_id}} @ {{job}} @ 
{{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(state_store_sst_store_block_request_counts{type='data_miss'}[$__rate_interval])) by (job,instance,table_id)) / (sum(rate(state_store_sst_store_block_request_counts{type='data_total'}[$__rate_interval])) by (job,instance,table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"block cache miss rate - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"(sum(rate(file_cache_miss[$__rate_interval])) by (instance)) / (sum(rate(file_cache_latency_count{op='get'}[$__rate_interval])) by (instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache miss rate @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (((sum(rate(state_store_read_req_bloom_filter_positive_counts[$__rate_interval])) by (job,instance,table_id,type))) / (sum(rate(state_store_read_req_check_bloom_filter_counts[$__rate_interval])) by (job,instance,table_id,type)))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read req bloom filter filter rate - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"1 - (((sum(rate(state_store_read_req_positive_but_non_exist_counts[$__rate_interval])) by (job,instance,table_id,type))) / (sum(rate(state_store_read_req_bloom_filter_positive_counts[$__rate_interval])) by (job,instance,table_id,type)))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read req bloom filter false positive rate - {{table_id}} - {{type}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":" Filter/Cache Miss Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":127},"height":null,"hideTimeOverride":false,"id":77,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_merge_sstable_counts_bucket[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p90 - {{table_id}} @ {{job}} @ 
{{type}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_merge_sstable_counts_bucket[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts p99 - {{table_id}} @ {{job}} @ {{type}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_merge_sstable_counts_bucket[$__rate_interval])) by (le, job, table_id, type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts pmax - {{table_id}} @ {{job}} @ {{type}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_sum[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_iter_merge_sstable_counts_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"# merged ssts avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Read Merged SSTs","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":127},"height":null,"hideTimeOverride":false,"id":78,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_duration_count[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write batch - {{table_id}} @ {{job}} @ {{instance}} ","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_sync_duration_count[$__rate_interval])) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"l0 - {{job}} @ {{instance}} ","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write 
Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":135},"height":null,"hideTimeOverride":false,"id":79,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_batch_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id)(rate(state_store_write_batch_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to shared_buffer avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p50 - {{job}} @ 
{{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store p99 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_write_shared_buffer_sync_time_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store pmax - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_sum[$__rate_interval])) / sum by(le, job, instance)(rate(state_store_write_shared_buffer_sync_time_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write to object_store - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":135},"height":null,"hideTimeOverride":false,"id":80,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_tuple_counts[$__rate_interval])) by (job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write_batch_kv_pair_count - {{table_id}} @ {{instance}} ","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Item 
Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":143},"height":null,"hideTimeOverride":false,"id":81,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(state_store_write_batch_size_sum[$__rate_interval]))by(job,instance) / sum(rate(state_store_write_batch_size_count[$__rate_interval]))by(job,instance,table_id)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"shared_buffer - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_shared_buffer_to_sstable_size[$__rate_interval]))by(job,instance) / sum(rate(state_store_shared_buffer_to_sstable_size_count[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"sync - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Write Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":143},"height":null,"hideTimeOverride":false,"id":82,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, 
instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_sync_size_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(state_store_sync_size_sum[$__rate_interval])) / sum by(le, job, instance) (rate(state_store_sync_size_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Checkpoint Sync Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":151},"height":null,"hideTimeOverride":false,"id":83,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_meta_cache_size) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"meta cache - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(state_store_block_cache_size) by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"data cache - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(state_store_limit_memory_size) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"uploading memory - {{job}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Cache 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":151},"height":null,"hideTimeOverride":false,"id":84,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p50 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next p99 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(batch_row_seq_scan_next_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next pmax - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_sum[$__rate_interval])) / sum by(le, job, instance) (rate(batch_row_seq_scan_next_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"row_seq_scan next avg - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Row SeqScan Next 
Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":159},"height":null,"hideTimeOverride":false,"id":85,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p50 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration p99 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_iter_fetch_meta_duration_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_sum[$__rate_interval])) / sum by(le, job, instance, table_id) (rate(state_store_iter_fetch_meta_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"fetch_meta_duration avg - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Fetch Meta 
Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":167},"height":null,"hideTimeOverride":false,"id":86,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"num of SSTs in each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":87,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_sst_num) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"SST Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"KBs total file bytes in each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":88,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size) by (instance, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs level sst","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"compactor core resource need to scale 
out","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":89,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compactor_suggest_core_count)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"suggest-core-count","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"scale compactor core count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of compactions from each level to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":90,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_frequency) by (compactor, group, task_type, result)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{task_type}} - {{result}} - group-{{group}} @ {{compactor}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Success & Failure Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of compaction task which does not 
trigger","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":91,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_skip_compact_frequency[$__rate_interval])) by (level, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{level}}-{{type}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Skip Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of compactions from each level to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":92,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"avg(storage_compact_task_pending_num) by(job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compactor_task_split_count - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Running Task Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"Total time of compact that have been issued to state 
store","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":93,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(compactor_compact_task_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p50 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_compact_task_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_compact_task_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task pmax - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_compact_sst_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_compact_sst_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range pmax - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_get_table_id_total_time_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_get_table_id_total_time_duration_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get-table-id pmax - {{job}} @ 
{{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(compactor_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(compactor_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(compactor_compact_task_duration_sum[$__rate_interval])) / sum by(le)(rate(compactor_compact_task_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-task avg","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le)(rate(state_store_compact_sst_duration_sum[$__rate_interval])) / sum by(le)(rate(state_store_compact_sst_duration_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact-key-range avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"KBs read from next level during history compactions to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":94,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next[$__rate_interval])) by(job,instance) + sum(rate(storage_level_compact_read_curr[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}} @ 
{{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_write_build_l0_bytes[$__rate_interval]))by (job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of SSTs written into next level during history compactions to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":32},"height":null,"hideTimeOverride":false,"id":95,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(compactor_write_build_l0_bytes) by (job)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"flush - {{job}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of SSTs written into next level during history compactions to next level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":32},"height":null,"hideTimeOverride":false,"id":96,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_compact_write) / sum(state_store_write_build_l0_bytes)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write 
amplification","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compaction Write Amplification","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"num of SSTs to be merged to next level in each level","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":40},"height":null,"hideTimeOverride":false,"id":97,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_level_compact_cnt","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compacting SST Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":40},"height":null,"hideTimeOverride":false,"id":98,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_next[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} read","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs Read from Next 
Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":48},"height":null,"hideTimeOverride":false,"id":99,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_curr[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} read","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs Read from Current Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":48},"height":null,"hideTimeOverride":false,"id":100,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_sstn_curr[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} read","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Count of SSTs Read from Current 
Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":56},"height":null,"hideTimeOverride":false,"id":101,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} write","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"KBs Written to Next Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":56},"height":null,"hideTimeOverride":false,"id":102,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_write_sstn[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} write","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Count of SSTs Written to Next Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"number of SSTs read from the next level during historical 
compactions","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":64},"height":null,"hideTimeOverride":false,"id":103,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(storage_level_compact_read_sstn_next[$__rate_interval])) by (le, level_index)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"L{{level_index}} read","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Count of SSTs Read from Next Level","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"Average size of sstable bloom filters, for observing bloom_filter size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":64},"height":null,"hideTimeOverride":false,"id":104,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_bloom_filter_size_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_meta - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_file_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_file_size_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_file - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"Average size of sstable keys and values, for observing 
sstable_avg_key_size","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":72},"height":null,"hideTimeOverride":false,"id":105,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_key_size_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_key_size - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_avg_value_size_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_value_size - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable Item Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"Avg count gotten from sstable_distinct_epoch_count, for observing sstable_distinct_epoch_count","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":72},"height":null,"hideTimeOverride":false,"id":106,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_sum[$__rate_interval])) / sum by(le, job, instance)(rate(compactor_sstable_distinct_epoch_count_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"avg_epoch_count - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Sstable 
Stat","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"Total time of operations that read from remote storage when prefetch is enabled","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":80},"height":null,"hideTimeOverride":false,"id":107,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(state_store_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io p90 - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(state_store_remote_read_time_per_task_bucket[$__rate_interval])) by (le, job, instance, table_id))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"remote-io pmax - {{table_id}} @ {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Hummock Remote Read Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":80},"height":null,"hideTimeOverride":false,"id":108,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(compactor_iter_scan_key_counts[$__rate_interval])) by (instance, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"iter keys flow - {{type}} @ {{instance}} ","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Compactor Iter keys","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"bytes of Lsm tree needed to reach 
balance","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":88},"height":null,"hideTimeOverride":false,"id":109,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_pending_bytes) by (instance, group)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"compact pending bytes - {{group}} @ {{instance}} ","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Compact Pending Bytes","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"compression ratio of each level of the lsm tree","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"percentunit"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":88},"height":null,"hideTimeOverride":false,"id":110,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_compact_level_compression_ratio) by (instance, group, level, algorithm)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"lsm compression ratio - cg{{group}} @ L{{level}} - {{algorithm}} {{instance}} ","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lsm Level Compression 
Ratio","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Compaction","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":168},"height":null,"hideTimeOverride":false,"id":111,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":112,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_read_bytes[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"read - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_write_bytes[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"write - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":113,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} 
@ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_latency_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum by(le, type, job, instance)(rate(object_store_operation_latency_sum[$__rate_interval])) / sum by(le, type, job, instance) (rate(object_store_operation_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} avg - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Duration","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":114,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count[$__rate_interval])) by (le, type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'upload|delete'}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-write - {{job}} @ 
{{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_operation_latency_count{type=~'read|readv|list|metadata'}[$__rate_interval])) by (le, media_type, job, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{media_type}}-read - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":115,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p50 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} p99 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(object_store_operation_bytes_bucket[$__rate_interval])) by (le, type, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} pmax - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":116,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(object_store_failure_count[$__rate_interval])) by (instance, job, type)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"{{type}} - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Operation Failure Rate","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":117,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_read_bytes) * 0.01 / 1000 / 1000 / 1000","format":"time_series","hide":true,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"(Cross Region) Data Transfer Cost","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'read|streaming_read_start|delete'}) * 0.0004 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GET, SELECT, and all other Requests Cost","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(object_store_operation_latency_count{type=~'upload|streaming_upload_start|s3_upload_part|streaming_upload_finish|delete_objects|list'}) * 0.005 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"PUT, COPY, POST, LIST Requests Cost","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost 
(Realtime)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"$"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":118,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(storage_level_total_file_size) by (instance) * 0.023 / 1000 / 1000","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Monthly Storage Cost","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Estimated S3 Cost (Monthly)","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Object Storage","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":169},"height":null,"hideTimeOverride":false,"id":119,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"ops"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":120,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_latency_count[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_miss[$__rate_interval])) by (instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache miss @ 
{{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_disk_latency_count[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"file cache disk {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Ops","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":121,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - file cache - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - file cache - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - file cache - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(file_cache_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - file cache - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - file cache disk - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - 
file cache disk - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - file cache disk - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(file_cache_disk_latency_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - file cache disk - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Bps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":122,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(rate(file_cache_disk_bytes[$__rate_interval])) by (op, instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"disk {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Throughput","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":123,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - file 
cache disk - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - file cache disk - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - file cache disk - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(file_cache_disk_io_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - file cache disk - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - file cache disk read entry - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - file cache disk read entry - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - file cache disk read entry - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(file_cache_disk_read_entry_size_bucket[$__rate_interval])) by (le, op, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pmax - file cache disk read entry - {{op}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Disk IO Size","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock Tiered 
Cache","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":170},"height":null,"hideTimeOverride":false,"id":124,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":125,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p50 - {{lock_type}} @ {{lock_name}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p99 - {{lock_type}} @ {{lock_name}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time p999 - {{lock_type}} @ {{lock_name}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(hummock_manager_lock_time_bucket[$__rate_interval])) by (le, lock_name, lock_type))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Lock Time pmax - {{lock_type}} @ {{lock_name}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Lock 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":126,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p50 - {{method}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p99 - {{method}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time p999 - {{method}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(meta_hummock_manager_real_process_time_bucket[$__rate_interval])) by (le, method))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Real Process Time pmax - {{method}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Real Process 
Time","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":127,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_size","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"version size","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":128,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_current_version_id","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"current version id","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_checkpoint_version_id","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"checkpoint version id","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_version_id","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned version id","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_safepoint_version_id","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min safepoint version id","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Version 
Id","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":129,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_max_committed_epoch","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"max committed epoch","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_safe_epoch","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"safe epoch","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_min_pinned_epoch","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"min pinned epoch","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Epoch","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"kbytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":130,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_size'}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_value_size'}/1024","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table KV 
Size","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":131,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_version_stats{metric='total_key_count'}","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"table{{table_id}} {{metric}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Table KV Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"total number of SSTs that is no longer referenced by versions but is not yet deleted from storage","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":24},"height":null,"hideTimeOverride":false,"id":132,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"storage_stale_ssts_count","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"stale SST total number","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Stale SST Total Number","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Hummock 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":171},"height":null,"hideTimeOverride":false,"id":133,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":134,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"backup_job_count","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"job count","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job Count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":135,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p50 - {{state}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p99 - {{state}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.999, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, 
state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time p999 - {{state}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(1.0, sum(rate(backup_job_latency_bucket[$__rate_interval])) by (le, state))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Job Process Time pmax - {{state}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Job Process Time","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Backup Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":172},"height":null,"hideTimeOverride":false,"id":136,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":137,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Create'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Create'}[$__rate_interval])) / 
sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Create'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Create_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Create latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":138,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/Drop'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/Drop'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/Drop'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Drop_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Drop 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":139,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.CatalogService/GetCatalog'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.CatalogService/GetCatalog'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetCatalog_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetCatalog latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Catalog 
Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":173},"height":null,"hideTimeOverride":false,"id":140,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":141,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/AddWorkerNode'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"AddWorkerNode_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"AddWorkerNode 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":142,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.ClusterService/ListAllNodes'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ListAllNodes_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ListAllNodes latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Cluster 
Service","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":174},"height":null,"hideTimeOverride":false,"id":143,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":144,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/CreateMaterializedView'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"CreateMaterializedView_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"CreateMaterializedView 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":145,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/DropMaterializedView'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"DropMaterializedView_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"DropMaterializedView 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":146,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.StreamManagerService/Flush'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.StreamManagerService/Flush'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"Flush_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Flush latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Stream 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":175},"height":null,"hideTimeOverride":false,"id":147,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":148,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinVersionBefore'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinVersionBefore_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinVersionBefore 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":8,"y":0},"height":null,"hideTimeOverride":false,"id":149,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/UnpinSnapshotBefore'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"UnpinSnapshotBefore_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"UnpinSnapshotBefore 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":16,"y":0},"height":null,"hideTimeOverride":false,"id":150,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/ReportCompactionTasks'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"ReportCompactionTasks_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"ReportCompactionTasks 
latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":8,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":151,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p50","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p90","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(meta_grpc_duration_seconds_bucket{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) by (le))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_p99","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(meta_grpc_duration_seconds_sum{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval])) / sum(irate(meta_grpc_duration_seconds_count{path='/meta.HummockManagerService/GetNewSstIds'}[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"GetNewSstIds_avg","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"GetNewSstIds latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC Meta: Hummock 
Manager","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":176},"height":null,"hideTimeOverride":false,"id":152,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":153,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_counts[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_counts - {{instance}} ","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":154,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_unpin_version_before_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p50 - {{instance}} ","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_version_before_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p99 - {{instance}} 
","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_version_before_latency_sum[$__rate_interval])) / sum(irate(state_store_unpin_version_before_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_avg","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_version_before_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_version_before_latency_p90 - {{instance}} ","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"version_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":155,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_pin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p50 - {{instance}} ","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_pin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_p99 - {{instance}} ","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(irate(state_store_pin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latencyp90 - {{instance}} ","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_latency_sum[$__rate_interval])) / sum(irate(state_store_pin_snapshot_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_latency_avg","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, 
sum(irate(state_store_unpin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p50 - {{instance}} ","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_unpin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p99 - {{instance}} ","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_latency_sum[$__rate_interval])) / sum(irate(state_store_unpin_snapshot_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_avg","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_unpin_snapshot_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_latency_p90 - {{instance}} ","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":156,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_pin_snapshot_counts[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"pin_snapshot_counts - {{instance}} ","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_unpin_snapshot_counts[$__rate_interval])) by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"unpin_snapshot_counts - {{instance}} 
","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"snapshot_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":157,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_get_new_sst_ids_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p50 - {{instance}} ","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_get_new_sst_ids_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p99 - {{instance}} ","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_sum[$__rate_interval])) / sum(irate(state_store_get_new_sst_ids_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_avg","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_get_new_sst_ids_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_latency_p90 - {{instance}} 
","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_latency","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":158,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_get_new_sst_ids_latency_counts[$__rate_interval]))by(job,instance)","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"get_new_sst_ids_latency_counts - {{instance}} ","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"table_count","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":159,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(irate(state_store_report_compaction_task_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p50 - {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.99, sum(irate(state_store_report_compaction_task_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p99 - {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"sum(irate(state_store_report_compaction_task_latency_sum[$__rate_interval])) / 
sum(irate(state_store_report_compaction_task_latency_count[$__rate_interval]))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_avg","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.90, sum(irate(state_store_report_compaction_task_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"report_compaction_task_latency_p90 - {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"compaction_latency","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"gRPC: Hummock Meta Client","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":177},"height":null,"hideTimeOverride":false,"id":160,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":161,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(frontend_query_counter_local_execution[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per second in Local Execution 
Mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"Qps"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":162,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(distributed_completed_query_counter[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Per second in Distributed Execution Mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":163,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_running_query_num","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of running query in distributed execution mode","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Running query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":164,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_rejected_query_counter","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of rejected query in distributed execution mode","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Rejected query in distributed execution mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":165,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["last"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"distributed_completed_query_counter","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"The number of completed query in distributed execution mode","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Completed query in distributed execution 
mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":166,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(distributed_query_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(distributed_query_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(distributed_query_latency_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Distributed Execution Mode","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"s"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":24},"height":null,"hideTimeOverride":false,"id":167,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.5, sum(rate(frontend_latency_local_execution_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p50 - {{job}} @ 
{{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.9, sum(rate(frontend_latency_local_execution_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p90 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""},{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"histogram_quantile(0.95, sum(rate(frontend_latency_local_execution_bucket[$__rate_interval])) by (le, job, instance))","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"p99 - {{job}} @ {{instance}}","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"Query Latency in Local Execution Mode","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Frontend","transformations":[],"transparent":false,"type":"row"},{"cacheTimeout":null,"collapsed":true,"datasource":null,"description":null,"editable":true,"error":false,"fieldConfig":{"defaults":{"thresholds":{"mode":"absolute","steps":[]}}},"gridPos":{"h":1,"w":24,"x":0,"y":178},"height":null,"hideTimeOverride":false,"id":168,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"panels":[{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":0},"height":null,"hideTimeOverride":false,"id":169,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"rate(lru_runtime_loop_count[$__rate_interval])","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager loop count per 
sec","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":0},"height":null,"hideTimeOverride":false,"id":170,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_watermark_step","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager watermark steps","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"watermark_time is the current lower watermark of cached data. physical_now is the current time of the machine. The diff (physical_now - watermark_time) shows how much data is cached.","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":""},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":8},"height":null,"hideTimeOverride":false,"id":171,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":["mean"],"displayMode":"table","placement":"right"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"lru_physical_now_ms - lru_current_watermark_time_ms","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"LRU manager diff between watermark_time and now 
(ms)","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":8},"height":null,"hideTimeOverride":false,"id":172,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"jemalloc_allocated_bytes","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The memory allocated by jemalloc","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":0,"y":16},"height":null,"hideTimeOverride":false,"id":173,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"stream_total_mem_usage","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The memory allocated by 
streaming","transformations":[],"transparent":false,"type":"timeseries"},{"cacheTimeout":null,"datasource":null,"description":"","editable":true,"error":false,"fieldConfig":{"defaults":{"color":{"mode":"palette-classic"},"custom":{"axisLabel":"","axisPlacement":"auto","barAlignment":0,"drawStyle":"line","fillOpacity":10,"gradientMode":"none","hideFrom":{"legend":false,"tooltip":false,"viz":false},"lineInterpolation":"linear","lineWidth":1,"pointSize":5,"scaleDistribution":{"log":2,"type":"linear"},"showPoints":"auto","spanNulls":false,"stacking":{},"thresholdsStyle":{"mode":"off"}},"mappings":[],"thresholds":{"mode":"absolute","steps":[]},"unit":"bytes"},"overrides":[]},"gridPos":{"h":8,"w":12,"x":12,"y":16},"height":null,"hideTimeOverride":false,"id":174,"interval":null,"links":[],"maxDataPoints":100,"maxPerRow":null,"minSpan":null,"options":{"legend":{"calcs":[],"displayMode":"list","placement":"bottom"},"tooltip":{"mode":"single"}},"repeat":null,"repeatDirection":null,"span":null,"targets":[{"datasource":{"type":"prometheus","uid":"risedev-prometheus"},"expr":"batch_total_mem_usage","format":"time_series","hide":false,"instant":false,"interval":"","intervalFactor":2,"legendFormat":"","metric":"","refId":"","step":10,"target":""}],"timeFrom":null,"timeShift":null,"title":"The memory allocated by batch","transformations":[],"transparent":false,"type":"timeseries"}],"repeat":null,"repeatDirection":null,"span":null,"targets":[],"timeFrom":null,"timeShift":null,"title":"Memory manager","transformations":[],"transparent":false,"type":"row"}],"refresh":"10s","rows":[],"schemaVersion":12,"sharedCrosshair":true,"style":"dark","tags":["risingwave"],"templating":{"list":[]},"time":{"from":"now-30m","to":"now"},"timepicker":{"hidden":false,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"browser","title":"risingwave_dashboard","uid":"Ecy3uV1nz","version":0} diff --git a/integration_tests/README.md b/integration_tests/README.md new file mode 100644 index 0000000000000..0e09cf6f0dba7 --- /dev/null +++ b/integration_tests/README.md @@ -0,0 +1,46 @@ +# RisingWave Demos + +Here is a gallery of demos that present how to use RisingWave alongwith the ecosystem tools. + +- `ad-click/`: [Build and Maintain Real-time Applications Faster and Easier with Redpanda and RisingWave](https://singularity-data.com/blog/build-with-Redpanda-and-RisingWave) +- `ad-ctr`: [Perform real-time ad performance analysis](https://www.risingwave.dev/docs/latest/real-time-ad-performance-analysis/) +- `cdn-metrics`: [Server performance anomaly detection](https://www.risingwave.dev/docs/latest/server-performance-anomaly-detection/) +- `clickstream`: [Clickstream analysis](https://www.risingwave.dev/docs/latest/clickstream-analysis/) +- `twitter`: [Fast Twitter events processing](https://www.risingwave.dev/docs/latest/fast-twitter-events-processing/) +- `twitter-pulsar`: [Tutorial: Pulsar + RisingWave for Fast Twitter Event Processing](https://www.risingwave.com/blog/tutorial-pulsar-risingwave-for-fast-twitter-events-processing/) +- `live-stream`: [Live stream metrics analysis](https://www.risingwave.dev/docs/latest/live-stream-metrics-analysis/) + +## Demo Runnability Testing + +The demos listed above will all run through a series of tests when each PR is merged, including: + +- Run the queries mentioned in the demos. +- Ingest the data in various formats, including Protobuf, Avro, and JSON. 
Each format will be tested individually. +- For each demo test, we check that the sources and MVs have successfully ingested data, meaning that they should have >0 records. + +## Workload Generator + +The workloads presented in the demos are produced by a Go program in `/datagen`. You can get this tool in multiple ways: + +- Download pre-built binaries from [Releases](https://github.com/risingwavelabs/risingwave-demo/releases) +- Pull the latest Docker image via `docker pull ghcr.io/risingwavelabs/demo-datagen:v1.0.9`. +- Build the binary from source: + ```sh + cd datagen && go build + ``` + +To use this tool, you can run the following command: + +```sh +./datagen --mode clickstream --qps 10 kafka --brokers 127.0.0.1:57801 +``` + +or + +```sh +./datagen --mode ecommerce --qps 10000000 postgres --port 6875 --user materialize --db materialize +``` + +- `--mode clickstream` indicates that it will produce random clickstream data. +- `--qps 10` limits the generator to 10 requests per second. +- `kafka | postgres` chooses the destination. For Kafka, you will need to specify the brokers. diff --git a/integration_tests/ad-click/create_mv.sql b/integration_tests/ad-click/create_mv.sql new file mode 100644 index 0000000000000..828cb6dcff16e --- /dev/null +++ b/integration_tests/ad-click/create_mv.sql @@ -0,0 +1,13 @@ +-- The number of clicks on the ad within one minute after the ad was shown. +create materialized view m_click_statistic as +select + count(user_id) as clicks_count, + ad_id +from + ad_source +where + click_timestamp is not null + and impression_timestamp < click_timestamp + and impression_timestamp + interval '1' minute >= click_timestamp +group by + ad_id; \ No newline at end of file diff --git a/integration_tests/ad-click/create_source.sql b/integration_tests/ad-click/create_source.sql new file mode 100644 index 0000000000000..532f980ad52ed --- /dev/null +++ b/integration_tests/ad-click/create_source.sql @@ -0,0 +1,13 @@ +-- impression_timestamp: The time when the ad was shown. +-- click_timestamp: The time when the ad was clicked. 
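+-- The events are produced to the `ad_clicks` Kafka topic by the datagen service in docker-compose.yml.
+-- The demo test then expects this source and the m_click_statistic view (both listed in data_check) to contain >0 records.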
+create source ad_source ( + user_id bigint, + ad_id bigint, + click_timestamp timestamptz, + impression_timestamp timestamptz +) with ( + connector = 'kafka', + topic = 'ad_clicks', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) row format json; \ No newline at end of file diff --git a/integration_tests/ad-click/data_check b/integration_tests/ad-click/data_check new file mode 100644 index 0000000000000..77a90db334156 --- /dev/null +++ b/integration_tests/ad-click/data_check @@ -0,0 +1 @@ +ad_source,m_click_statistic \ No newline at end of file diff --git a/integration_tests/ad-click/docker-compose.yml b/integration_tests/ad-click/docker-compose.yml new file mode 100644 index 0000000000000..ab8f175db1252 --- /dev/null +++ b/integration_tests/ad-click/docker-compose.yml @@ -0,0 +1,62 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --mode ad-click --qps 2 kafka --brokers message_queue:29092 + restart: always + container_name: datagen +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/ad-click/query.sql b/integration_tests/ad-click/query.sql new file mode 100644 index 0000000000000..01a1a7af3cba1 --- /dev/null +++ b/integration_tests/ad-click/query.sql @@ -0,0 +1,6 @@ +select + * +from + m_click_statistic +limit + 10; \ No newline at end of file diff --git a/integration_tests/ad-ctr/create_mv.sql b/integration_tests/ad-ctr/create_mv.sql new file mode 100644 index 0000000000000..7c1cbe1cdeac7 --- /dev/null +++ b/integration_tests/ad-ctr/create_mv.sql @@ -0,0 +1,64 @@ +CREATE MATERIALIZED VIEW ad_ctr AS +SELECT + ad_clicks.ad_id AS ad_id, + ad_clicks.clicks_count :: NUMERIC / ad_impressions.impressions_count AS ctr +FROM + ( + SELECT + ad_impression.ad_id AS ad_id, + COUNT(*) AS impressions_count + FROM + ad_impression + GROUP BY + ad_id + ) AS ad_impressions + JOIN ( + SELECT + ai.ad_id, + COUNT(*) AS clicks_count + FROM + ad_click AS ac + LEFT JOIN ad_impression AS ai ON ac.bid_id = ai.bid_id + GROUP BY + ai.ad_id + ) AS ad_clicks ON ad_impressions.ad_id = ad_clicks.ad_id; + +CREATE MATERIALIZED VIEW ad_ctr_5min AS +SELECT + ac.ad_id AS ad_id, + ac.clicks_count :: NUMERIC / ai.impressions_count AS ctr, + ai.window_end AS window_end +FROM + ( + SELECT + ad_id, + COUNT(*) AS impressions_count, + window_end + FROM + TUMBLE( + ad_impression, + impression_timestamp, + INTERVAL '5' MINUTE + ) + GROUP BY + ad_id, + window_end + ) AS ai + JOIN ( + SELECT + ai.ad_id, + COUNT(*) AS 
clicks_count, + ai.window_end AS window_end + FROM + TUMBLE(ad_click, click_timestamp, INTERVAL '5' MINUTE) AS ac + INNER JOIN TUMBLE( + ad_impression, + impression_timestamp, + INTERVAL '5' MINUTE + ) AS ai ON ai.bid_id = ac.bid_id + AND ai.window_end = ac.window_end + GROUP BY + ai.ad_id, + ai.window_end + ) AS ac ON ai.ad_id = ac.ad_id + AND ai.window_end = ac.window_end; \ No newline at end of file diff --git a/integration_tests/ad-ctr/create_source.sql b/integration_tests/ad-ctr/create_source.sql new file mode 100644 index 0000000000000..70a4ca6400981 --- /dev/null +++ b/integration_tests/ad-ctr/create_source.sql @@ -0,0 +1,20 @@ +CREATE SOURCE ad_impression ( + bid_id BIGINT, + ad_id BIGINT, + impression_timestamp TIMESTAMPTZ +) WITH ( + connector = 'kafka', + topic = 'ad_impression', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; + +CREATE SOURCE ad_click ( + bid_id BIGINT, + click_timestamp TIMESTAMPTZ +) WITH ( + connector = 'kafka', + topic = 'ad_click', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; \ No newline at end of file diff --git a/integration_tests/ad-ctr/data_check b/integration_tests/ad-ctr/data_check new file mode 100644 index 0000000000000..9708f5cb617c3 --- /dev/null +++ b/integration_tests/ad-ctr/data_check @@ -0,0 +1 @@ +ad_impression,ad_click,ad_ctr,ad_ctr_5min \ No newline at end of file diff --git a/integration_tests/ad-ctr/docker-compose.yml b/integration_tests/ad-ctr/docker-compose.yml new file mode 100644 index 0000000000000..bd12b521d53b5 --- /dev/null +++ b/integration_tests/ad-ctr/docker-compose.yml @@ -0,0 +1,62 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --mode ad-ctr --qps 2 kafka --brokers message_queue:29092 + restart: always + container_name: datagen +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/ad-ctr/query.sql b/integration_tests/ad-ctr/query.sql new file mode 100644 index 0000000000000..4bd10cc551ee4 --- /dev/null +++ b/integration_tests/ad-ctr/query.sql @@ -0,0 +1,6 @@ +SELECT + * +FROM + ad_ctr_5min +limit + 10; \ No newline at end of file diff --git a/integration_tests/cdn-metrics/create_mv.sql b/integration_tests/cdn-metrics/create_mv.sql new file mode 100644 index 0000000000000..cd2c2a7195158 --- /dev/null +++ b/integration_tests/cdn-metrics/create_mv.sql @@ -0,0 +1,79 @@ +CREATE MATERIALIZED VIEW high_util_tcp_metrics AS +SELECT + 
tcp.device_id AS device_id, + tcp.window_end AS window_end, + tcp.metric_name AS metric_name, + tcp.metric_value AS metric_value, + nic.avg_util AS tcp_avg_bandwidth_util +FROM + ( + SELECT + device_id, + window_end, + metric_name, + AVG(metric_value) AS metric_value + FROM + TUMBLE( + tcp_metrics, + report_time, + INTERVAL '1' MINUTE + ) + GROUP BY + device_id, + window_end, + metric_name + ) AS tcp + JOIN ( + SELECT + device_id, + window_end, + AVG((metric_value) / bandwidth) * 100 AS avg_util + FROM + TUMBLE( + nics_metrics, + report_time, + INTERVAL '1' MINUTE + ) + WHERE + metric_name = 'tx_bytes' + AND aggregation = 'avg' + GROUP BY + device_id, + window_end + ) AS nic ON tcp.device_id = nic.device_id + AND tcp.window_end = nic.window_end +WHERE + avg_util >= 50; + +CREATE MATERIALIZED VIEW retrans_incidents AS +SELECT + device_id, + window_end AS trigger_time, + metric_value AS trigger_value +FROM + high_util_tcp_metrics +WHERE + metric_name = 'retrans_rate' + AND metric_value > 0.15; + +CREATE MATERIALIZED VIEW srtt_incidents AS +SELECT + device_id, + window_end AS trigger_time, + metric_value AS trigger_value +FROM + high_util_tcp_metrics +WHERE + metric_name = 'srtt' + AND metric_value > 500.0; + +CREATE MATERIALIZED VIEW download_incidents AS +SELECT + device_id, + window_end AS trigger_time, + metric_value AS trigger_value +FROM + high_util_tcp_metrics +WHERE + metric_name = 'download_speed' + AND metric_value < 200.0; \ No newline at end of file diff --git a/integration_tests/cdn-metrics/create_source.sql b/integration_tests/cdn-metrics/create_source.sql new file mode 100644 index 0000000000000..3ef8c61af64ba --- /dev/null +++ b/integration_tests/cdn-metrics/create_source.sql @@ -0,0 +1,26 @@ +CREATE SOURCE nics_metrics ( + device_id VARCHAR, + metric_name VARCHAR, + aggregation VARCHAR, + nic_name VARCHAR, + report_time TIMESTAMPTZ, + bandwidth DOUBLE PRECISION, + metric_value DOUBLE PRECISION +) WITH ( + connector = 'kafka', + topic = 'nics_metrics', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; + +CREATE SOURCE tcp_metrics ( + device_id VARCHAR, + metric_name VARCHAR, + report_time TIMESTAMPTZ, + metric_value DOUBLE PRECISION +) WITH ( + connector = 'kafka', + topic = 'tcp_metrics', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; \ No newline at end of file diff --git a/integration_tests/cdn-metrics/data_check b/integration_tests/cdn-metrics/data_check new file mode 100644 index 0000000000000..b3847bdc60b77 --- /dev/null +++ b/integration_tests/cdn-metrics/data_check @@ -0,0 +1 @@ +nics_metrics,tcp_metrics \ No newline at end of file diff --git a/integration_tests/cdn-metrics/docker-compose.yml b/integration_tests/cdn-metrics/docker-compose.yml new file mode 100644 index 0000000000000..617566382b7f7 --- /dev/null +++ b/integration_tests/cdn-metrics/docker-compose.yml @@ -0,0 +1,62 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + 
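+ # MinIO provides the S3-compatible object storage that backs RisingWave's state store.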
extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --heavytail --mode cdn-metrics --qps 1000 kafka --brokers message_queue:29092 + restart: always + container_name: datagen +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/cdn-metrics/query.sql b/integration_tests/cdn-metrics/query.sql new file mode 100644 index 0000000000000..7fb778a38640a --- /dev/null +++ b/integration_tests/cdn-metrics/query.sql @@ -0,0 +1,8 @@ +SELECT + * +FROM + srtt_incidents +limit + 10; + +SELECT * FROM high_util_tcp_metrics limit 5; diff --git a/integration_tests/clickstream/create_mv.sql b/integration_tests/clickstream/create_mv.sql new file mode 100644 index 0000000000000..4d1e033470f57 --- /dev/null +++ b/integration_tests/clickstream/create_mv.sql @@ -0,0 +1,34 @@ +CREATE MATERIALIZED VIEW thread_view_count AS WITH t AS ( + SELECT + target_id, + COUNT() AS view_count, + window_start as window_time + FROM + TUMBLE( + user_behaviors, + event_timestamp, + INTERVAL '10 minutes' + ) + WHERE + target_type = 'thread' + AND behavior_type = 'show' + GROUP BY + target_id, + window_start +) +SELECT + target_id, + SUM(t.view_count) AS view_count, + window_start as window_time, + window_end +FROM + HOP( + t, + t.window_time, + INTERVAL '10 minutes', + INTERVAL '1440 minutes' + ) +GROUP BY + target_id, + window_start, + window_end; \ No newline at end of file diff --git a/integration_tests/clickstream/create_source.sql b/integration_tests/clickstream/create_source.sql new file mode 100644 index 0000000000000..7a9e3d3add4c8 --- /dev/null +++ b/integration_tests/clickstream/create_source.sql @@ -0,0 +1,14 @@ +CREATE SOURCE user_behaviors ( + user_id VARCHAR, + target_id VARCHAR, + target_type VARCHAR, + event_timestamp TIMESTAMPTZ, + behavior_type VARCHAR, + parent_target_type VARCHAR, + parent_target_id VARCHAR +) WITH ( + connector = 'kafka', + topic = 'user_behaviors', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; \ No newline at end of file diff --git a/integration_tests/clickstream/data_check b/integration_tests/clickstream/data_check new file mode 100644 index 0000000000000..da8bdf62ada77 --- /dev/null +++ b/integration_tests/clickstream/data_check @@ -0,0 +1 @@ +user_behaviors,thread_view_count \ No newline at end of file diff --git a/integration_tests/clickstream/docker-compose.yml b/integration_tests/clickstream/docker-compose.yml new file mode 100644 index 0000000000000..b7a1573d25253 --- /dev/null +++ b/integration_tests/clickstream/docker-compose.yml @@ -0,0 +1,62 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: 
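+ # The meta node coordinates the cluster and persists its metadata in the etcd-0 service above.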
+ extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --mode clickstream --qps 2 kafka --brokers message_queue:29092 + restart: always + container_name: datagen +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/clickstream/query.sql b/integration_tests/clickstream/query.sql new file mode 100644 index 0000000000000..917bdfb717fc9 --- /dev/null +++ b/integration_tests/clickstream/query.sql @@ -0,0 +1,19 @@ +--- TODO: we need now() for ad-hoc mode. +-- SELECT +-- * +-- FROM +-- thread_view_count +-- WHERE +-- window_time > ( +-- '2022-7-22 18:43' :: TIMESTAMP - INTERVAL '1 day' +-- ) +-- AND window_time < ( +-- '2022-7-22 18:43' :: TIMESTAMP - INTERVAL '1 day' + INTERVAL '10 minutes' +-- ) +-- AND target_id = 'thread83' +SELECT + * +FROM + thread_view_count +LIMIT + 10; \ No newline at end of file diff --git a/integration_tests/datagen/.gitignore b/integration_tests/datagen/.gitignore new file mode 100644 index 0000000000000..503f832e84296 --- /dev/null +++ b/integration_tests/datagen/.gitignore @@ -0,0 +1 @@ +datagen diff --git a/integration_tests/datagen/.goreleaser.yaml b/integration_tests/datagen/.goreleaser.yaml new file mode 100644 index 0000000000000..bbfdb6483d5c7 --- /dev/null +++ b/integration_tests/datagen/.goreleaser.yaml @@ -0,0 +1,21 @@ +before: + hooks: + - go mod tidy +builds: + - env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin + binary: datagen +checksum: + name_template: "checksums.txt" +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - "^docs:" + - "^test:" diff --git a/integration_tests/datagen/Dockerfile b/integration_tests/datagen/Dockerfile new file mode 100644 index 0000000000000..59354cbecab59 --- /dev/null +++ b/integration_tests/datagen/Dockerfile @@ -0,0 +1,8 @@ +FROM golang as builder + +ADD . /datagen-src +RUN cd /datagen-src && gofmt -s -w . 
&& go build + +FROM ubuntu:20.04 + +COPY --from=builder /datagen-src/datagen / diff --git a/integration_tests/datagen/ad_click/ad_click.go b/integration_tests/datagen/ad_click/ad_click.go new file mode 100644 index 0000000000000..eb97467784d31 --- /dev/null +++ b/integration_tests/datagen/ad_click/ad_click.go @@ -0,0 +1,58 @@ +package ad_click + +import ( + "context" + "datagen/gen" + "datagen/sink" + "encoding/json" + "fmt" + "math/rand" + "time" +) + +type clickEvent struct { + sink.BaseSinkRecord + + UserId int64 `json:"user_id"` + AdId int64 `json:"ad_id"` + ClickTimestamp string `json:"click_timestamp"` + ImpressionTimestamp string `json:"impression_timestamp"` +} + +func (r *clickEvent) ToPostgresSql() string { + return fmt.Sprintf("INSERT INTO %s (user_id, ad_id, click_timestamp, impression_timestamp) values ('%d', '%d', '%s', '%s')", + "ad_source", r.UserId, r.AdId, r.ClickTimestamp, r.ImpressionTimestamp) +} + +func (r *clickEvent) ToJson() (topic string, key string, data []byte) { + data, _ = json.Marshal(r) + return "ad_clicks", fmt.Sprint(r.UserId), data +} + +type adClickGen struct { +} + +func NewAdClickGen() gen.LoadGenerator { + return &adClickGen{} +} + +func (g *adClickGen) KafkaTopics() []string { + return []string{"ad_clicks"} +} + +func (g *adClickGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) { + for { + now := time.Now() + record := &clickEvent{ + UserId: rand.Int63n(100000), + AdId: rand.Int63n(10), + ClickTimestamp: now.Add(time.Duration(rand.Intn(1000)) * time.Millisecond).Format(gen.RwTimestampLayout), + ImpressionTimestamp: now.Format(gen.RwTimestampLayout), + } + select { + case <-ctx.Done(): + return + case outCh <- record: + } + } +} diff --git a/integration_tests/datagen/ad_ctr/ad_ctr.go b/integration_tests/datagen/ad_ctr/ad_ctr.go new file mode 100644 index 0000000000000..a180c0c267e27 --- /dev/null +++ b/integration_tests/datagen/ad_ctr/ad_ctr.go @@ -0,0 +1,111 @@ +package ad_ctr + +import ( + "context" + "datagen/gen" + "datagen/sink" + "encoding/json" + "fmt" + "strconv" + "time" + + "github.com/brianvoe/gofakeit/v6" +) + +type adImpressionEvent struct { + sink.BaseSinkRecord + + BidId int64 `json:"bid_id"` + AdId int64 `json:"ad_id"` + ImpressionTimestamp string `json:"impression_timestamp"` +} + +func (r *adImpressionEvent) ToPostgresSql() string { + return fmt.Sprintf("INSERT INTO %s (bid_id, ad_id, impression_timestamp) values ('%d', '%d', '%s')", + "ad_impression", r.BidId, r.AdId, r.ImpressionTimestamp) +} + +func (r *adImpressionEvent) ToJson() (topic string, key string, data []byte) { + data, _ = json.Marshal(r) + return "ad_impression", fmt.Sprint(r.BidId), data +} + +type adClickEvent struct { + sink.BaseSinkRecord + + BidId int64 `json:"bid_id"` + ClickTimestamp string `json:"click_timestamp"` +} + +func (r *adClickEvent) ToPostgresSql() string { + return fmt.Sprintf("INSERT INTO %s (bid_id, click_timestamp) values ('%d', '%s')", + "ad_click", r.BidId, r.ClickTimestamp) +} + +func (r *adClickEvent) ToJson() (topic string, key string, data []byte) { + data, _ = json.Marshal(r) + return "ad_click", fmt.Sprint(r.BidId), data +} + +type adCtrGen struct { + faker *gofakeit.Faker + ctr map[int64]float64 +} + +func NewAdCtrGen() gen.LoadGenerator { + return &adCtrGen{ + ctr: make(map[int64]float64), + faker: gofakeit.New(0), + } +} + +func (g *adCtrGen) getCtr(adId int64) float64 { + if ctr, ok := g.ctr[adId]; ok { + return ctr + } + ctr := g.faker.Float64Range(0, 1) + g.ctr[adId] = ctr + return ctr +} + +func (g *adCtrGen) 
hasClick(adId int64) bool { + return g.faker.Float64Range(0, 1) < g.getCtr(adId) +} + +func (g *adCtrGen) generate() []sink.SinkRecord { + bidId, _ := strconv.ParseInt(g.faker.DigitN(8), 10, 64) + adId := int64(g.faker.IntRange(1, 10)) + + events := []sink.SinkRecord{ + &adImpressionEvent{ + BidId: bidId, + AdId: adId, + ImpressionTimestamp: time.Now().Format(gen.RwTimestampLayout), + }, + } + if g.hasClick(adId) { + randomDelay := time.Duration(g.faker.IntRange(1, 10) * int(time.Second)) + events = append(events, &adClickEvent{ + BidId: bidId, + ClickTimestamp: time.Now().Add(randomDelay).Format(gen.RwTimestampLayout), + }) + } + return events +} + +func (g *adCtrGen) KafkaTopics() []string { + return []string{"ad_impression", "ad_click"} +} + +func (g *adCtrGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) { + for { + records := g.generate() + for _, record := range records { + select { + case outCh <- record: + case <-ctx.Done(): + return + } + } + } +} diff --git a/integration_tests/datagen/cdn_metrics/cdn_metrics.go b/integration_tests/datagen/cdn_metrics/cdn_metrics.go new file mode 100644 index 0000000000000..aff55f87a80f3 --- /dev/null +++ b/integration_tests/datagen/cdn_metrics/cdn_metrics.go @@ -0,0 +1,32 @@ +package cdn_metrics + +import ( + "context" + "datagen/gen" + "datagen/sink" +) + +type cdnMetricsGen struct { + cfg gen.GeneratorConfig +} + +func NewCdnMetricsGen(cfg gen.GeneratorConfig) gen.LoadGenerator { + return &cdnMetricsGen{cfg: cfg} +} + +func (g *cdnMetricsGen) KafkaTopics() []string { + return []string{"tcp_metrics", "nics_metrics"} +} + +func (g *cdnMetricsGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) { + for i := 0; i < 10; i++ { // Assume there are 10 devices + go func(i int) { + m := newDeviceTcpMonitor(i, g.cfg) + m.emulate(ctx, outCh) + }(i) + go func(i int) { + m := newDeviceNicsMonitor(i, g.cfg) + m.emulate(ctx, outCh) + }(i) + } +} diff --git a/integration_tests/datagen/cdn_metrics/nics.go b/integration_tests/datagen/cdn_metrics/nics.go new file mode 100644 index 0000000000000..dc280eab1468e --- /dev/null +++ b/integration_tests/datagen/cdn_metrics/nics.go @@ -0,0 +1,108 @@ +package cdn_metrics + +import ( + "context" + "crypto/md5" + "datagen/gen" + "datagen/sink" + "encoding/hex" + "encoding/json" + "fmt" + "strconv" + "time" +) + +const ( + // Link bandwidth in bytes per second. + maxBandwidth = float64(10 * 1024 * 1024 * 1024 / 8) // 10 Gb/s +) + +type nicsMetric struct { + sink.BaseSinkRecord + + DeviceId string `json:"device_id"` + MetricName string `json:"metric_name"` + Aggregation string `json:"aggregation"` + NicName string `json:"nic_name"` + ReportTime string `json:"report_time"` + Bandwidth float64 `json:"bandwidth"` + Value float64 `json:"metric_value"` +} + +func (r *nicsMetric) ToPostgresSql() string { + return fmt.Sprintf( + `INSERT INTO %s +(device_id, metric_name, aggregation, nic_name, report_time, bandwidth, metric_value) +values ('%s', '%s', '%s', '%s', '%s', '%f', '%f')`, + "nics_metrics", r.DeviceId, r.MetricName, r.Aggregation, r.NicName, r.ReportTime, r.Bandwidth, r.Value) +} + +func (r *nicsMetric) ToJson() (topic string, key string, data []byte) { + data, _ = json.Marshal(r) + return "nics_metrics", r.DeviceId, data +} + +// Each device has a nics monitor. 
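+// Every 10 seconds it emits the average and peak tx_bytes of each of the device's
+// NICs, tagged with the link's total bandwidth so downstream views can derive utilization.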
+type deviceNicsMonitor struct { + deviceId string + randDist gen.RandDist +} + +func newDeviceNicsMonitor(id int, cfg gen.GeneratorConfig) deviceNicsMonitor { + hash := md5.Sum([]byte(strconv.Itoa(id))) + return deviceNicsMonitor{ + deviceId: hex.EncodeToString(hash[:]), + randDist: gen.NewRandDist(cfg), + } +} + +func (m *deviceNicsMonitor) emulate(ctx context.Context, outCh chan<- sink.SinkRecord) { + for { + metrics := m.generate() + for _, metric := range metrics { + select { + case outCh <- metric: + case <-ctx.Done(): + return + } + } + // Report NIC metrics every 10s. time.After avoids leaking a new ticker on each iteration. + select { + case <-ctx.Done(): + return + case <-time.After(10 * time.Second): + } + } +} + +func (impl *deviceNicsMonitor) generate() []*nicsMetric { + curTime := time.Now() + var metrics []*nicsMetric + for nicId := 0; nicId < 4; nicId++ { + // Median value is 480MB/s. + txBytesAvg := impl.randDist.Rand(maxBandwidth / 4 * 3) + // Peak value must be larger than average but lower than maxBandwidth. + txBytesPeak := (impl.randDist.Rand(0.3) + 1) * txBytesAvg + metrics = append(metrics, + impl.newMetrics(nicId, "tx_bytes", "avg", curTime, int64(txBytesAvg)), + impl.newMetrics(nicId, "tx_bytes", "peak", curTime, int64(txBytesPeak)), + ) + } + return metrics +} + +func (impl *deviceNicsMonitor) newMetrics( + NicId int, + metricName string, + aggregation string, + reportTime time.Time, + value int64) *nicsMetric { + + return &nicsMetric{ + DeviceId: impl.deviceId, + MetricName: metricName, + Aggregation: aggregation, + NicName: "eth" + strconv.Itoa(NicId), + ReportTime: reportTime.Format(gen.RwTimestampLayout), + Bandwidth: maxBandwidth, + Value: float64(value), + } +} diff --git a/integration_tests/datagen/cdn_metrics/tcp.go b/integration_tests/datagen/cdn_metrics/tcp.go new file mode 100644 index 0000000000000..909c0ebf07058 --- /dev/null +++ b/integration_tests/datagen/cdn_metrics/tcp.go @@ -0,0 +1,88 @@ +package cdn_metrics + +import ( + "context" + "crypto/md5" + "datagen/gen" + "datagen/sink" + "encoding/hex" + "encoding/json" + "fmt" + "strconv" + "time" +) + +type tcpMetric struct { + sink.BaseSinkRecord + + DeviceId string `json:"device_id"` + ReportTime string `json:"report_time"` + MetricName string `json:"metric_name"` + Value float64 `json:"metric_value"` +} + +func (r *tcpMetric) ToPostgresSql() string { + return fmt.Sprintf("INSERT INTO %s (device_id, report_time, metric_name, metric_value) values ('%s', '%s', '%s', '%f')", + "tcp_metrics", r.DeviceId, r.ReportTime, r.MetricName, r.Value) +} + +func (r *tcpMetric) ToJson() (topic string, key string, data []byte) { + data, _ = json.Marshal(r) + return "tcp_metrics", r.DeviceId, data +} + +// Each device has a TCP monitor. +type deviceTcpMonitor struct { + deviceId string + randDist gen.RandDist +} + +func newDeviceTcpMonitor(id int, cfg gen.GeneratorConfig) deviceTcpMonitor { + hash := md5.Sum([]byte(strconv.Itoa(id))) + return deviceTcpMonitor{ + deviceId: hex.EncodeToString(hash[:]), + randDist: gen.NewRandDist(cfg), + } +} + +func (m *deviceTcpMonitor) emulate(ctx context.Context, outCh chan<- sink.SinkRecord) { + for { + metrics := m.generate() + for _, metric := range metrics { + select { + case <-ctx.Done(): + return + case outCh <- metric: + } + } + // Produce tcp metrics every 1s. 
+ select { + case <-ctx.Done(): + return + case <-time.After(1 * time.Second): + } + } +} + +func (m *deviceTcpMonitor) generate() []*tcpMetric { + curTime := time.Now() + + retransRate := m.randDist.Rand(0.6) + srtt := m.randDist.Rand(1400) + downloadSpeed := m.randDist.Rand(2000) + + return []*tcpMetric{ + m.newMetrics("retrans_rate", curTime, retransRate), + // Smoothed Round Trip Time (SRTT). + m.newMetrics("srtt", curTime, srtt), + m.newMetrics("download_speed", curTime, downloadSpeed), + } +} + +func (m *deviceTcpMonitor) newMetrics(metricName string, reportTime time.Time, value float64) *tcpMetric { + return &tcpMetric{ + DeviceId: m.deviceId, + MetricName: metricName, + ReportTime: reportTime.Format(gen.RwTimestampLayout), + Value: value, + } +} diff --git a/integration_tests/datagen/clickstream/clickstream.go b/integration_tests/datagen/clickstream/clickstream.go new file mode 100644 index 0000000000000..8a81f48a5430d --- /dev/null +++ b/integration_tests/datagen/clickstream/clickstream.go @@ -0,0 +1,153 @@ +package clickstream + +import ( + "context" + "datagen/gen" + "datagen/sink" + "encoding/json" + "fmt" + "time" + + "github.com/brianvoe/gofakeit/v6" +) + +type userBehavior struct { + sink.BaseSinkRecord + + UserId string `json:"user_id"` + TargetId string `json:"target_id"` + TargetType string `json:"target_type"` + EventTimestamp string `json:"event_timestamp"` + BehaviorType string `json:"behavior_type"` + + // The two fields are used to express the following behaviors: + // - Comment on a thread + // - Comment on a comment. + // Otherwise the fields will be empty. + ParentTargetType string `json:"parent_target_type"` + ParentTargetId string `json:"parent_target_id"` +} + +func (r *userBehavior) ToPostgresSql() string { + return fmt.Sprintf(`INSERT INTO %s +(user_id, target_id, target_type, event_timestamp, behavior_type, parent_target_type, parent_target_id) +values ('%s', '%s', '%s', '%s', '%s', '%s', '%s')`, + "user_behaviors", r.UserId, r.TargetId, r.TargetType, r.EventTimestamp, r.BehaviorType, r.ParentTargetType, r.ParentTargetId) +} + +func (r *userBehavior) ToJson() (topic string, key string, data []byte) { + data, _ = json.Marshal(r) + return "user_behaviors", r.UserId, data +} + +type targetType string + +type clickStreamGen struct { + faker *gofakeit.Faker +} + +func NewClickStreamGen() gen.LoadGenerator { + return &clickStreamGen{ + faker: gofakeit.New(0), + } +} + +func (g *clickStreamGen) randTargetType() targetType { + switch p := g.faker.IntRange(0, 9); { + case p < 7: + return "thread" + case p >= 7 && p < 9: + return "comment" + case p >= 9: + return "user" + default: + panic(fmt.Sprintf("unreachable: %d", p)) + } +} + +func (g *clickStreamGen) randBehaviorType(t targetType) string { + switch t { + case "thread": + switch p := g.faker.IntRange(0, 99); { + case p < 40: + return "show" + case p >= 40 && p < 65: + return "upvote" + case p >= 65 && p < 70: + return "downvote" + case p >= 70 && p < 75: + return "share" + case p >= 75 && p < 80: + return "award" + case p >= 80 && p < 90: + return "save" + case p >= 90: + return "publish" // Publish a thread. + default: + panic(fmt.Sprintf("unreachable: %d", p)) + } + case "comment": + behaviors := []string{ + "publish", // Publish a comment; the parent target type can be a comment or a thread. + "upvote", + "downvote", + "share", + "award", + "save", + } + return behaviors[g.faker.IntRange(0, len(behaviors)-1)] + case "user": + behaviors := []string{ + "show", // View the user profile. 
+ "follow", + "share", + "unfollow", + } + return behaviors[g.faker.IntRange(0, len(behaviors)-1)] + default: + panic("unexpected target type") + } +} + +func (g *clickStreamGen) generate() sink.SinkRecord { + // TODO: The overall throughput can be further controlled by a scale factor. + userId := g.faker.IntRange(0, 10) + targetId := g.faker.IntRange(0, 100) + target := g.randTargetType() + behavior := g.randBehaviorType(target) + // NOTE: The generated event might not be realistic, for example, a user is allowed to follow itself, + // and a user can upvote a not existed thread. Anyway, it's just a simple demo. + + var parentTargetId string + var parentTargetType string + if target == "comment" && behavior == "publish" { + possibleTargets := []string{"thread", "comment"} + parentTargetType = possibleTargets[g.faker.IntRange(0, len(possibleTargets)-1)] + parentTargetId = parentTargetType + fmt.Sprint(g.faker.IntRange(0, 100)) + } + + return &userBehavior{ + UserId: fmt.Sprint(userId), + TargetId: string(target) + fmt.Sprint(targetId), + TargetType: string(target), + EventTimestamp: time.Now().Format(gen.RwTimestampLayout), + BehaviorType: behavior, + ParentTargetType: parentTargetType, + ParentTargetId: parentTargetId, + } +} + +func (g *clickStreamGen) KafkaTopics() []string { + return []string{"user_behaviors"} +} + +func (g *clickStreamGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) { + for { + record := g.generate() + select { + case <-ctx.Done(): + return + case outCh <- record: + } + } +} diff --git a/integration_tests/datagen/delivery/delivery.go b/integration_tests/datagen/delivery/delivery.go new file mode 100644 index 0000000000000..167d911d5295f --- /dev/null +++ b/integration_tests/datagen/delivery/delivery.go @@ -0,0 +1,73 @@ +package delivery + +import ( + "context" + "datagen/gen" + "datagen/sink" + "encoding/json" + "fmt" + "math/rand" + "time" +) + +type orderEvent struct { + sink.BaseSinkRecord + + OrderId int64 `json:"order_id"` + RestaurantId int64 `json:"restaurant_id"` + OrderState string `json:"order_state"` + OrderTimestamp string `json:"order_timestamp"` +} + +func (r *orderEvent) ToPostgresSql() string { + return fmt.Sprintf("INSERT INTO %s (order_id, restaurant_id, order_state, order_timestamp) values ('%d', '%d', '%s', '%s')", + "delivery_orders_source", r.OrderId, r.RestaurantId, r.OrderState, r.OrderTimestamp) +} + +func (r *orderEvent) ToJson() (topic string, key string, data []byte) { + data, _ = json.Marshal(r) + return "delivery_orders", fmt.Sprint(r.OrderId), data +} + +type orderEventGen struct { + seqOrderId int64 + cfg gen.GeneratorConfig +} + +func NewOrderEventGen(cfg gen.GeneratorConfig) gen.LoadGenerator { + return &orderEventGen{ + seqOrderId: 0, + cfg: cfg, + } +} + +func (g *orderEventGen) KafkaTopics() []string { + return []string{"delivery_orders"} +} + +func (g *orderEventGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) { + order_states := []string{ + "CREATED", + "PENDING", + "DELIVERED", + } + + var num_of_restaurants int64 = 3 + var total_minutes = 30 + + for { + now := time.Now() + record := &orderEvent{ + OrderId: g.seqOrderId, + RestaurantId: rand.Int63n(num_of_restaurants), + OrderState: order_states[rand.Intn(len(order_states))], + OrderTimestamp: now.Add(time.Duration(rand.Intn(total_minutes)) * time.Minute).Format(gen.RwTimestampLayout), + } + g.seqOrderId++ + select { + case <-ctx.Done(): + return + case outCh <- record: + } + } +} diff --git a/integration_tests/datagen/ecommerce/ecommerce.go 
diff --git a/integration_tests/datagen/ecommerce/ecommerce.go b/integration_tests/datagen/ecommerce/ecommerce.go
new file mode 100644
index 0000000000000..b43b084d96631
--- /dev/null
+++ b/integration_tests/datagen/ecommerce/ecommerce.go
@@ -0,0 +1,139 @@
+package ecommerce
+
+import (
+    "context"
+    "datagen/gen"
+    "datagen/sink"
+    "encoding/json"
+    "fmt"
+    "math/rand"
+    "time"
+
+    "github.com/brianvoe/gofakeit/v6"
+)
+
+// The order details.
+type orderEvent struct {
+    sink.BaseSinkRecord
+
+    OrderId        int64   `json:"order_id"`
+    ItemId         int64   `json:"item_id"`
+    ItemPrice      float64 `json:"item_price"`
+    EventTimestamp string  `json:"event_timestamp"`
+}
+
+func (r *orderEvent) ToPostgresSql() string {
+    return fmt.Sprintf(`INSERT INTO %s
+(order_id, item_id, item_price, event_timestamp)
+values ('%d', '%d', %f, '%s')`,
+        "order_events", r.OrderId, r.ItemId, r.ItemPrice, r.EventTimestamp)
+}
+
+func (r *orderEvent) ToJson() (topic string, key string, data []byte) {
+    data, _ = json.Marshal(r)
+    return "order_events", fmt.Sprint(r.OrderId), data
+}
+
+// Each order/trade is composed of two events:
+// an 'order_created' event and a 'parcel_shipped' event.
+type parcelEvent struct {
+    sink.BaseSinkRecord
+
+    OrderId        int64  `json:"order_id"`
+    EventTimestamp string `json:"event_timestamp"`
+    EventType      string `json:"event_type"`
+}
+
+func (r *parcelEvent) ToPostgresSql() string {
+    return fmt.Sprintf(`INSERT INTO %s
+(order_id, event_timestamp, event_type)
+values ('%d', '%s', '%s')`,
+        "parcel_events", r.OrderId, r.EventTimestamp, r.EventType)
+}
+
+func (r *parcelEvent) ToJson() (topic string, key string, data []byte) {
+    data, _ = json.Marshal(r)
+    return "parcel_events", fmt.Sprint(r.OrderId), data
+}
+
+type ecommerceGen struct {
+    faker *gofakeit.Faker
+
+    // We simply model orders as a sliding window. `seqOrderId` advances as new
+    // orders are created. `seqShipId` never exceeds `seqOrderId` and advances
+    // when an existing order is shipped.
+    seqOrderId int64
+    seqShipId  int64
+
+    // Item ID -> Item Price
+    items []float64
+}
+
+func NewEcommerceGen() gen.LoadGenerator {
+    const numItems = 1000
+    items := make([]float64, numItems)
+    for i := 0; i < numItems; i++ {
+        items[i] = gofakeit.Float64Range(0, 10000)
+    }
+    return &ecommerceGen{
+        faker:      gofakeit.New(0),
+        seqOrderId: 0,
+        seqShipId:  0,
+        items:      items,
+    }
+}
+
+func (g *ecommerceGen) KafkaTopics() []string {
+    return []string{"order_events", "parcel_events"}
+}
+
+func (g *ecommerceGen) generate() []sink.SinkRecord {
+    ts := time.Now().Format(gen.RwTimestampLayout)
+
+    // Create a new order either by coin flip, or unconditionally once every
+    // existing order has been shipped; otherwise the ship branch below would
+    // ship an order that was never created.
+    if g.faker.Bool() || g.seqShipId >= g.seqOrderId {
+        // New order.
+        g.seqOrderId++
+        itemsNum := g.faker.IntRange(1, 4)
+        orders := make([]sink.SinkRecord, itemsNum)
+        for i := 0; i < itemsNum; i++ {
+            itemId := rand.Intn(len(g.items))
+            itemPrice := g.items[itemId]
+            orders[i] = &orderEvent{
+                OrderId:        g.seqOrderId,
+                ItemId:         int64(itemId),
+                ItemPrice:      itemPrice,
+                EventTimestamp: ts,
+            }
+        }
+        var records []sink.SinkRecord
+        records = append(records, orders...)
+        records = append(records, &parcelEvent{
+            OrderId:        g.seqOrderId,
+            EventTimestamp: ts,
+            EventType:      "order_created",
+        })
+        return records
+    } else {
+        // Ship an existing order.
+        g.seqShipId++
+        return []sink.SinkRecord{
+            &parcelEvent{
+                OrderId:        g.seqShipId,
+                EventType:      "parcel_shipped",
+                EventTimestamp: ts,
+            },
+        }
+    }
+}
+
+func (g *ecommerceGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) {
+    for {
+        records := g.generate()
+        for _, record := range records {
+            select {
+            case <-ctx.Done():
+                return
+            case outCh <- record:
+            }
+        }
+    }
+}
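Given the sliding-window invariant documented on ecommerceGen (seqShipId never exceeds seqOrderId), a sketch of a package-level property test (hypothetical, not part of the patch) that exercises generate() many times and asserts the invariant:

package ecommerce

import "testing"

// TestShipNeverOutrunsCreate checks that generate() never ships an order
// before it has been created, i.e. seqShipId <= seqOrderId at all times.
func TestShipNeverOutrunsCreate(t *testing.T) {
    g := NewEcommerceGen().(*ecommerceGen)
    for i := 0; i < 10000; i++ {
        g.generate()
        if g.seqShipId > g.seqOrderId {
            t.Fatalf("iteration %d: shipped order %d but only %d orders were created",
                i, g.seqShipId, g.seqOrderId)
        }
    }
}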
diff --git a/integration_tests/datagen/gen/generator.go b/integration_tests/datagen/gen/generator.go
new file mode 100644
index 0000000000000..af9fa9db96d8e
--- /dev/null
+++ b/integration_tests/datagen/gen/generator.go
@@ -0,0 +1,96 @@
+package gen
+
+import (
+    "context"
+    "datagen/sink"
+    "datagen/sink/kafka"
+    "datagen/sink/kinesis"
+    "datagen/sink/mysql"
+    "datagen/sink/postgres"
+    "datagen/sink/pulsar"
+
+    "gonum.org/v1/gonum/stat/distuv"
+)
+
+type GeneratorConfig struct {
+    Postgres postgres.PostgresConfig
+    Mysql    mysql.MysqlConfig
+    Kafka    kafka.KafkaConfig
+    Pulsar   pulsar.PulsarConfig
+    Kinesis  kinesis.KinesisConfig
+
+    // Whether to print the content of every event.
+    PrintInsert bool
+    // The datagen mode, e.g. "ad-ctr".
+    Mode string
+    // The sink type.
+    Sink string
+    // The throttled requests-per-second.
+    Qps int
+
+    // Whether the value distribution should have a heavy tail.
+    // If true, we will use a uniform distribution for randomizing values.
+    HeavyTail bool
+
+    // The record format, used when the sink is a message queue.
+    Format string
+}
+
+type LoadGenerator interface {
+    KafkaTopics() []string
+
+    Load(ctx context.Context, outCh chan<- sink.SinkRecord)
+}
+
+// The timestamp layout accepted by RisingWave. Note that in a Go time layout
+// the zone offset must be spelled "-07:00"; writing "+01:00" would emit a
+// literal '+' followed by the zero-padded month.
+const RwTimestampLayout = "2006-01-02 15:04:05.000-07:00"
+
+type RandDist interface {
+    // Rand returns a random number in the range [0, max]. (The Poisson
+    // variant concentrates around max/2 and may rarely exceed max.)
+    Rand(max float64) float64
+}
+
+func NewRandDist(cfg GeneratorConfig) RandDist {
+    // Pointer receivers are required below: with value receivers, the lazily
+    // created distribution caches would be discarded on every call.
+    if cfg.HeavyTail {
+        return &UniformDist{}
+    } else {
+        return &PoissonDist{}
+    }
+}
+
+type UniformDist struct {
+    u map[float64]distuv.Uniform
+}
+
+func (ud *UniformDist) Rand(max float64) float64 {
+    if ud.u == nil {
+        ud.u = make(map[float64]distuv.Uniform)
+    }
+    if _, ok := ud.u[max]; !ok {
+        ud.u[max] = distuv.Uniform{
+            Min: 0,
+            Max: max,
+        }
+    }
+    return ud.u[max].Rand()
+}
+
+// A more realistic distribution: values in the tail are less likely.
+type PoissonDist struct {
+    ps map[float64]distuv.Poisson
+}
+
+func (pd *PoissonDist) Rand(max float64) float64 {
+    if pd.ps == nil {
+        pd.ps = make(map[float64]distuv.Poisson)
+    }
+    if _, ok := pd.ps[max]; !ok {
+        pd.ps[max] = distuv.Poisson{
+            Lambda: max / 2,
+        }
+    }
+    return pd.ps[max].Rand()
+}
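To see why HeavyTail selects the uniform distribution, a standalone sketch (illustrative only) comparing the spread of the two gonum distributions used above: uniform samples cover [0, max) evenly, while Poisson samples with lambda = max/2 cluster around max/2 and rarely stray far from it.

package main

import (
    "fmt"

    "gonum.org/v1/gonum/stat/distuv"
)

func main() {
    uniform := distuv.Uniform{Min: 0, Max: 2000}
    poisson := distuv.Poisson{Lambda: 1000} // Lambda = max / 2, as in PoissonDist.

    var uniformMax, poissonMax float64
    for i := 0; i < 100000; i++ {
        if v := uniform.Rand(); v > uniformMax {
            uniformMax = v
        }
        if v := poisson.Rand(); v > poissonMax {
            poissonMax = v
        }
    }
    // Typically prints a uniform max near 2000 and a Poisson max only a few
    // standard deviations (sqrt(1000), about 32) above 1000.
    fmt.Println("uniform max:", uniformMax, "poisson max:", poissonMax)
}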
diff --git a/integration_tests/datagen/go.mod b/integration_tests/datagen/go.mod new file mode 100644 index 0000000000000..9316a7df13a3b --- /dev/null +++ b/integration_tests/datagen/go.mod @@ -0,0 +1,78 @@ +module datagen + +go 1.18 + +require ( + github.com/Shopify/sarama v1.37.2 + github.com/apache/pulsar-client-go v0.8.1 + github.com/aws/aws-sdk-go v1.44.126 + github.com/brianvoe/gofakeit/v6 v6.16.0 + github.com/go-sql-driver/mysql v1.7.0 + github.com/lib/pq v1.10.7 + github.com/linkedin/goavro/v2 v2.9.8 + github.com/urfave/cli v1.22.10 + go.uber.org/ratelimit v0.2.0 + gonum.org/v1/gonum v0.12.0 + google.golang.org/protobuf v1.28.1 +) + +require ( + github.com/99designs/keyring v1.1.6 // indirect + github.com/AthenZ/athenz v1.10.39 // indirect + github.com/DataDog/zstd v1.5.0 // indirect + github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect + github.com/apache/pulsar-client-go/oauth2 v0.0.0-20220120090717-25e59572242e // indirect + github.com/ardielle/ardielle-go v1.5.2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/danieljoos/wincred v1.0.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b // indirect + github.com/eapache/go-resiliency v1.3.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d // indirect + github.com/klauspost/compress v1.15.11 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/pierrec/lz4 v2.0.5+incompatible // indirect + github.com/pierrec/lz4/v4 v4.1.17 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.12.1 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.9.0 // indirect +
github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/stretchr/testify v1.8.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect + golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/term v0.5.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/integration_tests/datagen/go.sum b/integration_tests/datagen/go.sum new file mode 100644 index 0000000000000..34ac14123e168 --- /dev/null +++ b/integration_tests/datagen/go.sum @@ -0,0 +1,836 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= 
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/99designs/keyring v1.1.6 h1:kVDC2uCgVwecxCk+9zoCt2uEL6dt+dfVzMvGgnVcIuM= +github.com/99designs/keyring v1.1.6/go.mod h1:16e0ds7LGQQcT59QqkTg72Hh5ShM51Byv5PEmW6uoRU= +github.com/AthenZ/athenz v1.10.39 h1:mtwHTF/v62ewY2Z5KWhuZgVXftBej1/Tn80zx4DcawY= +github.com/AthenZ/athenz v1.10.39/go.mod h1:3Tg8HLsiQZp81BJY58JBeU2BR6B/H4/0MQGfCwhHNEA= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= +github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Shopify/sarama v1.37.2 h1:LoBbU0yJPte0cE5TZCGdlzZRmMgMtZU/XgnUKZg9Cv4= +github.com/Shopify/sarama v1.37.2/go.mod h1:Nxye/E+YPru//Bpaorfhc3JsSGYwCaDDj+R4bK52U5o= +github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/pulsar-client-go v0.8.1 h1:UZINLbH3I5YtNzqkju7g9vrl4CKrEgYSx2rbpvGufrE= +github.com/apache/pulsar-client-go v0.8.1/go.mod h1:yJNcvn/IurarFDxwmoZvb2Ieylg630ifxeO/iXpk27I= +github.com/apache/pulsar-client-go/oauth2 v0.0.0-20220120090717-25e59572242e h1:EqiJ0Xil8NmcXyupNqXV9oYDBeWntEIegxLahrTr8DY= +github.com/apache/pulsar-client-go/oauth2 v0.0.0-20220120090717-25e59572242e/go.mod h1:Xee4tgYLFpYcPMcTfBYWE1uKRzeciodGTSEDMzsR6i8= +github.com/ardielle/ardielle-go v1.5.2 h1:TilHTpHIQJ27R1Tl/iITBzMwiUGSlVfiVhwDNGM3Zj4= +github.com/ardielle/ardielle-go v1.5.2/go.mod h1:I4hy1n795cUhaVt/ojz83SNVCYIGsAFAONtv2Dr7HUI= +github.com/ardielle/ardielle-tools v1.5.4/go.mod h1:oZN+JRMnqGiIhrzkRN9l26Cej9dEx4jeNG6A+AdkShk= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics 
v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.32.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.44.126 h1:7HQJw2DNiwpxqMe2H7odGNT2rhO4SRrUe5/8dYXl0Jk= +github.com/aws/aws-sdk-go v1.44.126/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/beefsack/go-rate v0.0.0-20220214233405-116f4ca011a0/go.mod h1:6YNgTHLutezwnBvyneBbwvB8C82y3dcoOj5EQJIdGXA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= +github.com/brianvoe/gofakeit/v6 v6.16.0 h1:EelCqtfArd8ppJ0z+TpOxXH8sVWNPBadPNdCDSMMw7k= +github.com/brianvoe/gofakeit/v6 v6.16.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/danieljoos/wincred v1.0.2 h1:zf4bhty2iLuwgjgpraD2E9UbvO+fe54XXGJbOwe23fU= +github.com/danieljoos/wincred v1.0.2/go.mod h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= +github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= +github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b h1:HBah4D48ypg3J7Np4N+HY/ZR76fx3HEUGxDU6Uk39oQ= +github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= +github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= +github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= 
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod 
h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jawher/mow.cli v1.0.4/go.mod h1:5hQj2V8g+qYmLUVWqu4Wuja1pI57M83EChYLVZ0sMKk= +github.com/jawher/mow.cli v1.2.0/go.mod h1:y+pcA3jBAdo/GIZx/0rFjw/K2bVEODP9rfZOfaiq8Ko= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE/Tq8= +github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d h1:Z+RDyXzjKE0i2sTjZ/b1uxiGtPhFy34Ou/Tk0qwN0kM= +github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d/go.mod h1:JJNrCn9otv/2QP4D7SMJBgaleKpOf66PnW6F5WGNRIc= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/linkedin/goavro/v2 v2.9.8 h1:jN50elxBsGBDGVDEKqUlDuU1cFwJ11K/yrJCBMe/7Wg= +github.com/linkedin/goavro/v2 v2.9.8/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 
v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= +github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.6.0/go.mod 
h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/ratelimit v0.2.0 
h1:UQE2Bgi7p2B85uP5dC2bbRtig0C+OeNRnNEafLjsLPA= +go.uber.org/ratelimit v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 h1:ZrnxWX62AgTKOSagEqxvb3ffipvEDX2pl7E1TdqLqIc= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod 
h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= 
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod 
h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/integration_tests/datagen/livestream/livestream.go b/integration_tests/datagen/livestream/livestream.go new file mode 100644 index 0000000000000..a5b68075f0391 --- /dev/null +++ b/integration_tests/datagen/livestream/livestream.go @@ -0,0 +1,147 @@ +package livestream + +import ( + "context" + "datagen/gen" + "datagen/livestream/proto" + "datagen/sink" + "encoding/json" + "fmt" + "math" + "time" + + "github.com/brianvoe/gofakeit/v6" + protobuf "google.golang.org/protobuf/proto" +) + +type liveClient struct { + faker *gofakeit.Faker + + ip string + agent string + id string + country string + roomId string +} + +func (c *liveClient) emulate() *liveMetric { + longestFreezeDuration := int64(c.faker.UintRange(0, 100)) + return &liveMetric{ + Ip: c.ip, + Agent: c.agent, + Id: c.id, + RoomId: c.roomId, + Country: c.country, + VideoBps: int64(c.faker.UintRange(1000, 1000000)), + VideoFps: int64(c.faker.UintRange(30, 40)), + VideoRtt: int64(c.faker.UintRange(100, 300)), + VideoLostPps: int64(c.faker.UintRange(0, 10)), + VideoLongestFreezeDuration: longestFreezeDuration, + VideoTotalFreezeDuration: longestFreezeDuration + int64(c.faker.UintRange(0, 
20)),
+		ReportTimestamp:            time.Now().Format(time.RFC3339),
+	}
+}
+
+func (c *liveClient) reportMetric(ctx context.Context, outCh chan<- sink.SinkRecord) {
+	// Emit one metric every 10 seconds until the context is cancelled.
+	// The ticker is created once and stopped on exit; allocating a new
+	// ticker on every loop iteration would leak timers.
+	ticker := time.NewTicker(10 * time.Second)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			record := c.emulate()
+			select {
+			case outCh <- record:
+			case <-ctx.Done():
+				return
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+type liveMetric struct {
+	sink.BaseSinkRecord
+
+	Ip                         string `json:"client_ip"`
+	Agent                      string `json:"user_agent"`
+	Id                         string `json:"user_id"`
+	RoomId                     string `json:"room_id"`
+	VideoBps                   int64  `json:"video_bps"`
+	VideoFps                   int64  `json:"video_fps"`
+	VideoRtt                   int64  `json:"video_rtt"`
+	VideoLostPps               int64  `json:"video_lost_pps"`
+	VideoLongestFreezeDuration int64  `json:"video_longest_freeze_duration"`
+	VideoTotalFreezeDuration   int64  `json:"video_total_freeze_duration"`
+	ReportTimestamp            string `json:"report_timestamp"`
+	Country                    string `json:"country"`
+}
+
+func (r *liveMetric) ToPostgresSql() string {
+	return fmt.Sprintf(
+		`
+INSERT INTO %s (client_ip, user_agent, user_id, room_id, video_bps, video_fps, video_rtt, video_lost_pps, video_longest_freeze_duration, video_total_freeze_duration, report_timestamp, country)
+VALUES ('%s', '%s', '%s', '%s', %d, %d, %d, %d, %d, %d, '%s', '%s')
+`,
+		"live_stream_metrics",
+		r.Ip, r.Agent, r.Id, r.RoomId, r.VideoBps, r.VideoFps, r.VideoRtt, r.VideoLostPps, r.VideoLongestFreezeDuration, r.VideoTotalFreezeDuration, r.ReportTimestamp, r.Country)
+}
+
+func (r *liveMetric) ToJson() (topic string, key string, data []byte) {
+	data, _ = json.Marshal(r)
+	return "live_stream_metrics", fmt.Sprint(r.Id), data
+}
+
+func (r *liveMetric) ToProtobuf() (topic string, key string, data []byte) {
+	m := proto.LiveStreamMetrics{
+		ClientIp:                   r.Ip,
+		UserAgent:                  r.Agent,
+		UserId:                     r.Id,
+		RoomId:                     r.RoomId,
+		VideoBps:                   r.VideoBps,
+		VideoFps:                   r.VideoFps,
+		VideoRtt:                   r.VideoRtt,
+		VideoLostPps:               r.VideoLostPps,
+		VideoLongestFreezeDuration: r.VideoLongestFreezeDuration,
+		VideoTotalFreezeDuration:   r.VideoTotalFreezeDuration,
+		ReportTimestamp:            time.Now().Unix(),
+		Country:                    r.Country,
+	}
+	data, err := protobuf.Marshal(&m)
+	if err != nil {
+		panic(err)
+	}
+	return "live_stream_metrics", fmt.Sprint(r.Id), data
+}
+
+type liveStreamMetricsGen struct {
+	faker *gofakeit.Faker
+	cfg   gen.GeneratorConfig
+}
+
+func NewLiveStreamMetricsGen(cfg gen.GeneratorConfig) gen.LoadGenerator {
+	return &liveStreamMetricsGen{
+		faker: gofakeit.New(0),
+		cfg:   cfg,
+	}
+}
+
+func (g *liveStreamMetricsGen) KafkaTopics() []string {
+	return []string{"live_stream_metrics"}
+}
+
+func (g *liveStreamMetricsGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) {
+	// The number of clients is roughly the QPS.
+	clients := int(math.Min(float64(g.cfg.Qps), 1000))
+	for i := 0; i < clients; i++ {
+		go func(i int) {
+			c := &liveClient{
+				faker:   g.faker,
+				id:      fmt.Sprint(i),
+				agent:   g.faker.UserAgent(),
+				ip:      fmt.Sprintf("%s:%d", g.faker.IPv4Address(), g.faker.Uint16()),
+				country: g.faker.Country(),
+				roomId:  fmt.Sprint(g.faker.Uint32()),
+			}
+			c.reportMetric(ctx, outCh)
+		}(i)
+	}
+}
diff --git a/integration_tests/datagen/livestream/proto/livestream.pb.go b/integration_tests/datagen/livestream/proto/livestream.pb.go
new file mode 100644
index 0000000000000..3c098ca70fbbe
--- /dev/null
+++ b/integration_tests/datagen/livestream/proto/livestream.pb.go
@@ -0,0 +1,256 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.9 +// source: livestream.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type LiveStreamMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientIp string `protobuf:"bytes,1,opt,name=client_ip,json=clientIp,proto3" json:"client_ip,omitempty"` + UserAgent string `protobuf:"bytes,2,opt,name=user_agent,json=userAgent,proto3" json:"user_agent,omitempty"` + UserId string `protobuf:"bytes,3,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + RoomId string `protobuf:"bytes,4,opt,name=room_id,json=roomId,proto3" json:"room_id,omitempty"` + VideoBps int64 `protobuf:"varint,5,opt,name=video_bps,json=videoBps,proto3" json:"video_bps,omitempty"` + VideoFps int64 `protobuf:"varint,6,opt,name=video_fps,json=videoFps,proto3" json:"video_fps,omitempty"` + VideoRtt int64 `protobuf:"varint,7,opt,name=video_rtt,json=videoRtt,proto3" json:"video_rtt,omitempty"` + VideoLostPps int64 `protobuf:"varint,8,opt,name=video_lost_pps,json=videoLostPps,proto3" json:"video_lost_pps,omitempty"` + VideoLongestFreezeDuration int64 `protobuf:"varint,9,opt,name=video_longest_freeze_duration,json=videoLongestFreezeDuration,proto3" json:"video_longest_freeze_duration,omitempty"` + VideoTotalFreezeDuration int64 `protobuf:"varint,10,opt,name=video_total_freeze_duration,json=videoTotalFreezeDuration,proto3" json:"video_total_freeze_duration,omitempty"` + ReportTimestamp int64 `protobuf:"varint,11,opt,name=report_timestamp,json=reportTimestamp,proto3" json:"report_timestamp,omitempty"` + Country string `protobuf:"bytes,12,opt,name=country,proto3" json:"country,omitempty"` +} + +func (x *LiveStreamMetrics) Reset() { + *x = LiveStreamMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_livestream_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LiveStreamMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LiveStreamMetrics) ProtoMessage() {} + +func (x *LiveStreamMetrics) ProtoReflect() protoreflect.Message { + mi := &file_livestream_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LiveStreamMetrics.ProtoReflect.Descriptor instead. 
+func (*LiveStreamMetrics) Descriptor() ([]byte, []int) { + return file_livestream_proto_rawDescGZIP(), []int{0} +} + +func (x *LiveStreamMetrics) GetClientIp() string { + if x != nil { + return x.ClientIp + } + return "" +} + +func (x *LiveStreamMetrics) GetUserAgent() string { + if x != nil { + return x.UserAgent + } + return "" +} + +func (x *LiveStreamMetrics) GetUserId() string { + if x != nil { + return x.UserId + } + return "" +} + +func (x *LiveStreamMetrics) GetRoomId() string { + if x != nil { + return x.RoomId + } + return "" +} + +func (x *LiveStreamMetrics) GetVideoBps() int64 { + if x != nil { + return x.VideoBps + } + return 0 +} + +func (x *LiveStreamMetrics) GetVideoFps() int64 { + if x != nil { + return x.VideoFps + } + return 0 +} + +func (x *LiveStreamMetrics) GetVideoRtt() int64 { + if x != nil { + return x.VideoRtt + } + return 0 +} + +func (x *LiveStreamMetrics) GetVideoLostPps() int64 { + if x != nil { + return x.VideoLostPps + } + return 0 +} + +func (x *LiveStreamMetrics) GetVideoLongestFreezeDuration() int64 { + if x != nil { + return x.VideoLongestFreezeDuration + } + return 0 +} + +func (x *LiveStreamMetrics) GetVideoTotalFreezeDuration() int64 { + if x != nil { + return x.VideoTotalFreezeDuration + } + return 0 +} + +func (x *LiveStreamMetrics) GetReportTimestamp() int64 { + if x != nil { + return x.ReportTimestamp + } + return 0 +} + +func (x *LiveStreamMetrics) GetCountry() string { + if x != nil { + return x.Country + } + return "" +} + +var File_livestream_proto protoreflect.FileDescriptor + +var file_livestream_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x6c, 0x69, 0x76, 0x65, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xc5, 0x03, 0x0a, 0x11, 0x4c, + 0x69, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x70, 0x12, 0x1d, 0x0a, + 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, + 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x6f, 0x6f, 0x6d, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x6d, 0x49, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x5f, 0x62, 0x70, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x42, 0x70, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x76, + 0x69, 0x64, 0x65, 0x6f, 0x5f, 0x66, 0x70, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x76, 0x69, 0x64, 0x65, 0x6f, 0x46, 0x70, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x69, 0x64, 0x65, + 0x6f, 0x5f, 0x72, 0x74, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x76, 0x69, 0x64, + 0x65, 0x6f, 0x52, 0x74, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x5f, 0x6c, + 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x70, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x76, + 0x69, 0x64, 0x65, 0x6f, 0x4c, 0x6f, 0x73, 0x74, 0x50, 0x70, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x76, + 0x69, 0x64, 0x65, 0x6f, 0x5f, 0x6c, 0x6f, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x72, 0x65, + 0x65, 0x7a, 0x65, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 
0x01, + 0x28, 0x03, 0x52, 0x1a, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x4c, 0x6f, 0x6e, 0x67, 0x65, 0x73, 0x74, + 0x46, 0x72, 0x65, 0x65, 0x7a, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, + 0x0a, 0x1b, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, + 0x65, 0x65, 0x7a, 0x65, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x18, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x46, + 0x72, 0x65, 0x65, 0x7a, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, + 0x10, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x72, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x12, 0x5a, 0x10, 0x6c, 0x69, 0x76, 0x65, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_livestream_proto_rawDescOnce sync.Once + file_livestream_proto_rawDescData = file_livestream_proto_rawDesc +) + +func file_livestream_proto_rawDescGZIP() []byte { + file_livestream_proto_rawDescOnce.Do(func() { + file_livestream_proto_rawDescData = protoimpl.X.CompressGZIP(file_livestream_proto_rawDescData) + }) + return file_livestream_proto_rawDescData +} + +var file_livestream_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_livestream_proto_goTypes = []interface{}{ + (*LiveStreamMetrics)(nil), // 0: schema.LiveStreamMetrics +} +var file_livestream_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_livestream_proto_init() } +func file_livestream_proto_init() { + if File_livestream_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_livestream_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LiveStreamMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_livestream_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_livestream_proto_goTypes, + DependencyIndexes: file_livestream_proto_depIdxs, + MessageInfos: file_livestream_proto_msgTypes, + }.Build() + File_livestream_proto = out.File + file_livestream_proto_rawDesc = nil + file_livestream_proto_goTypes = nil + file_livestream_proto_depIdxs = nil +} diff --git a/integration_tests/datagen/load_gen.go b/integration_tests/datagen/load_gen.go new file mode 100644 index 0000000000000..12548e34df8c9 --- /dev/null +++ b/integration_tests/datagen/load_gen.go @@ -0,0 +1,133 @@ +package main + +import ( + "context" + "datagen/ad_click" + "datagen/ad_ctr" + "datagen/cdn_metrics" + "datagen/clickstream" + "datagen/delivery" + "datagen/ecommerce" + "datagen/gen" + "datagen/livestream" + "datagen/nexmark" + "datagen/sink" + "datagen/sink/kafka" + "datagen/sink/kinesis" + 
"datagen/sink/mysql" + "datagen/sink/postgres" + "datagen/sink/pulsar" + "datagen/twitter" + "fmt" + "log" + "time" + + "go.uber.org/ratelimit" +) + +func createSink(ctx context.Context, cfg gen.GeneratorConfig) (sink.Sink, error) { + if cfg.Sink == "postgres" { + return postgres.OpenPostgresSink(cfg.Postgres) + } else if cfg.Sink == "mysql" { + return mysql.OpenMysqlSink(cfg.Mysql) + } else if cfg.Sink == "kafka" { + return kafka.OpenKafkaSink(ctx, cfg.Kafka) + } else if cfg.Sink == "pulsar" { + return pulsar.OpenPulsarSink(ctx, cfg.Pulsar) + } else if cfg.Sink == "kinesis" { + return kinesis.OpenKinesisSink(cfg.Kinesis) + } else { + return nil, fmt.Errorf("invalid sink type: %s", cfg.Sink) + } +} + +// newgen creates a new generator based on the given config. +func newGen(cfg gen.GeneratorConfig) (gen.LoadGenerator, error) { + if cfg.Mode == "ad-click" { + return ad_click.NewAdClickGen(), nil + } else if cfg.Mode == "ad-ctr" { + return ad_ctr.NewAdCtrGen(), nil + } else if cfg.Mode == "twitter" { + return twitter.NewTwitterGen(), nil + } else if cfg.Mode == "cdn-metrics" { + return cdn_metrics.NewCdnMetricsGen(cfg), nil + } else if cfg.Mode == "clickstream" { + return clickstream.NewClickStreamGen(), nil + } else if cfg.Mode == "ecommerce" { + return ecommerce.NewEcommerceGen(), nil + } else if cfg.Mode == "delivery" { + return delivery.NewOrderEventGen(cfg), nil + } else if cfg.Mode == "livestream" || cfg.Mode == "superset" { + return livestream.NewLiveStreamMetricsGen(cfg), nil + } else if cfg.Mode == "nexmark" { + return nexmark.NewNexmarkGen(cfg), nil + } else { + return nil, fmt.Errorf("invalid mode: %s", cfg.Mode) + } +} + +// spawnGen spawns one or more goroutines to generate data and send it to outCh. +func spawnGen(ctx context.Context, cfg gen.GeneratorConfig, outCh chan<- sink.SinkRecord) (gen.LoadGenerator, error) { + gen, err := newGen(cfg) + if err != nil { + return nil, err + } + go gen.Load(ctx, outCh) + return gen, nil +} + +// generateLoad generates data and sends it to the given sink. +func generateLoad(ctx context.Context, cfg gen.GeneratorConfig) error { + sinkImpl, err := createSink(ctx, cfg) + if err != nil { + return err + } + defer func() { + if err = sinkImpl.Close(); err != nil { + log.Print(err) + } + }() + + outCh := make(chan sink.SinkRecord, 1000) + gen, err := spawnGen(ctx, cfg, outCh) + if err != nil { + return err + } + + err = sinkImpl.Prepare(gen.KafkaTopics()) + if err != nil { + return err + } + + count := int64(0) + initTime := time.Now() + prevTime := time.Now() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + rl := ratelimit.New(cfg.Qps, ratelimit.WithoutSlack) // per second + for { + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + if time.Since(prevTime) >= 10*time.Second { + log.Printf("Sent %d records in total (Elapsed: %s)", count, time.Since(initTime).String()) + prevTime = time.Now() + } + case record := <-outCh: + if cfg.PrintInsert { + fmt.Println(record.ToPostgresSql()) + } + // Consume records from the channel and send to sink. 
+			if err := sinkImpl.WriteRecord(ctx, cfg.Format, record); err != nil {
+				return err
+			}
+			_ = rl.Take()
+			count++
+			if time.Since(prevTime) >= 10*time.Second {
+				log.Printf("Sent %d records in total (Elapsed: %s)", count, time.Since(initTime).String())
+				prevTime = time.Now()
+			}
+		}
+	}
+}
diff --git a/integration_tests/datagen/main.go b/integration_tests/datagen/main.go
new file mode 100644
index 0000000000000..404934464868d
--- /dev/null
+++ b/integration_tests/datagen/main.go
@@ -0,0 +1,214 @@
+package main
+
+import (
+	"context"
+	"datagen/gen"
+	"log"
+	"os"
+	"os/signal"
+	"syscall"
+
+	"github.com/urfave/cli"
+)
+
+var cfg gen.GeneratorConfig = gen.GeneratorConfig{}
+
+func runCommand() error {
+	terminateCh := make(chan os.Signal, 1)
+	signal.Notify(terminateCh, os.Interrupt, syscall.SIGTERM)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		<-terminateCh
+		log.Println("Cancelled")
+		cancel()
+	}()
+	return generateLoad(ctx, cfg)
+}
+
+func main() {
+
+	app := &cli.App{
+		Commands: []cli.Command{
+			{
+				Name: "postgres",
+				Flags: []cli.Flag{
+					cli.StringFlag{
+						Name:        "host",
+						Usage:       "The host address of the PostgreSQL server",
+						Required:    false,
+						Value:       "localhost",
+						Destination: &cfg.Postgres.DbHost,
+					},
+					cli.StringFlag{
+						Name:        "db",
+						Usage:       "The database where the target table is located",
+						Required:    false,
+						Value:       "dev",
+						Destination: &cfg.Postgres.Database,
+					},
+					cli.IntFlag{
+						Name:        "port",
+						Usage:       "The port of the PostgreSQL server",
+						Required:    false,
+						Value:       4566,
+						Destination: &cfg.Postgres.DbPort,
+					},
+					cli.StringFlag{
+						Name:        "user",
+						Usage:       "The PostgreSQL user",
+						Required:    false,
+						Value:       "root",
+						Destination: &cfg.Postgres.DbUser,
+					},
+				},
+				Action: func(c *cli.Context) error {
+					cfg.Sink = "postgres"
+					return runCommand()
+				},
+			},
+			{
+				Name: "mysql",
+				Flags: []cli.Flag{
+					cli.StringFlag{
+						Name:        "host",
+						Usage:       "The host address of the MySQL server",
+						Required:    false,
+						Value:       "localhost",
+						Destination: &cfg.Mysql.DbHost,
+					},
+					cli.StringFlag{
+						Name:        "db",
+						Usage:       "The database where the target table is located",
+						Required:    false,
+						Value:       "mydb",
+						Destination: &cfg.Mysql.Database,
+					},
+					cli.IntFlag{
+						Name:        "port",
+						Usage:       "The port of the MySQL server",
+						Required:    false,
+						Value:       3306,
+						Destination: &cfg.Mysql.DbPort,
+					},
+					cli.StringFlag{
+						Name:        "user",
+						Usage:       "The MySQL user",
+						Required:    false,
+						Value:       "mysqluser",
+						Destination: &cfg.Mysql.DbUser,
+					},
+					cli.StringFlag{
+						Name:        "password",
+						Usage:       "The MySQL password",
+						Required:    false,
+						Value:       "mysqlpw",
+						Destination: &cfg.Mysql.DbPassword,
+					},
+				},
+				Action: func(c *cli.Context) error {
+					cfg.Sink = "mysql"
+					return runCommand()
+				},
+			},
+			{
+				Name: "kafka",
+				Flags: []cli.Flag{
+					cli.StringFlag{
+						Name:        "brokers",
+						Usage:       "Kafka bootstrap brokers to connect to, as a comma-separated list",
+						Required:    true,
+						Destination: &cfg.Kafka.Brokers,
+					},
+					cli.BoolFlag{
+						Name:        "no-recreate",
+						Usage:       "Do not recreate the Kafka topic when it exists.",
+						Required:    false,
+						Destination: &cfg.Kafka.NoRecreateIfExists,
+					},
+				},
+				Action: func(c *cli.Context) error {
+					cfg.Sink = "kafka"
+					return runCommand()
+				},
+				HelpName: "datagen kafka",
+			},
+			{
+				Name: "pulsar",
+				Flags: []cli.Flag{
+					cli.StringFlag{
+						Name:        "brokers",
+						Usage:       "Pulsar brokers to connect to, as a comma-separated list",
+						Required:    true,
+						Destination: &cfg.Pulsar.Brokers,
+					},
+				},
+				Action: func(c *cli.Context) error {
+					cfg.Sink = "pulsar"
+					return runCommand()
+				},
+ HelpName: "datagen pulsar", + }, + { + Name: "kinesis", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "region", + Usage: "The region where the Kinesis stream resides", + Required: true, + Destination: &cfg.Kinesis.Region, + }, + cli.StringFlag{ + Name: "name", + Usage: "The Kinesis stream name", + Required: true, + Destination: &cfg.Kinesis.StreamName, + }, + }, + Action: func(c *cli.Context) error { + cfg.Sink = "kinesis" + return runCommand() + }, + HelpName: "datagen kinesis", + }, + }, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "print", + Usage: "Whether to print the content of every event", + Required: false, + Destination: &cfg.PrintInsert, + }, + cli.IntFlag{ + Name: "qps", + Usage: "Number of messages to send per second", + Required: false, + Value: 1, + Destination: &cfg.Qps, + }, + cli.StringFlag{ + Name: "mode", + Usage: "ad-click | ad-ctr | twitter | cdn-metrics | clickstream | ecommerce | delivery | livestream", + Required: true, + Destination: &cfg.Mode, + }, + cli.StringFlag{ + Name: "format", + Usage: "The output record format: json | protobuf. Used when the sink is a message queue.", + Value: "json", + Required: false, + Destination: &cfg.Format, + }, + cli.BoolFlag{ + Name: "heavytail", + Usage: "Whether the tail probability is high. If true We will use uniform distribution for randomizing values.", + Required: false, + Destination: &cfg.HeavyTail, + }, + }, + } + err := app.Run(os.Args) + if err != nil { + log.Fatalln(err) + } +} diff --git a/integration_tests/datagen/nexmark/auction.go b/integration_tests/datagen/nexmark/auction.go new file mode 100644 index 0000000000000..5bc293268da41 --- /dev/null +++ b/integration_tests/datagen/nexmark/auction.go @@ -0,0 +1,67 @@ +package nexmark + +import ( + "context" + "datagen/gen" + "datagen/sink" + "encoding/json" + "fmt" + "time" + + "github.com/brianvoe/gofakeit/v6" +) + +type auction struct { + sink.BaseSinkRecord + + Id int `json:"id"` + ItemName string `json:"item_name"` + DateTime int64 `json:"date_time"` + Seller int `json:"seller"` + Category int `json:"category"` +} + +func (r *auction) ToJson() (topic string, key string, data []byte) { + data, _ = json.Marshal(r) + return "auction", fmt.Sprint(r.Id), data +} + +type auctionGen struct { + faker *gofakeit.Faker + + nextAuctionId int +} + +func NewNexmarkGen(cfg gen.GeneratorConfig) gen.LoadGenerator { + return &auctionGen{ + faker: gofakeit.New(0), + nextAuctionId: 1000, + } +} + +func (g *auctionGen) generate() sink.SinkRecord { + g.nextAuctionId++ + return &auction{ + Id: g.nextAuctionId, + ItemName: g.faker.FarmAnimal(), + DateTime: time.Now().Unix(), + Seller: g.faker.Number(1000, 1099), + Category: g.faker.Number(1, 20), + } +} + +func (g *auctionGen) KafkaTopics() []string { + // We generate the auction table only. + return []string{"auction"} +} + +func (g *auctionGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) { + for { + record := g.generate() + select { + case outCh <- record: + case <-ctx.Done(): + return + } + } +} diff --git a/integration_tests/datagen/sink/kafka/kafka.go b/integration_tests/datagen/sink/kafka/kafka.go new file mode 100644 index 0000000000000..f01a116629e4f --- /dev/null +++ b/integration_tests/datagen/sink/kafka/kafka.go @@ -0,0 +1,139 @@ +package kafka + +import ( + "context" + "datagen/sink" + "fmt" + "log" + "strings" + "time" + + "github.com/Shopify/sarama" +) + +type KafkaConfig struct { + Brokers string + + // Do not recreate the Kafka topic when it exists. The default value is false. 
+ // It can be enabled if datagen is not authorized to create topic. + NoRecreateIfExists bool +} + +type KafkaSink struct { + admin sarama.ClusterAdmin + cfg KafkaConfig + client sarama.AsyncProducer +} + +func newKafkaConfig() *sarama.Config { + version, err := sarama.ParseKafkaVersion("1.1.1") + if err != nil { + panic(fmt.Sprintf("failed to parse Kafka version: %v", err)) + } + config := sarama.NewConfig() + config.Version = version + config.Net.DialTimeout = 3 * time.Second + config.Admin.Timeout = 5 * time.Second + config.Producer.Timeout = 5 * time.Second + return config +} + +func OpenKafkaSink(ctx context.Context, cfg KafkaConfig) (*KafkaSink, error) { + admin, err := sarama.NewClusterAdmin(strings.Split(cfg.Brokers, ","), newKafkaConfig()) + if err != nil { + return nil, err + } + topics, err := admin.ListTopics() + if err != nil { + return nil, err + } + var topicNames []string + for k := range topics { + topicNames = append(topicNames, k) + } + log.Printf("Existing topics: %s", topicNames) + client, err := sarama.NewAsyncProducer(strings.Split(cfg.Brokers, ","), newKafkaConfig()) + if err != nil { + return nil, fmt.Errorf("NewAsyncProducer failed: %v", err) + } + p := &KafkaSink{ + admin: admin, + cfg: cfg, + client: client, + } + go func() { + p.consumeSuccesses(ctx) + }() + return p, nil +} + +func (p *KafkaSink) consumeSuccesses(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case <-p.client.Successes(): + } + } +} + +func (p *KafkaSink) createRequiredTopics(admin sarama.ClusterAdmin, keys []string) error { + topics, err := admin.ListTopics() + if err != nil { + return err + } + for _, t := range keys { + if err := p.createTopic(admin, t, topics); err != nil { + return err + } + } + return nil +} + +func (p *KafkaSink) createTopic(admin sarama.ClusterAdmin, key string, topics map[string]sarama.TopicDetail) error { + _, exists := topics[key] + if p.cfg.NoRecreateIfExists { + if exists { + // The topic already exists, and we don't want to recreate it. + return nil + } else { + return fmt.Errorf("topic \"%s\" does not exist", key) + } + } + if exists { + // Recreate the topic if it exists. 
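+		// Note that topic deletion is asynchronous on the broker side, so the
+		// immediate re-creation below may transiently fail until it completes.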
+		if err := admin.DeleteTopic(key); err != nil {
+			return err
+		}
+		log.Printf("Deleted an existing topic: %s", key)
+	}
+	log.Printf("Creating topic: %s", key)
+	return admin.CreateTopic(key, &sarama.TopicDetail{
+		NumPartitions: 16,
+		ReplicationFactor: 1,
+	}, false)
+}
+
+func (p *KafkaSink) Prepare(topics []string) error {
+	return p.createRequiredTopics(p.admin, topics)
+}
+
+func (p *KafkaSink) Close() error {
+	p.client.AsyncClose()
+	return nil
+}
+
+func (p *KafkaSink) WriteRecord(ctx context.Context, format string, record sink.SinkRecord) error {
+	topic, key, data := sink.RecordToKafka(record, format)
+	msg := &sarama.ProducerMessage{}
+	msg.Topic = topic
+	msg.Key = sarama.StringEncoder(key)
+	msg.Value = sarama.ByteEncoder(data)
+	select {
+	case <-ctx.Done():
+	case p.client.Input() <- msg:
+	case err := <-p.client.Errors():
+		log.Printf("failed to produce message: %s", err)
+	}
+	return nil
+}
diff --git a/integration_tests/datagen/sink/kinesis/kinesis.go b/integration_tests/datagen/sink/kinesis/kinesis.go
new file mode 100644
index 0000000000000..139736f24c276
--- /dev/null
+++ b/integration_tests/datagen/sink/kinesis/kinesis.go
@@ -0,0 +1,52 @@
+package kinesis
+
+import (
+	"context"
+	"datagen/sink"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/kinesis"
+)
+
+type KinesisConfig struct {
+	StreamName string
+	Region string
+}
+
+type KinesisSink struct {
+	client *kinesis.Kinesis
+	cfg KinesisConfig
+}
+
+func OpenKinesisSink(cfg KinesisConfig) (*KinesisSink, error) {
+	ss := session.Must(session.NewSession())
+	client := kinesis.New(ss, aws.NewConfig().WithRegion(cfg.Region))
+	return &KinesisSink{
+		client: client,
+		cfg: cfg,
+	}, nil
+}
+
+func (p *KinesisSink) Prepare(topics []string) error {
+	return nil
+}
+
+func (p *KinesisSink) Close() error {
+	return nil
+}
+
+func (p *KinesisSink) WriteRecord(ctx context.Context, format string, record sink.SinkRecord) error {
+	_, key, data := sink.RecordToKafka(record, format)
+	_, err := p.client.PutRecordWithContext(ctx, &kinesis.PutRecordInput{
+		Data: data,
+		PartitionKey: aws.String(key),
+		StreamName: aws.String(p.cfg.StreamName),
+	})
+	if err != nil {
+		return fmt.Errorf("failed to write record to kinesis: %s", err)
+	}
+	return nil
+}
diff --git a/integration_tests/datagen/sink/mysql/mysql.go b/integration_tests/datagen/sink/mysql/mysql.go
new file mode 100644
index 0000000000000..0831af85b4857
--- /dev/null
+++ b/integration_tests/datagen/sink/mysql/mysql.go
@@ -0,0 +1,51 @@
+package mysql
+
+import (
+	"context"
+	"database/sql"
+	"datagen/sink"
+	"fmt"
+
+	_ "github.com/go-sql-driver/mysql"
+)
+
+type MysqlConfig struct {
+	DbHost string
+	Database string
+	DbPort int
+	DbUser string
+	DbPassword string
+}
+
+type MysqlSink struct {
+	db *sql.DB
+}
+
+func OpenMysqlSink(cfg MysqlConfig) (*MysqlSink, error) {
+	fmt.Printf("Opening MySQL sink: %+v\n", cfg)
+
+	db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?multiStatements=true",
+		cfg.DbUser, cfg.DbPassword, cfg.DbHost, cfg.DbPort, cfg.Database))
+	if err != nil {
+		return nil, err
+	}
+	return &MysqlSink{db}, nil
+}
+
+func (p *MysqlSink) Prepare(topics []string) error {
+	return nil
+}
+
+func (p *MysqlSink) Close() error {
+	return p.db.Close()
+}
+
+func (p *MysqlSink) WriteRecord(ctx context.Context, format string, record sink.SinkRecord) error {
+	// MySQL's INSERT INTO syntax is compatible with Postgres's, so the Postgres statement is reused.
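+	// For example, the twitter generator renders statements like:
+	//   INSERT INTO tweet (created_at, id, text, lang, author_id) values (...);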
+	query := record.ToPostgresSql()
+	_, err := p.db.ExecContext(ctx, query)
+	if err != nil {
+		err = fmt.Errorf("failed to execute query '%s': %s", query, err)
+	}
+	return err
+}
diff --git a/integration_tests/datagen/sink/postgres/postgres.go b/integration_tests/datagen/sink/postgres/postgres.go
new file mode 100644
index 0000000000000..f3698a93258bb
--- /dev/null
+++ b/integration_tests/datagen/sink/postgres/postgres.go
@@ -0,0 +1,47 @@
+package postgres
+
+import (
+	"context"
+	"database/sql"
+	"datagen/sink"
+	"fmt"
+
+	_ "github.com/lib/pq"
+)
+
+type PostgresConfig struct {
+	DbHost string
+	Database string
+	DbPort int
+	DbUser string
+}
+
+type PostgresSink struct {
+	db *sql.DB
+}
+
+func OpenPostgresSink(cfg PostgresConfig) (*PostgresSink, error) {
+	db, err := sql.Open("postgres", fmt.Sprintf("postgresql://%s:@%s:%d/%s?sslmode=disable",
+		cfg.DbUser, cfg.DbHost, cfg.DbPort, cfg.Database))
+	if err != nil {
+		return nil, err
+	}
+	return &PostgresSink{db}, nil
+}
+
+func (p *PostgresSink) Prepare(topics []string) error {
+	return nil
+}
+
+func (p *PostgresSink) Close() error {
+	return p.db.Close()
+}
+
+func (p *PostgresSink) WriteRecord(ctx context.Context, format string, record sink.SinkRecord) error {
+	query := record.ToPostgresSql()
+	_, err := p.db.ExecContext(ctx, query)
+	if err != nil {
+		err = fmt.Errorf("failed to execute query '%s': %s", query, err)
+	}
+	return err
+}
diff --git a/integration_tests/datagen/sink/pulsar/pulsar.go b/integration_tests/datagen/sink/pulsar/pulsar.go
new file mode 100644
index 0000000000000..2566ce993c1db
--- /dev/null
+++ b/integration_tests/datagen/sink/pulsar/pulsar.go
@@ -0,0 +1,60 @@
+package pulsar
+
+import (
+	"context"
+	"datagen/sink"
+	"fmt"
+
+	"github.com/apache/pulsar-client-go/pulsar"
+)
+
+type PulsarConfig struct {
+	Brokers string
+}
+
+type PulsarSink struct {
+	client pulsar.Client
+	producers map[string]pulsar.Producer
+}
+
+func OpenPulsarSink(ctx context.Context, cfg PulsarConfig) (*PulsarSink, error) {
+	client, err := pulsar.NewClient(pulsar.ClientOptions{
+		URL: fmt.Sprintf("pulsar://%s", cfg.Brokers),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &PulsarSink{
+		client: client,
+		producers: make(map[string]pulsar.Producer),
+	}, nil
+}
+
+func (p *PulsarSink) Prepare(topics []string) error {
+	return nil
+}
+
+func (p *PulsarSink) Close() error {
+	p.client.Close()
+	return nil
+}
+
+func (p *PulsarSink) WriteRecord(ctx context.Context, format string, record sink.SinkRecord) error {
+	var err error
+	topic, key, data := sink.RecordToKafka(record, format)
+	producer, ok := p.producers[topic]
+	if !ok {
+		producer, err = p.client.CreateProducer(pulsar.ProducerOptions{
+			Topic: topic,
+		})
+		if err != nil {
+			return err
+		}
+		p.producers[topic] = producer
+	}
+	_, err = producer.Send(ctx, &pulsar.ProducerMessage{
+		Value: data,
+		Key: key,
+	})
+	return err
+}
diff --git a/integration_tests/datagen/sink/sink.go b/integration_tests/datagen/sink/sink.go
new file mode 100644
index 0000000000000..4d4116c1f4153
--- /dev/null
+++ b/integration_tests/datagen/sink/sink.go
@@ -0,0 +1,63 @@
+package sink
+
+import (
+	"context"
+)
+
+type SinkRecord interface {
+	// Convert the event to an INSERT INTO command.
+	ToPostgresSql() string
+
+	// Convert the event to a Kafka message in JSON format.
+	// This interface will also be used for Pulsar and Kinesis.
+	ToJson() (topic string, key string, data []byte)
+
+	// Convert the event to a Kafka message in Protobuf format.
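+	// (The twitter generator, for instance, marshals its generated twitter.proto types here.)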
+	// This interface will also be used for Pulsar and Kinesis.
+	ToProtobuf() (topic string, key string, data []byte)
+
+	// Convert the event to a Kafka message in Avro format.
+	// This interface will also be used for Pulsar and Kinesis.
+	ToAvro() (topic string, key string, data []byte)
+}
+
+type BaseSinkRecord struct {
+}
+
+func (r BaseSinkRecord) ToPostgresSql() string {
+	panic("not implemented")
+}
+
+func (r BaseSinkRecord) ToJson() (topic string, key string, data []byte) {
+	panic("not implemented")
+}
+
+func (r BaseSinkRecord) ToProtobuf() (topic string, key string, data []byte) {
+	panic("not implemented")
+}
+
+func (r BaseSinkRecord) ToAvro() (topic string, key string, data []byte) {
+	panic("not implemented")
+}
+
+// Convert the event to a Kafka message in the given format.
+// This interface will also be used for Pulsar and Kinesis.
+func RecordToKafka(r SinkRecord, format string) (topic string, key string, data []byte) {
+	if format == "json" {
+		return r.ToJson()
+	} else if format == "protobuf" {
+		return r.ToProtobuf()
+	} else if format == "avro" {
+		return r.ToAvro()
+	} else {
+		panic("unsupported format")
+	}
+}
+
+type Sink interface {
+	Prepare(topics []string) error
+
+	WriteRecord(ctx context.Context, format string, record SinkRecord) error
+
+	Close() error
+}
diff --git a/integration_tests/datagen/twitter/avro.go b/integration_tests/datagen/twitter/avro.go
new file mode 100644
index 0000000000000..df20780c3e0bd
--- /dev/null
+++ b/integration_tests/datagen/twitter/avro.go
@@ -0,0 +1,45 @@
+package twitter
+
+import (
+	"github.com/linkedin/goavro/v2"
+)
+
+var AvroSchema string = `
+{
+  "type": "record",
+  "name": "Event",
+  "fields": [
+    {
+      "name": "data",
+      "type": "record",
+      "fields": [
+        { "name": "id", "type": "string" },
+        { "name": "text", "type": "string" },
+        { "name": "lang", "type": "string" },
+        { "name": "created_at", "type": "string" }
+      ]
+    },
+    {
+      "name": "author",
+      "type": "record",
+      "fields": [
+        { "name": "id", "type": "string" },
+        { "name": "name", "type": "string" },
+        { "name": "username", "type": "string" },
+        { "name": "created_at", "type": "string" },
+        { "name": "followers", "type": "long" }
+      ]
+    }
+  ]
+}
+`
+
+var AvroCodec *goavro.Codec = nil
+
+func init() {
+	var err error
+	AvroCodec, err = goavro.NewCodec(AvroSchema)
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/integration_tests/datagen/twitter/proto/twitter.pb.go b/integration_tests/datagen/twitter/proto/twitter.pb.go
new file mode 100644
index 0000000000000..9938c8dd46725
--- /dev/null
+++ b/integration_tests/datagen/twitter/proto/twitter.pb.go
@@ -0,0 +1,347 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.9
+// source: twitter.proto
+
+package proto
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data *TweetData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Author *User `protobuf:"bytes,2,opt,name=author,proto3" json:"author,omitempty"` +} + +func (x *Event) Reset() { + *x = Event{} + if protoimpl.UnsafeEnabled { + mi := &file_twitter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() protoreflect.Message { + mi := &file_twitter_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Event.ProtoReflect.Descriptor instead. +func (*Event) Descriptor() ([]byte, []int) { + return file_twitter_proto_rawDescGZIP(), []int{0} +} + +func (x *Event) GetData() *TweetData { + if x != nil { + return x.Data + } + return nil +} + +func (x *Event) GetAuthor() *User { + if x != nil { + return x.Author + } + return nil +} + +type TweetData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Text string `protobuf:"bytes,2,opt,name=text,proto3" json:"text,omitempty"` + Lang string `protobuf:"bytes,3,opt,name=lang,proto3" json:"lang,omitempty"` + CreatedAt string `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (x *TweetData) Reset() { + *x = TweetData{} + if protoimpl.UnsafeEnabled { + mi := &file_twitter_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TweetData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TweetData) ProtoMessage() {} + +func (x *TweetData) ProtoReflect() protoreflect.Message { + mi := &file_twitter_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TweetData.ProtoReflect.Descriptor instead. 
+func (*TweetData) Descriptor() ([]byte, []int) { + return file_twitter_proto_rawDescGZIP(), []int{1} +} + +func (x *TweetData) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *TweetData) GetText() string { + if x != nil { + return x.Text + } + return "" +} + +func (x *TweetData) GetLang() string { + if x != nil { + return x.Lang + } + return "" +} + +func (x *TweetData) GetCreatedAt() string { + if x != nil { + return x.CreatedAt + } + return "" +} + +type User struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + UserName string `protobuf:"bytes,3,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + CreatedAt string `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + Followers int64 `protobuf:"varint,5,opt,name=followers,proto3" json:"followers,omitempty"` +} + +func (x *User) Reset() { + *x = User{} + if protoimpl.UnsafeEnabled { + mi := &file_twitter_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *User) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*User) ProtoMessage() {} + +func (x *User) ProtoReflect() protoreflect.Message { + mi := &file_twitter_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use User.ProtoReflect.Descriptor instead. +func (*User) Descriptor() ([]byte, []int) { + return file_twitter_proto_rawDescGZIP(), []int{2} +} + +func (x *User) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *User) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *User) GetUserName() string { + if x != nil { + return x.UserName + } + return "" +} + +func (x *User) GetCreatedAt() string { + if x != nil { + return x.CreatedAt + } + return "" +} + +func (x *User) GetFollowers() int64 { + if x != nil { + return x.Followers + } + return 0 +} + +var File_twitter_proto protoreflect.FileDescriptor + +var file_twitter_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x74, 0x77, 0x69, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0e, 0x74, 0x77, 0x69, 0x74, 0x74, 0x65, 0x72, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, + 0x64, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x77, 0x69, 0x74, 0x74, 0x65, 0x72, + 0x2e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x54, 0x77, 0x65, 0x65, 0x74, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2c, 0x0a, 0x06, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x77, 0x69, 0x74, 0x74, 0x65, + 0x72, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x06, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x22, 0x62, 0x0a, 0x09, 0x54, 0x77, 0x65, 0x65, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x12, 0x0a, 0x04, 
0x6c, 0x61, 0x6e, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x61, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x04, 0x55, 0x73, + 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x73, + 0x42, 0x0f, 0x5a, 0x0d, 0x74, 0x77, 0x69, 0x74, 0x74, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_twitter_proto_rawDescOnce sync.Once + file_twitter_proto_rawDescData = file_twitter_proto_rawDesc +) + +func file_twitter_proto_rawDescGZIP() []byte { + file_twitter_proto_rawDescOnce.Do(func() { + file_twitter_proto_rawDescData = protoimpl.X.CompressGZIP(file_twitter_proto_rawDescData) + }) + return file_twitter_proto_rawDescData +} + +var file_twitter_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_twitter_proto_goTypes = []interface{}{ + (*Event)(nil), // 0: twitter.schema.Event + (*TweetData)(nil), // 1: twitter.schema.TweetData + (*User)(nil), // 2: twitter.schema.User +} +var file_twitter_proto_depIdxs = []int32{ + 1, // 0: twitter.schema.Event.data:type_name -> twitter.schema.TweetData + 2, // 1: twitter.schema.Event.author:type_name -> twitter.schema.User + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_twitter_proto_init() } +func file_twitter_proto_init() { + if File_twitter_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_twitter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_twitter_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TweetData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_twitter_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*User); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_twitter_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_twitter_proto_goTypes, + DependencyIndexes: 
file_twitter_proto_depIdxs,
+		MessageInfos: file_twitter_proto_msgTypes,
+	}.Build()
+	File_twitter_proto = out.File
+	file_twitter_proto_rawDesc = nil
+	file_twitter_proto_goTypes = nil
+	file_twitter_proto_depIdxs = nil
+}
diff --git a/integration_tests/datagen/twitter/twitter.go b/integration_tests/datagen/twitter/twitter.go
new file mode 100644
index 0000000000000..97731dc3aed2b
--- /dev/null
+++ b/integration_tests/datagen/twitter/twitter.go
@@ -0,0 +1,167 @@
+package twitter
+
+import (
+	"context"
+	"datagen/gen"
+	"datagen/sink"
+	"datagen/twitter/proto"
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/brianvoe/gofakeit/v6"
+	protobuf "google.golang.org/protobuf/proto"
+)
+
+type tweetData struct {
+	CreatedAt string `json:"created_at"`
+	Id string `json:"id"`
+	Text string `json:"text"`
+	Lang string `json:"lang"`
+}
+
+type twitterEvent struct {
+	sink.BaseSinkRecord
+
+	Data tweetData `json:"data"`
+	Author twitterUser `json:"author"`
+}
+
+type twitterUser struct {
+	CreatedAt string `json:"created_at"`
+	Id string `json:"id"`
+	Name string `json:"name"`
+	UserName string `json:"username"`
+	Followers int `json:"followers"`
+}
+
+func (r *twitterEvent) ToPostgresSql() string {
+	return fmt.Sprintf("INSERT INTO tweet (created_at, id, text, lang, author_id) values ('%s', '%s', '%s', '%s', '%s'); INSERT INTO user (created_at, id, name, username, followers) values ('%s', '%s', '%s', '%s', %d);",
+		r.Data.CreatedAt, r.Data.Id, r.Data.Text, r.Data.Lang, r.Author.Id,
+		r.Author.CreatedAt, r.Author.Id, r.Author.Name, r.Author.UserName, r.Author.Followers,
+	)
+}
+
+func (r *twitterEvent) ToJson() (topic string, key string, data []byte) {
+	data, _ = json.Marshal(r)
+	return "twitter", r.Data.Id, data
+}
+
+func (r *twitterEvent) ToProtobuf() (topic string, key string, data []byte) {
+	m := proto.Event{
+		Data: &proto.TweetData{
+			CreatedAt: r.Data.CreatedAt,
+			Id: r.Data.Id,
+			Text: r.Data.Text,
+			Lang: r.Data.Lang,
+		},
+		Author: &proto.User{
+			CreatedAt: r.Author.CreatedAt,
+			Id: r.Author.Id,
+			Name: r.Author.Name,
+			UserName: r.Author.UserName,
+			Followers: int64(r.Author.Followers),
+		},
+	}
+	data, err := protobuf.Marshal(&m)
+	if err != nil {
+		panic(err)
+	}
+	return "twitter", r.Data.Id, data
+}
+
+func (r *twitterEvent) ToAvro() (topic string, key string, data []byte) {
+	obj := map[string]interface{}{
+		"data": map[string]interface{}{
+			"created_at": r.Data.CreatedAt,
+			"id": r.Data.Id,
+			"text": r.Data.Text,
+			"lang": r.Data.Lang,
+		},
+		"author": map[string]interface{}{
+			"created_at": r.Author.CreatedAt,
+			"id": r.Author.Id,
+			"name": r.Author.Name,
+			"username": r.Author.UserName,
+			"followers": r.Author.Followers,
+		},
+	}
+	binary, err := AvroCodec.BinaryFromNative(nil, obj)
+	if err != nil {
+		panic(err)
+	}
+	return "twitter", r.Data.Id, binary
+}
+
+type twitterGen struct {
+	faker *gofakeit.Faker
+	users []*twitterUser
+}
+
+func NewTwitterGen() gen.LoadGenerator {
+	faker := gofakeit.New(0)
+	users := make(map[string]*twitterUser)
+	for len(users) < 100000 {
+		id := faker.DigitN(10)
+		if _, ok := users[id]; !ok {
+			endYear := time.Now().Year() - 1
+			startYear := endYear - rand.Intn(8)
+
+			endTime, _ := time.Parse("2006-01-02", fmt.Sprintf("%d-01-01", endYear))
+			startTime, _ := time.Parse("2006-01-02", fmt.Sprintf("%d-01-01", startYear))
+			users[id] = &twitterUser{
+				CreatedAt: faker.DateRange(startTime, endTime).Format(gen.RwTimestampLayout),
+				Id: id,
+				Name: fmt.Sprintf("%s %s", faker.Name(), faker.Adverb()),
+				UserName: faker.Username(),
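+				// Each synthetic user gets a fixed follower count, sampled once at startup.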
Followers: gofakeit.IntRange(1, 100000), + } + } + } + usersList := []*twitterUser{} + for _, u := range users { + usersList = append(usersList, u) + } + return &twitterGen{ + faker: faker, + users: usersList, + } +} + +func (t *twitterGen) generate() twitterEvent { + id := t.faker.DigitN(19) + author := t.users[rand.Intn(len(t.users))] + + wordsCnt := t.faker.IntRange(10, 20) + hashTagsCnt := t.faker.IntRange(0, 2) + hashTags := "" + for i := 0; i < hashTagsCnt; i++ { + hashTags += fmt.Sprintf("#%s ", t.faker.BuzzWord()) + } + sentence := fmt.Sprintf("%s%s", hashTags, t.faker.Sentence(wordsCnt)) + return twitterEvent{ + Data: tweetData{ + Id: id, + CreatedAt: time.Now().Format(gen.RwTimestampLayout), + Text: sentence, + Lang: gofakeit.Language(), + }, + Author: *author, + } +} + +func (t *twitterGen) KafkaTopics() []string { + return []string{"twitter"} +} + +func (t *twitterGen) Load(ctx context.Context, outCh chan<- sink.SinkRecord) { + for { + record := t.generate() + select { + case <-ctx.Done(): + return + case outCh <- &record: + } + } +} diff --git a/integration_tests/datagen/twitter/twitter_example.json b/integration_tests/datagen/twitter/twitter_example.json new file mode 100644 index 0000000000000..14c050eb3a847 --- /dev/null +++ b/integration_tests/datagen/twitter/twitter_example.json @@ -0,0 +1,14 @@ +{ + "data": { + "created_at": "2020-02-12T17:09:56.000Z", + "id": "1227640996038684673", + "text": "Doctors: Googling stuff online does not make you a doctor\n\nDevelopers: https://t.co/mrju5ypPkb", + "lang": "English" + }, + "author": { + "created_at": "2013-12-14T04:35:55.000Z", + "id": "2244994945", + "name": "singularity", + "username": "singular ritty" + } +} \ No newline at end of file diff --git a/integration_tests/delivery/delivery.sql b/integration_tests/delivery/delivery.sql new file mode 100644 index 0000000000000..157e0ba6e2ad2 --- /dev/null +++ b/integration_tests/delivery/delivery.sql @@ -0,0 +1,25 @@ +CREATE SOURCE delivery_orders_source ( + order_id BIGINT, + restaurant_id BIGINT, + order_state VARCHAR, + order_timestamp TIMESTAMP +) WITH ( + connector = 'kafka', + topic = 'delivery_orders', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; + + +CREATE MATERIALIZED VIEW restaurant_orders AS +SELECT + window_start, + restaurant_id, + COUNT(*) AS total_order +FROM + HOP(delivery_orders_source, order_timestamp, INTERVAL '1' MINUTE, INTERVAL '15' MINUTE) +WHERE + order_state = 'CREATED' +GROUP BY + restaurant_id, + window_start; diff --git a/integration_tests/delivery/docker-compose.yml b/integration_tests/delivery/docker-compose.yml new file mode 100644 index 0000000000000..dc93d884bc1b8 --- /dev/null +++ b/integration_tests/delivery/docker-compose.yml @@ -0,0 +1,60 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + 
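+    # The Kafka-compatible message queue service defined in ../../docker/docker-compose.yml.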
extends:
+      file: ../../docker/docker-compose.yml
+      service: message_queue
+  datagen:
+    build: ../datagen
+    depends_on: [message_queue]
+    command:
+      - /bin/sh
+      - -c
+      - /datagen --mode delivery --qps 2 kafka --brokers message_queue:29092
+    restart: always
+    container_name: datagen
+volumes:
+  etcd-0:
+    external: false
+  grafana-0:
+    external: false
+  minio-0:
+    external: false
+  prometheus-0:
+    external: false
+  message_queue:
+    external: false
+name: risingwave-compose
diff --git a/integration_tests/ecommerce/ecommerce.sql b/integration_tests/ecommerce/ecommerce.sql
new file mode 100644
index 0000000000000..434801743b13c
--- /dev/null
+++ b/integration_tests/ecommerce/ecommerce.sql
@@ -0,0 +1,60 @@
+CREATE SOURCE order_events_source (
+    order_id VARCHAR,
+    item_id VARCHAR,
+    item_price DOUBLE PRECISION,
+    event_timestamp TIMESTAMP
+) WITH (
+    connector = 'kafka',
+    topic = 'nics_metrics',
+    properties.bootstrap.server = 'message_queue:29092',
+    scan.startup.mode = 'earliest'
+) ROW FORMAT JSON;
+
+CREATE TABLE order_events (
+    order_id VARCHAR,
+    item_id VARCHAR,
+    item_price DOUBLE PRECISION,
+    event_timestamp TIMESTAMP
+);
+
+CREATE TABLE parcel_events (
+    order_id VARCHAR,
+    event_timestamp TIMESTAMP,
+    event_type VARCHAR
+);
+
+CREATE MATERIALIZED VIEW order_details AS
+SELECT
+    order_id,
+    SUM(item_price) AS total_price,
+    AVG(item_price) AS avg_price
+FROM
+    order_events
+GROUP BY
+    order_id;
+
+CREATE MATERIALIZED VIEW delayed_deliveries AS
+SELECT
+    t1.order_id AS order_id,
+    (t2.event_timestamp - t1.event_timestamp) as delivery_time
+FROM
+    (
+        SELECT
+            order_id,
+            event_timestamp
+        FROM
+            parcel_events
+        WHERE
+            event_type = 'order_created'
+    ) AS t1
+    JOIN (
+        SELECT
+            order_id,
+            event_timestamp
+        FROM
+            parcel_events
+        WHERE
+            event_type = 'parcel_shipped'
+    ) t2 ON t1.order_id = t2.order_id
+WHERE
+    t2.event_timestamp - t1.event_timestamp > INTERVAL '7 days';
diff --git a/integration_tests/iceberg-sink/README.md b/integration_tests/iceberg-sink/README.md
new file mode 100644
index 0000000000000..add6d72125673
--- /dev/null
+++ b/integration_tests/iceberg-sink/README.md
@@ -0,0 +1,36 @@
+# Demo: Sinking to Iceberg
+
+RisingWave only provides limited capabilities to serve complex ad-hoc queries, which typically require optimizations such as columnar storage and code generation (https://www.vldb.org/pvldb/vol4/p539-neumann.pdf). However, RisingWave's internal storage format is row-based, and we have not paid much attention to improving its batch-processing capability. Therefore, we recommend sinking the stream into Iceberg or another data lake to build a "streaming data warehouse" solution.
+
+In this demo, we want to showcase how RisingWave is able to sink data to Iceberg for big data analytics.
+
+1. Launch the cluster:
+
+```sh
+docker compose up -d
+```
+
+The cluster contains a RisingWave cluster and its necessary dependencies, a Spark instance that is used to create the Iceberg table, a datagen service that generates the data, and a Presto instance for querying Iceberg.
+
+2. Create the Iceberg table:
+
+```sh
+docker compose exec spark bash /spark-script/run-sql-file.sh create-table
+```
+
+3. Execute the SQL queries in sequence:
+
+- create_source.sql
+- create_mv.sql
+- create_sink.sql
+
+4.
Connect to the Presto that is pre-installed in the docker compose and execute a simple query: + +``` +docker compose exec presto presto-cli --server localhost:8080 +``` + +```sql +select user_id, count(*) from iceberg.demo_db.demo_table group by user_id +``` diff --git a/integration_tests/iceberg-sink/create_mv.sql b/integration_tests/iceberg-sink/create_mv.sql new file mode 100644 index 0000000000000..0a803f8a2762d --- /dev/null +++ b/integration_tests/iceberg-sink/create_mv.sql @@ -0,0 +1,7 @@ +CREATE MATERIALIZED VIEW bhv_mv AS +SELECT + user_id, + target_id, + event_timestamp +FROM + user_behaviors; \ No newline at end of file diff --git a/integration_tests/iceberg-sink/create_sink.sql b/integration_tests/iceberg-sink/create_sink.sql new file mode 100644 index 0000000000000..9306b836f7744 --- /dev/null +++ b/integration_tests/iceberg-sink/create_sink.sql @@ -0,0 +1,13 @@ +CREATE SINK bhv_iceberg_sink +FROM + bhv_mv WITH ( + connector = 'iceberg', + type = 'upsert', + primary_key = 'user_id, target_id, event_timestamp', + warehouse.path = 's3://hummock001/iceberg-data', + s3.endpoint = 'http://minio-0:9301', + s3.access.key = 'hummockadmin', + s3.secret.key = 'hummockadmin', + database.name='demo_db', + table.name='demo_table' +); \ No newline at end of file diff --git a/integration_tests/iceberg-sink/create_source.sql b/integration_tests/iceberg-sink/create_source.sql new file mode 100644 index 0000000000000..efb1319cc3544 --- /dev/null +++ b/integration_tests/iceberg-sink/create_source.sql @@ -0,0 +1,19 @@ +CREATE TABLE user_behaviors ( + user_id VARCHAR, + target_id VARCHAR, + target_type VARCHAR, + event_timestamp VARCHAR, + behavior_type VARCHAR, + parent_target_type VARCHAR, + parent_target_id VARCHAR, + PRIMARY KEY(user_id, target_id, event_timestamp) +) with ( + connector = 'mysql-cdc', + hostname = 'mysql', + port = '3306', + username = 'root', + password = '123456', + database.name = 'mydb', + table.name = 'user_behaviors', + server.id = '1' +); \ No newline at end of file diff --git a/integration_tests/iceberg-sink/docker-compose.yml b/integration_tests/iceberg-sink/docker-compose.yml new file mode 100644 index 0000000000000..89368e90cb505 --- /dev/null +++ b/integration_tests/iceberg-sink/docker-compose.yml @@ -0,0 +1,98 @@ +--- +version: "3" +services: + spark: + image: apache/spark:3.3.1 + command: tail -f /dev/null + depends_on: + - minio-0 + volumes: + - "./spark-script:/spark-script" + container_name: spark + presto: + build: ./presto-with-iceberg + container_name: presto + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + mysql: + image: mysql:8.0 + ports: + - "3306:3306" + environment: + - MYSQL_ROOT_PASSWORD=123456 + - MYSQL_USER=mysqluser + - MYSQL_PASSWORD=mysqlpw + - MYSQL_DATABASE=mydb + healthcheck: + test: [ "CMD-SHELL", "mysqladmin ping -h 127.0.0.1 -u root -p123456" ] + interval: 5s + timeout: 5s + retries: 5 + 
container_name: mysql + connector-node: + extends: + file: ../../docker/docker-compose.yml + service: connector-node + prepare_mysql: + image: mysql:8.0 + depends_on: + - mysql + command: + - /bin/sh + - -c + - "mysql -p123456 -h mysql mydb < mysql_prepare.sql" + volumes: + - "./mysql_prepare.sql:/mysql_prepare.sql" + container_name: prepare_mysql + restart: on-failure + datagen: + build: ../datagen + depends_on: [mysql] + command: + - /bin/sh + - -c + - /datagen --mode clickstream --qps 1 mysql --user mysqluser --password mysqlpw --host mysql --port 3306 --db mydb + container_name: datagen + restart: on-failure +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false +name: risingwave-compose \ No newline at end of file diff --git a/integration_tests/iceberg-sink/iceberg-query.sql b/integration_tests/iceberg-sink/iceberg-query.sql new file mode 100644 index 0000000000000..67b7d9e831fa8 --- /dev/null +++ b/integration_tests/iceberg-sink/iceberg-query.sql @@ -0,0 +1 @@ +select user_id, count(*) from iceberg.demo_db.demo_table group by user_id \ No newline at end of file diff --git a/integration_tests/iceberg-sink/mysql_prepare.sql b/integration_tests/iceberg-sink/mysql_prepare.sql new file mode 100644 index 0000000000000..3e5a236a41205 --- /dev/null +++ b/integration_tests/iceberg-sink/mysql_prepare.sql @@ -0,0 +1,15 @@ +-- mysql -p123456 -uroot -h 127.0.0.1 mydb < mysql_prepare.sql +-- +-- Mysql +USE mydb; + +CREATE TABLE user_behaviors ( + user_id VARCHAR(60), + target_id VARCHAR(60), + target_type VARCHAR(60), + event_timestamp VARCHAR(100), + behavior_type VARCHAR(60), + parent_target_type VARCHAR(60), + parent_target_id VARCHAR(60), + PRIMARY KEY(user_id, target_id, event_timestamp) +); diff --git a/integration_tests/iceberg-sink/presto-with-iceberg/Dockerfile b/integration_tests/iceberg-sink/presto-with-iceberg/Dockerfile new file mode 100644 index 0000000000000..bb6fea50243c4 --- /dev/null +++ b/integration_tests/iceberg-sink/presto-with-iceberg/Dockerfile @@ -0,0 +1,5 @@ +FROM ahanaio/prestodb-sandbox + +COPY ./iceberg.properties /opt/presto-server/etc/catalog +COPY ./log.properties /opt/presto-server/etc +COPY ./hadoop-catalog.xml /etc/iceberg/conf/hadoop-catalog.xml \ No newline at end of file diff --git a/integration_tests/iceberg-sink/presto-with-iceberg/hadoop-catalog.xml b/integration_tests/iceberg-sink/presto-with-iceberg/hadoop-catalog.xml new file mode 100644 index 0000000000000..460072cfb47b0 --- /dev/null +++ b/integration_tests/iceberg-sink/presto-with-iceberg/hadoop-catalog.xml @@ -0,0 +1,22 @@ + + + presto.s3.endpoint + http://minio-0:9301 + + + presto.s3.path-style-access + true + + + presto.s3.access-key + hummockadmin + + + presto.s3.secret-key + hummockadmin + + + fs.s3a.impl + com.facebook.presto.hive.s3.PrestoS3FileSystem + + \ No newline at end of file diff --git a/integration_tests/iceberg-sink/presto-with-iceberg/iceberg.properties b/integration_tests/iceberg-sink/presto-with-iceberg/iceberg.properties new file mode 100644 index 0000000000000..ecccfc7fc8275 --- /dev/null +++ b/integration_tests/iceberg-sink/presto-with-iceberg/iceberg.properties @@ -0,0 +1,6 @@ +connector.name=iceberg +hive.metastore.uri=thrift://192.0.2.3:9083 +iceberg.catalog.type=hadoop +iceberg.catalog.warehouse=s3a://hummock001/iceberg-data +iceberg.hadoop.config.resources=/etc/iceberg/conf/hadoop-catalog.xml +hive.config.resources=/etc/iceberg/conf/hadoop-catalog.xml \ No 
newline at end of file diff --git a/integration_tests/iceberg-sink/presto-with-iceberg/log.properties b/integration_tests/iceberg-sink/presto-with-iceberg/log.properties new file mode 100644 index 0000000000000..208a8faf4176b --- /dev/null +++ b/integration_tests/iceberg-sink/presto-with-iceberg/log.properties @@ -0,0 +1,2 @@ +com.facebook.presto=DEBUG +org.apache.hadoop=DEBUG \ No newline at end of file diff --git a/integration_tests/iceberg-sink/spark-script/.gitignore b/integration_tests/iceberg-sink/spark-script/.gitignore new file mode 100644 index 0000000000000..51dcf07222856 --- /dev/null +++ b/integration_tests/iceberg-sink/spark-script/.gitignore @@ -0,0 +1,3 @@ +derby.log +metastore_db +.ivy \ No newline at end of file diff --git a/integration_tests/iceberg-sink/spark-script/create-table.sql b/integration_tests/iceberg-sink/spark-script/create-table.sql new file mode 100644 index 0000000000000..e609784d849c7 --- /dev/null +++ b/integration_tests/iceberg-sink/spark-script/create-table.sql @@ -0,0 +1,11 @@ +drop table if exists demo.demo_db.demo_table; + +CREATE TABLE demo.demo_db.demo_table +( + user_id string, + target_id string, + event_timestamp string +) TBLPROPERTIES ('format-version'='2'); + + + diff --git a/integration_tests/iceberg-sink/spark-script/query-table.sql b/integration_tests/iceberg-sink/spark-script/query-table.sql new file mode 100644 index 0000000000000..d305c6c51c72d --- /dev/null +++ b/integration_tests/iceberg-sink/spark-script/query-table.sql @@ -0,0 +1 @@ +SELECT user_id, count(*) from demo.demo_db.demo_table group by user_id; \ No newline at end of file diff --git a/integration_tests/iceberg-sink/spark-script/run-sql-file.sh b/integration_tests/iceberg-sink/spark-script/run-sql-file.sh new file mode 100644 index 0000000000000..15a9c9ffedd0b --- /dev/null +++ b/integration_tests/iceberg-sink/spark-script/run-sql-file.sh @@ -0,0 +1,13 @@ +set -ex + +/opt/spark/bin/spark-sql --packages org.apache.iceberg:iceberg-spark-runtime-3.2_2.12:1.1.0,org.apache.hadoop:hadoop-aws:3.3.2\ + --conf spark.jars.ivy=${HOME}/work-dir/.ivy2 \ + --conf spark.sql.catalog.demo=org.apache.iceberg.spark.SparkCatalog \ + --conf spark.sql.catalog.demo.type=hadoop \ + --conf spark.sql.catalog.demo.warehouse=s3a://hummock001/iceberg-data \ + --conf spark.sql.catalog.demo.hadoop.fs.s3a.endpoint=http://minio-0:9301 \ + --conf spark.sql.catalog.demo.hadoop.fs.s3a.path.style.access=true \ + --conf spark.sql.catalog.demo.hadoop.fs.s3a.access.key=hummockadmin \ + --conf spark.sql.catalog.demo.hadoop.fs.s3a.secret.key=hummockadmin \ + --conf spark.sql.defaultCatalog=demo \ + -f /spark-script/$1.sql \ No newline at end of file diff --git a/integration_tests/livestream/create_mv.sql b/integration_tests/livestream/create_mv.sql new file mode 100644 index 0000000000000..32dbd6f4a8c9b --- /dev/null +++ b/integration_tests/livestream/create_mv.sql @@ -0,0 +1,69 @@ +CREATE MATERIALIZED VIEW live_video_qos_10min AS +SELECT + window_start AS report_ts, + room_id, + SUM(video_total_freeze_duration) AS video_total_freeze_duration, + AVG(video_lost_pps) as video_lost_pps, + AVG(video_rtt) as video_rtt +FROM + TUMBLE( + live_stream_metrics, + report_timestamp, + INTERVAL '10' MINUTE + ) +GROUP BY + window_start, + room_id; + +-- +-- +-- -- Unsupported yet. 
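+-- (Intended metric: the ratio of freeze reports to distinct viewers within each
+-- 10-minute window.)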
+-- CREATE MATERIALIZED VIEW blocked_user_ratio_10min AS +-- SELECT +-- window_start AS report_ts, +-- ( +-- COUNT() FILTER ( +-- WHERE +-- video_total_freeze_duration > 0 +-- ) / COUNT(DISTINCT user_id) :: DOUBLE PRECISION +-- ) AS blocked_user_ratio, +-- FROM +-- TUMBLE( +-- live_stream_metrics, +-- report_timestamp, +-- INTERVAL '10' MINUTE +-- ) +-- GROUP BY +-- window_start, +-- room_id; +-- +-- +-- +-- A real-time dashboard of the total UV. +CREATE MATERIALIZED VIEW total_user_visit_1min AS +SELECT + window_start AS report_ts, + COUNT(DISTINCT user_id) as uv +FROM + TUMBLE( + live_stream_metrics, + report_timestamp, + INTERVAL '1' MINUTE + ) +GROUP BY + window_start; + +CREATE MATERIALIZED VIEW room_user_visit_1min AS +SELECT + window_start AS report_ts, + COUNT(DISTINCT user_id) as uv, + room_id +FROM + TUMBLE( + live_stream_metrics, + report_timestamp, + INTERVAL '1' MINUTE + ) +GROUP BY + window_start, + room_id; \ No newline at end of file diff --git a/integration_tests/livestream/create_source.sql b/integration_tests/livestream/create_source.sql new file mode 100644 index 0000000000000..3e230af07f1b0 --- /dev/null +++ b/integration_tests/livestream/create_source.sql @@ -0,0 +1,26 @@ +CREATE SOURCE live_stream_metrics ( + client_ip VARCHAR, + user_agent VARCHAR, + user_id VARCHAR, + -- The live room. + room_id VARCHAR, + -- Sent bits per second. + video_bps BIGINT, + -- Sent frames per second. Typically 30 fps. + video_fps BIGINT, + -- Round-trip time (in ms). 200ms is recommended. + video_rtt BIGINT, + -- Lost packets per second. + video_lost_pps BIGINT, + -- How long was the longest freeze (in ms). + video_longest_freeze_duration BIGINT, + -- Total freeze duration. + video_total_freeze_duration BIGINT, + report_timestamp TIMESTAMPTZ, + country VARCHAR +) WITH ( + connector = 'kafka', + topic = 'live_stream_metrics', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; \ No newline at end of file diff --git a/integration_tests/livestream/data_check b/integration_tests/livestream/data_check new file mode 100644 index 0000000000000..3aaf674ab9add --- /dev/null +++ b/integration_tests/livestream/data_check @@ -0,0 +1 @@ +live_stream_metrics,live_video_qos_10min,total_user_visit_1min,room_user_visit_1min \ No newline at end of file diff --git a/integration_tests/livestream/docker-compose.yml b/integration_tests/livestream/docker-compose.yml new file mode 100644 index 0000000000000..df1f39773d070 --- /dev/null +++ b/integration_tests/livestream/docker-compose.yml @@ -0,0 +1,62 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --mode livestream --qps 2 kafka 
--brokers message_queue:29092 + restart: always + container_name: datagen +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/livestream/livestream.proto b/integration_tests/livestream/livestream.proto new file mode 100644 index 0000000000000..125b66a744c1d --- /dev/null +++ b/integration_tests/livestream/livestream.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package livestream.schema; +option go_package = "livestream/proto"; + +message LiveStreamMetrics { + string client_ip = 1; + string user_agent = 2; + string user_id = 3; + string room_id = 4; + int64 video_bps = 5; + int64 video_fps = 6; + int64 video_rtt = 7; + int64 video_lost_pps = 8; + int64 video_longest_freeze_duration = 9; + int64 video_total_freeze_duration = 10; + int64 report_timestamp = 11; + string country = 12; +} diff --git a/integration_tests/livestream/pb/create_mv.sql b/integration_tests/livestream/pb/create_mv.sql new file mode 100644 index 0000000000000..4a2739d6921bb --- /dev/null +++ b/integration_tests/livestream/pb/create_mv.sql @@ -0,0 +1,62 @@ +CREATE MATERIALIZED VIEW live_stream_metrics AS +SELECT + client_ip, + user_agent, + user_id, + room_id, + video_bps, + video_fps, + video_rtt, + video_lost_pps, + video_longest_freeze_duration, + video_total_freeze_duration, + to_timestamp(report_timestamp) as report_timestamp, + country +FROM + live_stream_metrics_pb; + +CREATE MATERIALIZED VIEW live_video_qos_10min AS +SELECT + window_start AS report_ts, + room_id, + SUM(video_total_freeze_duration) AS video_total_freeze_duration, + AVG(video_lost_pps) as video_lost_pps, + AVG(video_rtt) as video_rtt +FROM + TUMBLE( + live_stream_metrics, + report_timestamp, + INTERVAL '10' MINUTE + ) +GROUP BY + window_start, + room_id; + +-- A real-time dashboard of the total UV. +CREATE MATERIALIZED VIEW total_user_visit_1min AS +SELECT + window_start AS report_ts, + COUNT(DISTINCT user_id) as uv +FROM + TUMBLE( + live_stream_metrics, + report_timestamp, + INTERVAL '1' MINUTE + ) +GROUP BY + window_start; + +CREATE MATERIALIZED VIEW room_user_visit_1min AS +SELECT + window_start AS report_ts, + COUNT(DISTINCT user_id) as uv, + room_id +FROM + TUMBLE( + live_stream_metrics, + report_timestamp, + INTERVAL '1' MINUTE + ) +GROUP BY + window_start, + room_id; \ No newline at end of file diff --git a/integration_tests/livestream/pb/create_source.sql b/integration_tests/livestream/pb/create_source.sql new file mode 100644 index 0000000000000..5664a8502141f --- /dev/null +++ b/integration_tests/livestream/pb/create_source.sql @@ -0,0 +1,6 @@ +CREATE SOURCE live_stream_metrics_pb WITH ( + connector = 'kafka', + topic = 'live_stream_metrics', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT PROTOBUF MESSAGE 'livestream.schema.LiveStreamMetrics' ROW SCHEMA LOCATION 'http://file_server:8080/schema'; diff --git a/integration_tests/livestream/query.sql b/integration_tests/livestream/query.sql new file mode 100644 index 0000000000000..193c87cf4991d --- /dev/null +++ b/integration_tests/livestream/query.sql @@ -0,0 +1,19 @@ +--- TODO: we need now() for ad-hoc mode. 
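+-- (With now() support, the hard-coded '2022-7-22 18:43' timestamps below could be
+-- written as now() - INTERVAL '1 day', making the query reusable ad hoc.)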
+-- SELECT +-- * +-- FROM +-- thread_view_count +-- WHERE +-- window_time > ( +-- '2022-7-22 18:43' :: TIMESTAMP - INTERVAL '1 day' +-- ) +-- AND window_time < ( +-- '2022-7-22 18:43' :: TIMESTAMP - INTERVAL '1 day' + INTERVAL '10 minutes' +-- ) +-- AND target_id = 'thread83 +SELECT + * +FROM + live_video_qos_10min +LIMIT + 10; \ No newline at end of file diff --git a/integration_tests/livestream/schema b/integration_tests/livestream/schema new file mode 100644 index 0000000000000..08b5cd4852c7e --- /dev/null +++ b/integration_tests/livestream/schema @@ -0,0 +1,18 @@ + + +livestream.protolivestream.schema" +LiveStreamMetrics + client_ip ( RclientIp + +user_agent ( R userAgent +user_id ( RuserId +room_id ( RroomId + video_bps (RvideoBps + video_fps (RvideoFps + video_rtt (RvideoRtt$ +video_lost_pps (R videoLostPpsA +video_longest_freeze_duration (RvideoLongestFreezeDuration= +video_total_freeze_duration + (RvideoTotalFreezeDuration) +report_timestamp (RreportTimestamp +country ( RcountryBZlivestream/protobproto3 \ No newline at end of file diff --git a/integration_tests/mysql-cdc/create_mv.sql b/integration_tests/mysql-cdc/create_mv.sql new file mode 100644 index 0000000000000..86de17ee12e04 --- /dev/null +++ b/integration_tests/mysql-cdc/create_mv.sql @@ -0,0 +1,8 @@ +CREATE MATERIALIZED VIEW product_count AS +SELECT + product_id, + COUNT(*) as product_count +FROM + orders +GROUP BY + product_id; \ No newline at end of file diff --git a/integration_tests/mysql-cdc/create_source.sql b/integration_tests/mysql-cdc/create_source.sql new file mode 100644 index 0000000000000..1a35f6dae9855 --- /dev/null +++ b/integration_tests/mysql-cdc/create_source.sql @@ -0,0 +1,18 @@ +create table orders ( + order_id int, + order_date bigint, + customer_name varchar, + price decimal, + product_id int, + order_status smallint, + PRIMARY KEY (order_id) +) with ( + connector = 'mysql-cdc', + hostname = 'mysql', + port = '3306', + username = 'root', + password = '123456', + database.name = 'mydb', + table.name = 'orders', + server.id = '1' +); \ No newline at end of file diff --git a/integration_tests/mysql-cdc/data_check b/integration_tests/mysql-cdc/data_check new file mode 100644 index 0000000000000..1f4c99fed9650 --- /dev/null +++ b/integration_tests/mysql-cdc/data_check @@ -0,0 +1 @@ +orders,product_count \ No newline at end of file diff --git a/integration_tests/mysql-cdc/docker-compose.yml b/integration_tests/mysql-cdc/docker-compose.yml new file mode 100644 index 0000000000000..d25a91d8e9cfe --- /dev/null +++ b/integration_tests/mysql-cdc/docker-compose.yml @@ -0,0 +1,78 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + mysql: + image: mysql:8.0 + ports: + - "3306:3306" + environment: + - MYSQL_ROOT_PASSWORD=123456 + - MYSQL_USER=mysqluser + - MYSQL_PASSWORD=mysqlpw + - MYSQL_DATABASE=mydb + healthcheck: + test: [ "CMD-SHELL", 
"mysqladmin ping -h 127.0.0.1 -u root -p123456" ] + interval: 5s + timeout: 5s + retries: 5 + container_name: mysql + connector-node: + extends: + file: ../../docker/docker-compose.yml + service: connector-node + datagen: + image: mysql:8.0 + depends_on: + - mysql + command: + - /bin/sh + - -c + - "mysql -p123456 -h mysql mydb < mysql_prepare.sql" + volumes: + - "./mysql_prepare.sql:/mysql_prepare.sql" + container_name: datagen + restart: on-failure +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false +name: risingwave-compose diff --git a/integration_tests/mysql-cdc/mysql_prepare.sql b/integration_tests/mysql-cdc/mysql_prepare.sql new file mode 100644 index 0000000000000..65758e11f3af7 --- /dev/null +++ b/integration_tests/mysql-cdc/mysql_prepare.sql @@ -0,0 +1,28 @@ +-- mysql -p123456 -uroot -h 127.0.0.1 mydb < mysql_prepare.sql +-- +-- Mysql +USE mydb; + +create table orders ( + order_id int, + order_date bigint, + customer_name varchar(200), + price decimal, + product_id int, + order_status smallint, + PRIMARY KEY (order_id) +); + +insert into + orders +values + (1, 1558430840000, 'Bob', 10.50, 1, 1), + (2, 1558430840001, 'Alice', 20.50, 2, 1), + ( + 3, + 1558430840002, + 'Alice', + 18.50, + 2, + 1 + ); \ No newline at end of file diff --git a/integration_tests/mysql-cdc/query.sql b/integration_tests/mysql-cdc/query.sql new file mode 100644 index 0000000000000..a66e5c24f78e2 --- /dev/null +++ b/integration_tests/mysql-cdc/query.sql @@ -0,0 +1,6 @@ +SELECT + * +FROM + orders +LIMIT + 10; \ No newline at end of file diff --git a/integration_tests/mysql-sink/create_mv.sql b/integration_tests/mysql-sink/create_mv.sql new file mode 100644 index 0000000000000..ea3ab9d71272a --- /dev/null +++ b/integration_tests/mysql-sink/create_mv.sql @@ -0,0 +1,16 @@ +CREATE MATERIALIZED VIEW target_count AS +SELECT + target_id, + COUNT(*) AS target_count +FROM + user_behaviors +GROUP BY + target_id; + +CREATE SINK target_count_mysql_sink +FROM + target_count WITH ( + connector = 'jdbc', + jdbc.url = 'jdbc:mysql://mysql:3306/mydb?user=root&password=123456', + table.name = 'target_count' + ); \ No newline at end of file diff --git a/integration_tests/mysql-sink/create_source.sql b/integration_tests/mysql-sink/create_source.sql new file mode 100644 index 0000000000000..7a9e3d3add4c8 --- /dev/null +++ b/integration_tests/mysql-sink/create_source.sql @@ -0,0 +1,14 @@ +CREATE SOURCE user_behaviors ( + user_id VARCHAR, + target_id VARCHAR, + target_type VARCHAR, + event_timestamp TIMESTAMPTZ, + behavior_type VARCHAR, + parent_target_type VARCHAR, + parent_target_id VARCHAR +) WITH ( + connector = 'kafka', + topic = 'user_behaviors', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; \ No newline at end of file diff --git a/integration_tests/mysql-sink/data_check b/integration_tests/mysql-sink/data_check new file mode 100644 index 0000000000000..3835eb979b86e --- /dev/null +++ b/integration_tests/mysql-sink/data_check @@ -0,0 +1 @@ +user_behaviors,target_count \ No newline at end of file diff --git a/integration_tests/mysql-sink/docker-compose.yml b/integration_tests/mysql-sink/docker-compose.yml new file mode 100644 index 0000000000000..21651f2ac49b2 --- /dev/null +++ b/integration_tests/mysql-sink/docker-compose.yml @@ -0,0 +1,93 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: 
compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + mysql: + image: mysql:8.0 + ports: + - "3306:3306" + environment: + - MYSQL_ROOT_PASSWORD=123456 + - MYSQL_USER=mysqluser + - MYSQL_PASSWORD=mysqlpw + - MYSQL_DATABASE=mydb + healthcheck: + test: [ "CMD-SHELL", "mysqladmin ping -h 127.0.0.1 -u root -p123456" ] + interval: 5s + timeout: 5s + retries: 5 + container_name: mysql + connector-node: + extends: + file: ../../docker/docker-compose.yml + service: connector-node + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --mode clickstream --qps 2 kafka --brokers message_queue:29092 + restart: always + container_name: datagen + prepare_mysql: + image: mysql:8.0 + depends_on: + - mysql + command: + - /bin/sh + - -c + - "mysql -p123456 -h mysql mydb < mysql_prepare.sql" + volumes: + - "./mysql_prepare.sql:/mysql_prepare.sql" + container_name: prepare_mysql + restart: on-failure +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/mysql-sink/mysql_prepare.sql b/integration_tests/mysql-sink/mysql_prepare.sql new file mode 100644 index 0000000000000..ded9b4cec97cd --- /dev/null +++ b/integration_tests/mysql-sink/mysql_prepare.sql @@ -0,0 +1,4 @@ +CREATE TABLE target_count ( + target_id VARCHAR(128) primary key, + target_count BIGINT +); \ No newline at end of file diff --git a/integration_tests/mysql-sink/query.sql b/integration_tests/mysql-sink/query.sql new file mode 100644 index 0000000000000..e09c66a255f10 --- /dev/null +++ b/integration_tests/mysql-sink/query.sql @@ -0,0 +1,6 @@ +SELECT + * +FROM + target_count +LIMIT + 10; \ No newline at end of file diff --git a/integration_tests/postgres-cdc/create_mv.sql b/integration_tests/postgres-cdc/create_mv.sql new file mode 100644 index 0000000000000..ec31f79c941b7 --- /dev/null +++ b/integration_tests/postgres-cdc/create_mv.sql @@ -0,0 +1,28 @@ +CREATE MATERIALIZED VIEW city_population AS +SELECT + city, + COUNT(*) as population +FROM + person +GROUP BY + city; + +CREATE MATERIALIZED VIEW nexmark_q8 AS +SELECT + P.id, + P.name, + A.starttime +FROM + person as P + JOIN ( + SELECT + seller, + window_start AS starttime, + window_end AS endtime + FROM + TUMBLE(auction, date_time, INTERVAL '10' SECOND) + GROUP BY + seller, + window_start, + window_end + ) A ON P.id = A.seller; \ No newline at end of file diff --git a/integration_tests/postgres-cdc/create_source.sql b/integration_tests/postgres-cdc/create_source.sql new file mode 100644 index 0000000000000..cc08684f5f365 --- /dev/null +++ b/integration_tests/postgres-cdc/create_source.sql @@ -0,0 +1,41 @@ +create table person ( + "id" int, + "name" varchar, + "email_address" varchar, + 
"credit_card" varchar, + "city" varchar, + PRIMARY KEY ("id") +) with ( + connector = 'postgres-cdc', + hostname = 'postgres', + port = '5432', + username = 'myuser', + password = '123456', + database.name = 'mydb', + schema.name = 'public', + table.name = 'person', + slot.name = 'person' +); + +CREATE SOURCE t_auction ( + id BIGINT, + item_name VARCHAR, + date_time BIGINT, + seller INT, + category INT +) WITH ( + connector = 'kafka', + topic = 'auction', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; + +CREATE VIEW auction as +SELECT + id, + item_name, + to_timestamp(date_time) as date_time, + seller, + category +FROM + t_auction; \ No newline at end of file diff --git a/integration_tests/postgres-cdc/data_check b/integration_tests/postgres-cdc/data_check new file mode 100644 index 0000000000000..4e00aba632aaa --- /dev/null +++ b/integration_tests/postgres-cdc/data_check @@ -0,0 +1 @@ +person,city_population,nexmark_q8 \ No newline at end of file diff --git a/integration_tests/postgres-cdc/docker-compose.yml b/integration_tests/postgres-cdc/docker-compose.yml new file mode 100644 index 0000000000000..59a9f86f13cb3 --- /dev/null +++ b/integration_tests/postgres-cdc/docker-compose.yml @@ -0,0 +1,96 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + # Use this command to connect to the DB from outside the container: + # docker exec postgres psql --username=myuser --dbname=mydb + postgres: + image: postgres + environment: + - POSTGRES_USER=myuser + - POSTGRES_PASSWORD=123456 + - POSTGRES_DB=mydb + ports: + - 5432:5432 + healthcheck: + test: [ "CMD-SHELL", "pg_isready --username=myuser --dbname=mydb" ] + interval: 5s + timeout: 5s + retries: 5 + command: [ "postgres", "-c", "wal_level=logical" ] + restart: always + container_name: postgres + connector-node: + extends: + file: ../../docker/docker-compose.yml + service: connector-node + postgres_prepare: + image: postgres + depends_on: + - postgres + command: + - /bin/sh + - -c + - "psql postgresql://myuser:123456@postgres:5432/mydb < postgres_prepare.sql" + volumes: + - "./postgres_prepare.sql:/postgres_prepare.sql" + container_name: postgres_prepare + restart: on-failure + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --mode nexmark --qps 2 kafka --brokers message_queue:29092 + restart: always + container_name: datagen + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/postgres-cdc/postgres_prepare.sql b/integration_tests/postgres-cdc/postgres_prepare.sql 
new file mode 100644 index 0000000000000..5a7c638011183 --- /dev/null +++ b/integration_tests/postgres-cdc/postgres_prepare.sql @@ -0,0 +1,112 @@ +create table person ( + "id" int, + "name" varchar(64), + "email_address" varchar(200), + "credit_card" varchar(200), + "city" varchar(200), + PRIMARY KEY ("id") +); + +ALTER TABLE + public.person REPLICA IDENTITY FULL; + +INSERT INTO person VALUES (1000, 'vicky noris', 'yplkvgz@qbxfg.com', '7878 5821 1864 2539', 'cheyenne'); +INSERT INTO person VALUES (1001, 'peter white', 'myckhsp@xpmpe.com', '1781 2313 8157 6974', 'boise'); +INSERT INTO person VALUES (1002, 'sarah spencer', 'wipvdbm@dkaap.com', '3453 4987 9481 6270', 'los angeles'); +INSERT INTO person VALUES (1003, 'vicky jones', 'kedmrpz@xiauh.com', '5536 1959 5460 2096', 'portland'); +INSERT INTO person VALUES (1004, 'julie white', 'egpemle@lrhcg.com', '0052 8113 1582 4430', 'seattle'); +INSERT INTO person VALUES (1005, 'sarah smith', 'siqjtnt@tjjek.com', '4591 5419 7260 8350', 'los angeles'); +INSERT INTO person VALUES (1006, 'walter white', 'fwdbytp@zepzq.com', '1327 3245 1956 8200', 'san francisco'); +INSERT INTO person VALUES (1007, 'walter spencer', 'ktncerj@jlikw.com', '5136 7504 2879 7886', 'los angeles'); +INSERT INTO person VALUES (1008, 'john abrams', 'jmvgsrq@nyfud.com', '6064 8548 6057 2021', 'redmond'); +INSERT INTO person VALUES (1009, 'peter noris', 'bhjbkpk@svzrx.com', '1063 2940 2119 8587', 'cheyenne'); +INSERT INTO person VALUES (1010, 'kate smith', 'fvsmqlb@grtho.com', '9474 6887 6463 6972', 'bend'); +INSERT INTO person VALUES (1011, 'vicky noris', 'chyakdh@acjjz.com', '9959 4034 5717 6729', 'boise'); +INSERT INTO person VALUES (1012, 'walter jones', 'utfqxal@sfxco.com', '8793 6517 3085 0542', 'boise'); +INSERT INTO person VALUES (1013, 'sarah walton', 'xdybqki@xrvkt.com', '2280 4209 8743 0735', 'kent'); +INSERT INTO person VALUES (1014, 'walter abrams', 'cbujzko@ehffe.com', '1235 3048 6067 9304', 'phoenix'); +INSERT INTO person VALUES (1015, 'vicky jones', 'xyygoyf@msejb.com', '3148 5012 3225 2870', 'los angeles'); +INSERT INTO person VALUES (1016, 'john walton', 'yzbccmz@hdnvm.com', '0426 2682 6145 8371', 'seattle'); +INSERT INTO person VALUES (1017, 'luke jones', 'yozosta@nzewf.com', '9641 9352 0248 2749', 'redmond'); +INSERT INTO person VALUES (1018, 'julie white', 'clhqozw@gioov.com', '3622 5461 2365 3624', 'bend'); +INSERT INTO person VALUES (1019, 'paul abrams', 'fshovpk@ayoej.com', '4433 7863 9751 7878', 'redmond'); +INSERT INTO person VALUES (1020, 'deiter smith', 'nqgdcpx@sumai.com', '0908 3870 4528 4710', 'boise'); +INSERT INTO person VALUES (1021, 'john walton', 'zzjwizw@skwdx.com', '2404 5072 3429 2483', 'phoenix'); +INSERT INTO person VALUES (1022, 'paul walton', 'zwhnjwb@ojuft.com', '0402 5453 9709 8030', 'portland'); +INSERT INTO person VALUES (1023, 'peter bartels', 'gwlteve@aikvf.com', '6555 8884 1360 0295', 'redmond'); +INSERT INTO person VALUES (1024, 'saul shultz', 'mghpttp@sxihm.com', '7987 2816 9818 8727', 'cheyenne'); +INSERT INTO person VALUES (1025, 'julie bartels', 'cxjfsuu@uwcpw.com', '0352 3457 2885 0266', 'san francisco'); +INSERT INTO person VALUES (1026, 'paul spencer', 'plcully@qwfas.com', '2017 1897 0926 6328', 'los angeles'); +INSERT INTO person VALUES (1027, 'luke white', 'jtatgee@wjaok.com', '2465 7541 1015 4655', 'portland'); +INSERT INTO person VALUES (1028, 'kate white', 'mmcqrfk@fldvr.com', '3696 3808 1329 0692', 'seattle'); +INSERT INTO person VALUES (1029, 'kate spencer', 'wkixktk@nqzin.com', '8540 3588 4648 5329', 'portland'); 
+INSERT INTO person VALUES (1030, 'sarah walton', 'bhinrlm@itvuw.com', '1009 7742 8888 9596', 'portland'); +INSERT INTO person VALUES (1031, 'luke abrams', 'tmoomlm@umwjm.com', '1161 4093 8361 3851', 'redmond'); +INSERT INTO person VALUES (1032, 'saul bartels', 'kkxmkbp@sjldo.com', '5311 2081 6147 8292', 'cheyenne'); +INSERT INTO person VALUES (1033, 'sarah smith', 'gixszyd@ikahc.com', '0654 0143 9916 7419', 'cheyenne'); +INSERT INTO person VALUES (1034, 'sarah spencer', 'wazwjxh@giysr.com', '8093 7447 4488 2464', 'los angeles'); +INSERT INTO person VALUES (1035, 'kate smith', 'xdtubdc@eoqat.com', '1880 7605 7505 3038', 'seattle'); +INSERT INTO person VALUES (1036, 'deiter white', 'lzxmcig@pfyrp.com', '8336 1080 3823 2249', 'los angeles'); +INSERT INTO person VALUES (1037, 'john jones', 'qdolslh@pzlry.com', '4394 1929 0794 1731', 'los angeles'); +INSERT INTO person VALUES (1038, 'walter spencer', 'ljboats@roguq.com', '5990 9981 6050 5247', 'bend'); +INSERT INTO person VALUES (1039, 'luke jones', 'sobojsi@vhqkh.com', '1406 2686 9359 7086', 'cheyenne'); +INSERT INTO person VALUES (1040, 'luke bartels', 'qtlduro@zijhv.com', '6662 1330 8131 8426', 'cheyenne'); +INSERT INTO person VALUES (1041, 'deiter jones', 'chmequx@mkfof.com', '2941 9597 1592 6346', 'phoenix'); +INSERT INTO person VALUES (1042, 'john smith', 'odilagg@ckwuo.com', '7919 0755 1682 9068', 'portland'); +INSERT INTO person VALUES (1043, 'vicky walton', 'nhcbcvg@kkqvz.com', '0031 6046 4743 7296', 'cheyenne'); +INSERT INTO person VALUES (1044, 'peter white', 'bigajpm@tslez.com', '6077 8921 3999 7697', 'bend'); +INSERT INTO person VALUES (1045, 'walter shultz', 'vaefysn@unvsg.com', '3638 3193 7385 6193', 'boise'); +INSERT INTO person VALUES (1046, 'saul abrams', 'zxfjtbp@fgwli.com', '4031 2701 7554 5688', 'cheyenne'); +INSERT INTO person VALUES (1047, 'saul jones', 'xyeymyt@otocr.com', '5732 1968 8707 8446', 'redmond'); +INSERT INTO person VALUES (1048, 'peter bartels', 'ysmazaq@rnpky.com', '4696 0667 3826 9971', 'san francisco'); +INSERT INTO person VALUES (1049, 'walter noris', 'zeeibrx@aljnm.com', '1484 3392 4739 2098', 'redmond'); +INSERT INTO person VALUES (1050, 'peter smith', 'kabfpld@fhfis.com', '5179 0198 7232 1932', 'boise'); +INSERT INTO person VALUES (1051, 'julie abrams', 'knmtfvw@lyiyz.com', '3687 0788 3300 6960', 'cheyenne'); +INSERT INTO person VALUES (1052, 'peter abrams', 'uweavbw@ijmcd.com', '9341 0308 6833 3448', 'portland'); +INSERT INTO person VALUES (1053, 'paul noris', 'hnijvou@zawwc.com', '1502 1867 0969 4737', 'seattle'); +INSERT INTO person VALUES (1054, 'sarah jones', 'kmhnjtg@cetsb.com', '3145 3266 2116 5290', 'cheyenne'); +INSERT INTO person VALUES (1055, 'kate abrams', 'gyocmgj@uimwr.com', '0552 0064 4476 2409', 'cheyenne'); +INSERT INTO person VALUES (1056, 'julie abrams', 'ckmoalu@ndgaj.com', '9479 9270 0678 6846', 'boise'); +INSERT INTO person VALUES (1057, 'julie white', 'chxvkez@djjaa.com', '3522 2797 5148 3246', 'cheyenne'); +INSERT INTO person VALUES (1058, 'walter abrams', 'rmfqwms@pvttk.com', '8478 3866 5662 6467', 'seattle'); +INSERT INTO person VALUES (1059, 'julie spencer', 'nykvghm@kdhpt.com', '9138 9947 8873 7763', 'kent'); +INSERT INTO person VALUES (1060, 'kate abrams', 'wqxypwn@jrafo.com', '5422 1018 4333 0049', 'portland'); +INSERT INTO person VALUES (1061, 'kate white', 'njkweqw@qlinl.com', '3254 1815 6422 1716', 'san francisco'); +INSERT INTO person VALUES (1062, 'luke bartels', 'emoramu@tkqmj.com', '7655 7679 5909 2251', 'portland'); +INSERT INTO person VALUES (1063, 'julie 
spencer', 'acpybcy@fygni.com', '0523 2583 3342 5588', 'portland'); +INSERT INTO person VALUES (1064, 'luke spencer', 'rxlzmbi@ftvjh.com', '3989 4985 1721 9240', 'los angeles'); +INSERT INTO person VALUES (1065, 'john jones', 'sdjpica@sfddi.com', '7716 1367 0259 3889', 'bend'); +INSERT INTO person VALUES (1066, 'paul white', 'gclssac@cjcqr.com', '2708 5518 8447 8022', 'kent'); +INSERT INTO person VALUES (1067, 'vicky bartels', 'qsurdwa@zcyxz.com', '9332 8313 3113 1752', 'cheyenne'); +INSERT INTO person VALUES (1068, 'john spencer', 'rvdbxjj@thhat.com', '2065 0039 4966 7017', 'phoenix'); +INSERT INTO person VALUES (1069, 'luke white', 'rlnjujw@yajij.com', '8511 7005 7854 1288', 'portland'); +INSERT INTO person VALUES (1070, 'sarah jones', 'hpuddzw@zqxub.com', '4625 1520 6481 1767', 'bend'); +INSERT INTO person VALUES (1071, 'luke shultz', 'uhlejag@whmqq.com', '3427 8456 9076 1714', 'kent'); +INSERT INTO person VALUES (1072, 'julie shultz', 'xzwbhur@otviv.com', '6404 5841 0949 2641', 'boise'); +INSERT INTO person VALUES (1073, 'vicky walton', 'ercndev@gequo.com', '8807 4321 6973 6085', 'boise'); +INSERT INTO person VALUES (1074, 'julie noris', 'jytjumk@fddus.com', '7463 7084 1696 8892', 'kent'); +INSERT INTO person VALUES (1075, 'julie bartels', 'hugijat@huhob.com', '4530 8776 7942 5085', 'los angeles'); +INSERT INTO person VALUES (1076, 'kate spencer', 'snqygzv@tsnwb.com', '2522 9594 4307 9831', 'boise'); +INSERT INTO person VALUES (1077, 'kate jones', 'lsshriy@aknvv.com', '7065 2545 7960 0041', 'portland'); +INSERT INTO person VALUES (1078, 'saul walton', 'xveffme@gcplt.com', '5848 5246 7319 1450', 'phoenix'); +INSERT INTO person VALUES (1079, 'vicky smith', 'fhcdtoq@aemjt.com', '3071 1822 6864 8221', 'los angeles'); +INSERT INTO person VALUES (1080, 'luke shultz', 'zlrbrav@pynxn.com', '2038 4905 4566 6031', 'phoenix'); +INSERT INTO person VALUES (1081, 'john shultz', 'giradrs@mavun.com', '3344 8962 5224 8904', 'portland'); +INSERT INTO person VALUES (1082, 'john bartels', 'nqxjwrg@ppebb.com', '7144 4781 7168 6500', 'los angeles'); +INSERT INTO person VALUES (1083, 'john white', 'kkcnemc@wcdej.com', '6683 7670 7530 0890', 'bend'); +INSERT INTO person VALUES (1084, 'walter abrams', 'bmjdpec@ynwal.com', '3594 8838 1244 9650', 'bend'); +INSERT INTO person VALUES (1085, 'deiter jones', 'xquhjkv@azyxm.com', '6385 5861 0188 6728', 'los angeles'); +INSERT INTO person VALUES (1086, 'vicky shultz', 'lwmmeqx@rvddr.com', '5916 6762 6797 4669', 'los angeles'); +INSERT INTO person VALUES (1087, 'vicky walton', 'askxzha@lachv.com', '2178 8782 4988 7051', 'bend'); +INSERT INTO person VALUES (1088, 'kate noris', 'tbalnld@nmxkq.com', '3240 6224 1233 7005', 'boise'); +INSERT INTO person VALUES (1089, 'vicky noris', 'grjawpy@zkyds.com', '2009 4332 9634 9823', 'boise'); +INSERT INTO person VALUES (1090, 'sarah bartels', 'hrpmxnr@rvzgq.com', '0733 1934 0398 7793', 'redmond'); +INSERT INTO person VALUES (1091, 'saul walton', 'ntqrfhp@oumoz.com', '8923 8221 6882 0275', 'bend'); +INSERT INTO person VALUES (1092, 'paul noris', 'qevgjyo@wubwo.com', '9303 3741 8490 6300', 'portland'); +INSERT INTO person VALUES (1093, 'peter white', 'cjbkbke@rtbye.com', '1188 2449 6471 5253', 'boise'); +INSERT INTO person VALUES (1094, 'kate smith', 'pbjnaxm@fbgld.com', '3054 4394 5921 6700', 'bend'); +INSERT INTO person VALUES (1095, 'luke spencer', 'iamwwkv@cujlu.com', '6643 2101 9195 1615', 'seattle'); +INSERT INTO person VALUES (1096, 'luke noris', 'amsxmdf@znzqj.com', '7291 3287 8055 7550', 'kent'); +INSERT INTO person VALUES 
(1097, 'walter abrams', 'djjgtgv@gdhku.com', '9089 0787 4194 7095', 'san francisco'); +INSERT INTO person VALUES (1098, 'kate spencer', 'suadlvi@makbh.com', '0823 4419 7875 1675', 'phoenix'); +INSERT INTO person VALUES (1099, 'sarah white', 'ynsyxew@rjjmk.com', '4049 9641 0911 0158', 'redmond'); diff --git a/integration_tests/postgres-cdc/query.sql b/integration_tests/postgres-cdc/query.sql new file mode 100644 index 0000000000000..6ea3da992e59c --- /dev/null +++ b/integration_tests/postgres-cdc/query.sql @@ -0,0 +1,6 @@ +SELECT + * +FROM + city_population +LIMIT + 10; \ No newline at end of file diff --git a/integration_tests/postgres-sink/README.md b/integration_tests/postgres-sink/README.md new file mode 100644 index 0000000000000..3066b8d37e3f9 --- /dev/null +++ b/integration_tests/postgres-sink/README.md @@ -0,0 +1,16 @@ +# HOW-TO + +This demo showcases how to sink RisingWave's data to an external Postgres. The data loader has been included in the docker compose so the data will be loaded to Postgres once the cluster is set up. + +Here's what this demo does: + +1. `docker compose up -d`: Start the cluster. +2. After 20-30s: `create_source.sql`. +3. After 10s: `create_mv.sql`. +4. After another 10s, the tester will check if the source has ingested some data by creating a materialized view upon the source. It also checks if the MV created in the 3rd step has some data. + +To connect to the Postgres on your local PC: + +```sh +psql postgresql://myuser:123456@127.0.0.1:5432/mydb +``` diff --git a/integration_tests/postgres-sink/create_mv.sql b/integration_tests/postgres-sink/create_mv.sql new file mode 100644 index 0000000000000..e9bdce80d7749 --- /dev/null +++ b/integration_tests/postgres-sink/create_mv.sql @@ -0,0 +1,16 @@ +CREATE MATERIALIZED VIEW target_count AS +SELECT + target_id, + COUNT(*) AS target_count +FROM + user_behaviors +GROUP BY + target_id; + +CREATE SINK target_count_postgres_sink +FROM + target_count WITH ( + connector = 'jdbc', + jdbc.url = 'jdbc:postgresql://postgres:5432/mydb?user=myuser&password=123456', + table.name = 'target_count' + ); \ No newline at end of file diff --git a/integration_tests/postgres-sink/create_source.sql b/integration_tests/postgres-sink/create_source.sql new file mode 100644 index 0000000000000..7a9e3d3add4c8 --- /dev/null +++ b/integration_tests/postgres-sink/create_source.sql @@ -0,0 +1,14 @@ +CREATE SOURCE user_behaviors ( + user_id VARCHAR, + target_id VARCHAR, + target_type VARCHAR, + event_timestamp TIMESTAMPTZ, + behavior_type VARCHAR, + parent_target_type VARCHAR, + parent_target_id VARCHAR +) WITH ( + connector = 'kafka', + topic = 'user_behaviors', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; \ No newline at end of file diff --git a/integration_tests/postgres-sink/data_check b/integration_tests/postgres-sink/data_check new file mode 100644 index 0000000000000..3835eb979b86e --- /dev/null +++ b/integration_tests/postgres-sink/data_check @@ -0,0 +1 @@ +user_behaviors,target_count \ No newline at end of file diff --git a/integration_tests/postgres-sink/docker-compose.yml b/integration_tests/postgres-sink/docker-compose.yml new file mode 100644 index 0000000000000..cd8033ad2221b --- /dev/null +++ b/integration_tests/postgres-sink/docker-compose.yml @@ -0,0 +1,96 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: 
compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --mode clickstream --qps 2 kafka --brokers message_queue:29092 + restart: always + container_name: datagen + # Use this command to connect to the DB from outside the container: + # docker exec postgres psql --username=myuser --dbname=mydb + postgres: + image: postgres + environment: + - POSTGRES_USER=myuser + - POSTGRES_PASSWORD=123456 + - POSTGRES_DB=mydb + ports: + - 5432:5432 + healthcheck: + test: [ "CMD-SHELL", "pg_isready --username=myuser --dbname=mydb" ] + interval: 5s + timeout: 5s + retries: 5 + command: [ "postgres", "-c", "wal_level=logical" ] + restart: always + container_name: postgres + connector-node: + extends: + file: ../../docker/docker-compose.yml + service: connector-node + prepare_postgres: + image: postgres + depends_on: + - postgres + command: + - /bin/sh + - -c + - "psql postgresql://myuser:123456@postgres:5432/mydb < postgres_prepare.sql" + volumes: + - "./postgres_prepare.sql:/postgres_prepare.sql" + container_name: prepare_postgres + restart: on-failure +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/postgres-sink/postgres_prepare.sql b/integration_tests/postgres-sink/postgres_prepare.sql new file mode 100644 index 0000000000000..ded9b4cec97cd --- /dev/null +++ b/integration_tests/postgres-sink/postgres_prepare.sql @@ -0,0 +1,4 @@ +CREATE TABLE target_count ( + target_id VARCHAR(128) primary key, + target_count BIGINT +); \ No newline at end of file diff --git a/integration_tests/postgres-sink/query.sql b/integration_tests/postgres-sink/query.sql new file mode 100644 index 0000000000000..e09c66a255f10 --- /dev/null +++ b/integration_tests/postgres-sink/query.sql @@ -0,0 +1,6 @@ +SELECT + * +FROM + target_count +LIMIT + 10; \ No newline at end of file diff --git a/integration_tests/prometheus/create_mv.sql b/integration_tests/prometheus/create_mv.sql new file mode 100644 index 0000000000000..81be629ccdbea --- /dev/null +++ b/integration_tests/prometheus/create_mv.sql @@ -0,0 +1,16 @@ +create materialized view metric_avg_30s as +select + name as metric_name, + window_start as metric_time, + avg(value :: decimal) as metric_value +from + tumble( + prometheus, + timestamp, + interval '30 s' + ) +group by + name, + window_start +order by + window_start; \ No newline at end of file diff --git a/integration_tests/prometheus/create_source.sql b/integration_tests/prometheus/create_source.sql new file mode 100644 index 0000000000000..b67e8676e00b4 --- /dev/null +++ b/integration_tests/prometheus/create_source.sql @@ -0,0 +1,13 @@ +CREATE SOURCE prometheus ( + labels STRUCT < __name__ VARCHAR, + instance VARCHAR, + job VARCHAR >, + name VARCHAR, 
+ timestamp TIMESTAMPTZ, + value VARCHAR +) WITH ( + connector = 'kafka', + topic = 'prometheus', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; \ No newline at end of file diff --git a/integration_tests/prometheus/create_user.sql b/integration_tests/prometheus/create_user.sql new file mode 100644 index 0000000000000..791e376a90916 --- /dev/null +++ b/integration_tests/prometheus/create_user.sql @@ -0,0 +1,6 @@ +create user grafanareader with password 'password'; + +-- It is recommended to use a dedicated read-only user when querying the database using Grafana. +grant +select + on materialized view metric_avg_30s to grafanareader; \ No newline at end of file diff --git a/integration_tests/prometheus/data_check b/integration_tests/prometheus/data_check new file mode 100644 index 0000000000000..6a39c46f26f9c --- /dev/null +++ b/integration_tests/prometheus/data_check @@ -0,0 +1 @@ +prometheus,metric_avg_30s \ No newline at end of file diff --git a/integration_tests/prometheus/docker-compose.yml b/integration_tests/prometheus/docker-compose.yml new file mode 100644 index 0000000000000..e716e567e2fd4 --- /dev/null +++ b/integration_tests/prometheus/docker-compose.yml @@ -0,0 +1,94 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + image: "prom/prometheus:latest" + command: + - "--config.file=/etc/prometheus/prometheus.yml" + - "--storage.tsdb.path=/prometheus" + - "--web.console.libraries=/usr/share/prometheus/console_libraries" + - "--web.console.templates=/usr/share/prometheus/consoles" + - "--web.listen-address=0.0.0.0:9500" + - "--storage.tsdb.retention.time=5m" # Use prometheus for short-term storage. 
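+      # Long-term history is kept downstream instead: the remote_write section in prometheus.yaml
+      # ships every sample to prometheus-kafka-adaptor, and RisingWave ingests the resulting
+      # "prometheus" Kafka topic (see create_source.sql).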
+ expose: + - "9500" + ports: + - "9500:9500" + depends_on: [] + volumes: + - "prometheus-0:/prometheus" + - "./prometheus.yaml:/etc/prometheus/prometheus.yml" + environment: {} + container_name: prometheus-0 + healthcheck: + test: + - CMD + - printf + - "" + - /dev/tcp/127.0.0.1/9500 + interval: 1s + timeout: 5s + retries: 5 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + prometheus-kafka-adaptor: + image: "telefonica/prometheus-kafka-adapter:1.8.0" + expose: + - "9501" + ports: + - "9501:9501" + environment: + - KAFKA_BROKER_LIST=message_queue:29092 + - KAFKA_TOPIC=prometheus + - PORT=9501 + - GIN_MODE=release + - LOG_LEVEL=info + - SERIALIZATION_FORMAT=json + container_name: prometheus-kafka-adaptor + depends_on: + - prometheus-0 + - message_queue +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/prometheus/prometheus.yaml b/integration_tests/prometheus/prometheus.yaml new file mode 100644 index 0000000000000..8fef25a62dc17 --- /dev/null +++ b/integration_tests/prometheus/prometheus.yaml @@ -0,0 +1,37 @@ +# --- THIS FILE IS AUTO GENERATED BY RISEDEV --- +global: + scrape_interval: 1s + evaluation_interval: 5s + +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ["prometheus-0:9500"] + + - job_name: compute + static_configs: + - targets: ["compute-node-0:1222"] + + - job_name: meta + static_configs: + - targets: ["meta-node-0:1250"] + + - job_name: minio + metrics_path: /minio/v2/metrics/cluster + static_configs: + - targets: ["minio-0:9301"] + + - job_name: compactor + static_configs: + - targets: ["compactor-0:1260"] + + - job_name: etcd + static_configs: + - targets: ["etcd-0:2379"] + + - job_name: redpanda + static_configs: + - targets: ["redpanda:9644"] + +remote_write: + - url: http://prometheus-kafka-adaptor:9501/receive diff --git a/integration_tests/prometheus/query.sql b/integration_tests/prometheus/query.sql new file mode 100644 index 0000000000000..e81b4b4ec74f7 --- /dev/null +++ b/integration_tests/prometheus/query.sql @@ -0,0 +1,8 @@ +select + * +from + metric_avg_30s +where + metric_name = 'object_store_read_bytes' +order by + metric_time; \ No newline at end of file diff --git a/integration_tests/schema-registry/create_mv.sql b/integration_tests/schema-registry/create_mv.sql new file mode 100644 index 0000000000000..7a02f5803cd4a --- /dev/null +++ b/integration_tests/schema-registry/create_mv.sql @@ -0,0 +1,10 @@ +CREATE MATERIALIZED VIEW student_view AS +SELECT + id, + name, + avg_score, + age, + schema_version +FROM + student +WHERE age > 10; \ No newline at end of file diff --git a/integration_tests/schema-registry/create_source.sql b/integration_tests/schema-registry/create_source.sql new file mode 100644 index 0000000000000..09c078f99f432 --- /dev/null +++ b/integration_tests/schema-registry/create_source.sql @@ -0,0 +1,8 @@ +CREATE SOURCE student WITH ( + connector = 'kafka', + topic = 'sr-test', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) +ROW FORMAT avro +row schema location confluent schema registry 'http://message_queue:8081'; \ No newline at end of file diff --git a/integration_tests/schema-registry/data_check b/integration_tests/schema-registry/data_check new file mode 100644 index 0000000000000..de11e3bcc0c0c --- /dev/null +++ 
b/integration_tests/schema-registry/data_check
@@ -0,0 +1 @@
+student_view
\ No newline at end of file
diff --git a/integration_tests/schema-registry/datagen.py b/integration_tests/schema-registry/datagen.py
new file mode 100644
index 0000000000000..4ac7bd1537536
--- /dev/null
+++ b/integration_tests/schema-registry/datagen.py
@@ -0,0 +1,144 @@
+from confluent_kafka import Producer
+from confluent_kafka.admin import AdminClient, NewTopic
+from confluent_kafka.serialization import StringSerializer, SerializationContext, MessageField
+from confluent_kafka.schema_registry import SchemaRegistryClient
+from confluent_kafka.schema_registry.avro import AvroSerializer
+import sys
+import random
+import time
+
+# the two versions of the schema are compatible
+schema_v1 = r'''
+{
+    "name": "student",
+    "type": "record",
+    "fields": [
+        {
+            "name": "id",
+            "type": "int",
+            "default": 0
+        },
+        {
+            "name": "name",
+            "type": "string",
+            "default": ""
+        },
+        {
+            "name": "avg_score",
+            "type": "double",
+            "default": 0.0
+        },
+        {
+            "name": "age",
+            "type": "int",
+            "default": 0
+        },
+        {
+            "name": "schema_version",
+            "type": "string",
+            "default": ""
+        }
+    ]
+}
+'''
+
+schema_v2 = r'''
+{
+    "name": "student",
+    "type": "record",
+    "fields": [
+        {
+            "name": "id",
+            "type": "int",
+            "default": 0
+        },
+        {
+            "name": "name",
+            "type": "string",
+            "default": ""
+        },
+        {
+            "name": "avg_score",
+            "type": "double",
+            "default": 0.0
+        },
+        {
+            "name": "age",
+            "type": "int",
+            "default": 0
+        },
+        {
+            "name": "facebook_id",
+            "type": "string",
+            "default": ""
+        },
+        {
+            "name": "schema_version",
+            "type": "string",
+            "default": ""
+        }
+    ]
+}
+'''
+
+schemas = {'v1': schema_v1, 'v2': schema_v2}
+
+
+def create_topic(kafka_conf, topic_name):
+    client = AdminClient(kafka_conf)
+    topic_list = []
+    topic_list.append(
+        NewTopic(topic_name, num_partitions=1, replication_factor=1))
+    client.create_topics(topic_list)
+
+
+def get_basic_value(id):
+    return {'id': id, 'name': ''.join(random.sample('zyxwvutsrqponmlkjihgfedcba', 7)), 'avg_score': random.random() * 100, 'age': random.randint(10, 100)}
+
+
+def get_value_and_serializer(id, version, schema_registry_client):
+    value = get_basic_value(id)
+    value['schema_version'] = version
+    if version == 'v2':
+        value['facebook_id'] = "12345678"
+    return value, AvroSerializer(schema_registry_client=schema_registry_client, schema_str=schemas[version])
+
+
+def delivery_report(err, msg):
+    if err is not None:
+        print("Delivery failed for User record {}: {}".format(msg.value(), err))
+        return
+
+
+if __name__ == '__main__':
+    if len(sys.argv) < 4:
+        print("datagen.py <broker_list> <schema_registry_url> <topic>")
+    broker_list = sys.argv[1]
+    schema_registry_url = sys.argv[2]
+    topic = sys.argv[3]
+
+    print("broker_list: {}".format(broker_list))
+    print("schema_registry_url: {}".format(schema_registry_url))
+    print("topic: {}".format(topic))
+
+    schema_registry_conf = {'url': schema_registry_url}
+    kafka_conf = {'bootstrap.servers': broker_list}
+    schema_registry_client = SchemaRegistryClient(schema_registry_conf)
+
+    create_topic(kafka_conf=kafka_conf, topic_name=topic)
+
+    id = 0
+    while True:
+        for version in schemas.keys():
+            id += 1
+            value, avro_serializer = get_value_and_serializer(
+                id, version, schema_registry_client)
+            producer = Producer(kafka_conf)
+            producer.produce(topic=topic, partition=0,
+                             value=avro_serializer(
+                                 value, SerializationContext(topic, MessageField.VALUE)),
+                             on_delivery=delivery_report)
+            producer.flush()
+            if id % 100 == 0:
+                print("Sent {} records".format(id))
+            time.sleep(0.05)
diff --git 
a/integration_tests/schema-registry/docker-compose.yml b/integration_tests/schema-registry/docker-compose.yml new file mode 100644 index 0000000000000..ac9b6b26aeaf7 --- /dev/null +++ b/integration_tests/schema-registry/docker-compose.yml @@ -0,0 +1,68 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + datagen: + image: python:3.10 + depends_on: [ message_queue ] + volumes: + - type: bind + source: ./datagen.py + target: /datagen.py + command: + - /bin/sh + - -c + - | + pip install requests fastavro confluent_kafka + python /datagen.py message_queue:29092 http://message_queue:8081 sr-test + restart: always + container_name: datagen +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/schema-registry/query.sql b/integration_tests/schema-registry/query.sql new file mode 100644 index 0000000000000..95e60697428f1 --- /dev/null +++ b/integration_tests/schema-registry/query.sql @@ -0,0 +1,6 @@ +SELECT + * +FROM + student_view +limit + 10; diff --git a/integration_tests/schema-registry/readme.md b/integration_tests/schema-registry/readme.md new file mode 100644 index 0000000000000..dd1e9cac1d989 --- /dev/null +++ b/integration_tests/schema-registry/readme.md @@ -0,0 +1,95 @@ +This demo shows how to ingest Avro data into RisingWave with [Schema Registry](https://github.com/confluentinc/schema-registry), which manages multiple versions of Avro schemas. + +At the beginning, there's a datagen process that ingests Avro data into Redpanda (a Kafka-compatible message queue). The Avro schema is as follows: + +- **Version 1** + + ```json + { + "name": "student", + "type": "record", + "fields": [ + { + "name": "id", + "type": "int", + "default": 0 + }, + { + "name": "name", + "type": "string", + "default": "" + }, + { + "name": "avg_score", + "type": "double", + "default": 0.0 + }, + { + "name": "age", + "type": "int", + "default": 0 + }, + { + "name": "schema_version", + "type": "string", + "default": "" + } + ] + } + ``` + +- **Version 2** + + ```json + { + "name": "student", + "type": "record", + "fields": [ + { + "name": "id", + "type": "int", + "default": 0 + }, + { + "name": "name", + "type": "string", + "default": "" + }, + { + "name": "avg_score", + "type": "double", + "default": 0.0 + }, + { + "name": "age", + "type": "int", + "default": 0 + }, + { + "name": "facebook_id", + "type": "string", + "default": "" + }, + { + "name": "schema_version", + "type": "string", + "default": "" + } + ] + } + ``` + +As shown above, there are two versions of the schema. The new version contains an additional field `facebook_id`. 
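Because every field in both versions declares a default value, a record written with either schema can be decoded by a reader holding the other. Below is a minimal sketch of that resolution using `fastavro` (which the datagen container already installs); the record values are made up, and the snippet is an illustration rather than a file in the demo:

```python
import io

from fastavro import parse_schema, schemaless_reader, schemaless_writer

# Compact copies of the two schemas shown above.
v1 = parse_schema({
    "name": "student", "type": "record", "fields": [
        {"name": "id", "type": "int", "default": 0},
        {"name": "name", "type": "string", "default": ""},
        {"name": "avg_score", "type": "double", "default": 0.0},
        {"name": "age", "type": "int", "default": 0},
        {"name": "schema_version", "type": "string", "default": ""},
    ],
})
v2 = parse_schema({
    "name": "student", "type": "record", "fields": [
        {"name": "id", "type": "int", "default": 0},
        {"name": "name", "type": "string", "default": ""},
        {"name": "avg_score", "type": "double", "default": 0.0},
        {"name": "age", "type": "int", "default": 0},
        {"name": "facebook_id", "type": "string", "default": ""},
        {"name": "schema_version", "type": "string", "default": ""},
    ],
})

# Encode a record with the old (v1) schema as the writer schema ...
buf = io.BytesIO()
schemaless_writer(buf, v1, {"id": 1, "name": "alice", "avg_score": 99.5,
                            "age": 12, "schema_version": "v1"})
buf.seek(0)

# ... and decode it with the new (v2) schema as the reader schema:
# `facebook_id` is absent on the wire, so it falls back to its default "".
print(schemaless_reader(buf, v1, v2))
```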
Hence, it is backward-compatible with the old version. The data generator alternates between the two schema versions, so the topic contains a roughly even (50/50) mix of v1 and v2 records.

Then, this demo will connect RisingWave to the message queue. Here we specify the address as `confluent schema registry 'http://message_queue:8081'`. The final CREATE SOURCE query is as follows:

```sql
CREATE SOURCE student WITH (
    connector = 'kafka',
    topic = 'sr-test',
    properties.bootstrap.server = 'message_queue:29092',
    scan.startup.mode = 'earliest'
)
ROW FORMAT avro
row schema location confluent schema registry 'http://message_queue:8081';
```
diff --git a/integration_tests/scripts/.gitignore b/integration_tests/scripts/.gitignore
new file mode 100644
index 0000000000000..31e3ea5413583
--- /dev/null
+++ b/integration_tests/scripts/.gitignore
@@ -0,0 +1,4 @@
+bin/
+include/
+lib/
+pyvenv.cfg
diff --git a/integration_tests/scripts/check_data.py b/integration_tests/scripts/check_data.py
new file mode 100644
index 0000000000000..1d0a4b52c00d6
--- /dev/null
+++ b/integration_tests/scripts/check_data.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python3
+
+# Every demo directory contains a 'data_check' file that lists the relations (either source or mv)
+# that are expected to contain at least one row. This script runs the checks by creating a
+# materialized view over each relation and verifying the row count in the view.
+
+import os
+from posixpath import abspath
+import subprocess
+import sys
+
+from os.path import dirname
+import time
+
+
+def create_mv(rel: str):
+    if "_mv" in rel:
+        raise Exception('relation "{}" must not contain "_mv"'.format(rel))
+    run_sql("CREATE MATERIALIZED VIEW {0}_mv AS SELECT * FROM {0}".format(rel))
+
+
+def check_mv(rel: str):
+    rows = run_sql("SELECT COUNT(*) FROM {}_mv".format(rel))
+    rows = int(rows.decode('utf8').strip())
+    print("{} rows in {}".format(rows, rel))
+    assert rows >= 1
+
+
+def run_sql(sql):
+    print("Running SQL: {}".format(sql))
+    return subprocess.check_output(["psql", "-h", "localhost", "-p", "4566",
+                                    "-d", "dev", "-U", "root", "--tuples-only", "-c", sql])
+
+
+demo = sys.argv[1]
+if demo in ['docker', 'iceberg-sink']:
+    print('Skipping test for `%s`' % demo)
+    sys.exit(0)
+file_dir = dirname(abspath(__file__))
+project_dir = dirname(file_dir)
+demo_dir = os.path.join(project_dir, demo)
+data_check = os.path.join(demo_dir, 'data_check')
+with open(data_check) as f:
+    relations = f.read().split(",")
+    for rel in relations:
+        create_mv(rel)
+    time.sleep(20)
+    for rel in relations:
+        check_mv(rel)
diff --git a/integration_tests/scripts/gen_pb_compose.py b/integration_tests/scripts/gen_pb_compose.py
new file mode 100644
index 0000000000000..4687c7688852d
--- /dev/null
+++ b/integration_tests/scripts/gen_pb_compose.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python3
+
+import argparse
+import os
+import sys
+from os.path import (dirname, abspath)
+
+
+file_server = """  file_server:
+    image: halverneus/static-file-server:latest
+    volumes:
+      - "./:/demo"
+    restart: always
+    environment:
+      FOLDER: /demo
+    container_name: file_server
+"""
+
+
+def gen_docker_compose(demo_compose: str, format: str):
+    content = ""
+    with open(demo_compose) as file:
+        for line in file:
+            line = line.replace(" - /datagen",
+                                " - /datagen --format {}".format(format))
+            if line == 'volumes:\n':
+                content += file_server
+            content += line
+    with open(demo_compose, 'w') as file:
+        file.write(content)
+
+
+demo = sys.argv[1]
+if demo == 'docker':
+    print('Will not generate docker-compose file for `docker`')
+    sys.exit(0)
+
+format = sys.argv[2]
+if format not in 
["json", "protobuf", "avro"]: + print('Invalid format: {}'.format(format)) + sys.exit(1) + +file_dir = dirname(abspath(__file__)) +project_dir = dirname(file_dir) +demo_dir = os.path.join(project_dir, demo) +demo_compose = os.path.join(demo_dir, 'docker-compose.yml') + +gen_docker_compose(demo_compose, format) diff --git a/integration_tests/scripts/run_demos.py b/integration_tests/scripts/run_demos.py new file mode 100644 index 0000000000000..fc2bea0d837c3 --- /dev/null +++ b/integration_tests/scripts/run_demos.py @@ -0,0 +1,111 @@ +#!/usr/bin/python3 + +from os.path import (dirname, abspath) +import os +import sys +import subprocess +from time import sleep +import argparse + + +def run_sql_file(f: str, dir: str): + print("Running SQL file: {}".format(f)) + # ON_ERROR_STOP=1 will let psql return error code when the query fails. + # https://stackoverflow.com/questions/37072245/check-return-status-of-psql-command-in-unix-shell-scripting + proc = subprocess.run(["psql", "-h", "localhost", "-p", "4566", + "-d", "dev", "-U", "root", "-f", f, "-v", "ON_ERROR_STOP=1"], check=True, + cwd=dir) + if proc.returncode != 0: + sys.exit(1) + + +def run_demo(demo: str, format: str): + file_dir = dirname(abspath(__file__)) + project_dir = dirname(file_dir) + demo_dir = os.path.join(project_dir, demo) + print("Running demo: {}".format(demo)) + + subprocess.run(["docker", "compose", "up", "-d"], cwd=demo_dir, check=True) + sleep(40) + + sql_files = ['create_source.sql', 'create_mv.sql', 'query.sql'] + for fname in sql_files: + if format == 'protobuf': + sql_file = os.path.join(demo_dir, "pb", fname) + if os.path.isfile(sql_file): + # Try to run the protobuf version first. + run_sql_file(sql_file, demo_dir) + sleep(10) + continue + # Fallback to default version when the protobuf version doesn't exist. + sql_file = os.path.join(demo_dir, fname) + run_sql_file(sql_file, demo_dir) + sleep(10) + + +def run_iceberg_demo(): + demo = "iceberg-sink" + file_dir = dirname(abspath(__file__)) + project_dir = dirname(file_dir) + demo_dir = os.path.join(project_dir, demo) + print("Running demo: iceberg-sink") + + subprocess.run(["docker", "compose", "up", "-d"], cwd=demo_dir, check=True) + sleep(40) + + subprocess.run(["docker", "compose", "exec", "spark", "bash", "/spark-script/run-sql-file.sh", "create-table"], + cwd=demo_dir, check=True) + + sql_files = ['create_source.sql', 'create_mv.sql', 'create_sink.sql'] + for fname in sql_files: + sql_file = os.path.join(demo_dir, fname) + print("executing sql: ", open(sql_file).read()) + run_sql_file(sql_file, demo_dir) + sleep(10) + + print("sink created. 
Waiting 2 minutes for ingestion")
+
+    # wait two minutes for ingestion
+    sleep(120)
+
+    query_sql = open(os.path.join(demo_dir, "iceberg-query.sql")).read()
+
+    print("querying Iceberg with Presto SQL: %s" % query_sql)
+
+    query_output_file_name = "query_output.txt"
+
+    query_output_file = open(query_output_file_name, "wb")
+
+    subprocess.run(["docker", "compose", "exec", "presto", "presto-cli", "--server", "localhost:8080", "--execute", query_sql],
+                   cwd=demo_dir, check=True, stdout=query_output_file)
+    query_output_file.close()
+
+    output_content = open(query_output_file_name).read()
+
+    print(output_content)
+
+    assert len(output_content.strip()) > 0
+
+
+arg_parser = argparse.ArgumentParser(description='Run the demo')
+arg_parser.add_argument('--format',
+                        metavar='format',
+                        type=str,
+                        help='the format of output data',
+                        default='json')
+arg_parser.add_argument('--case',
+                        metavar='case',
+                        type=str,
+                        help='the test case')
+args = arg_parser.parse_args()
+
+# disable telemetry in env
+os.environ['ENABLE_TELEMETRY'] = "false"
+
+if args.case == "iceberg-sink":
+    if args.format == "protobuf":
+        print("skip protobuf test for iceberg-sink")
+    else:
+        run_iceberg_demo()
+else:
+    run_demo(args.case, args.format)
diff --git a/integration_tests/superset/create_mv.sql b/integration_tests/superset/create_mv.sql
new file mode 100644
index 0000000000000..99f4f09cff42f
--- /dev/null
+++ b/integration_tests/superset/create_mv.sql
@@ -0,0 +1,13 @@
+-- A real-time dashboard of the total UV.
+CREATE MATERIALIZED VIEW total_user_visit_1min AS
+SELECT
+    window_start AS report_ts,
+    COUNT(DISTINCT user_id) as uv
+FROM
+    TUMBLE(
+        live_stream_metrics,
+        report_timestamp,
+        INTERVAL '1' MINUTE
+    )
+GROUP BY
+    window_start;
\ No newline at end of file
diff --git a/integration_tests/superset/create_source.sql b/integration_tests/superset/create_source.sql
new file mode 100644
index 0000000000000..3e230af07f1b0
--- /dev/null
+++ b/integration_tests/superset/create_source.sql
@@ -0,0 +1,26 @@
+CREATE SOURCE live_stream_metrics (
+    client_ip VARCHAR,
+    user_agent VARCHAR,
+    user_id VARCHAR,
+    -- The live room.
+    room_id VARCHAR,
+    -- Sent bits per second.
+    video_bps BIGINT,
+    -- Sent frames per second. Typically 30 fps.
+    video_fps BIGINT,
+    -- Round-trip time (in ms). 200ms is recommended.
+    video_rtt BIGINT,
+    -- Lost packets per second.
+    video_lost_pps BIGINT,
+    -- How long was the longest freeze (in ms).
+    video_longest_freeze_duration BIGINT,
+    -- Total freeze duration. 
+ video_total_freeze_duration BIGINT, + report_timestamp TIMESTAMPTZ, + country VARCHAR +) WITH ( + connector = 'kafka', + topic = 'live_stream_metrics', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT JSON; \ No newline at end of file diff --git a/integration_tests/superset/docker-compose.yml b/integration_tests/superset/docker-compose.yml new file mode 100644 index 0000000000000..f8309c4ca2abb --- /dev/null +++ b/integration_tests/superset/docker-compose.yml @@ -0,0 +1,137 @@ +x-superset-image: &superset-image apache/superset:${TAG:-latest-dev} +x-superset-depends-on: + &superset-depends-on + - db + - redis +x-superset-volumes: + # /app/pythonpath_docker will be appended to the PYTHONPATH in the final container + &superset-volumes + - ./docker:/app/docker + - superset_home:/app/superset_home + +version: "3.7" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + extends: + file: ../../docker/docker-compose.yml + service: message_queue + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --mode livestream --qps 2 kafka --brokers message_queue:29092 + restart: always + container_name: datagen + + # Superset-related services # + + redis: + image: redis:latest + container_name: superset_cache + restart: unless-stopped + volumes: + - redis:/data + + db: + env_file: docker/.env-non-dev + image: postgres:10 + container_name: superset_db + restart: unless-stopped + volumes: + - db_home:/var/lib/postgresql/data + + superset: + env_file: docker/.env-non-dev + image: *superset-image + container_name: superset_app + command: [ "/app/docker/docker-bootstrap.sh", "app-gunicorn" ] + user: "root" + restart: unless-stopped + ports: + - 8088:8088 + depends_on: *superset-depends-on + volumes: *superset-volumes + + superset-init: + image: *superset-image + container_name: superset_init + command: [ "/app/docker/docker-init.sh" ] + env_file: docker/.env-non-dev + depends_on: *superset-depends-on + user: "root" + volumes: *superset-volumes + + superset-worker: + image: *superset-image + container_name: superset_worker + command: [ "/app/docker/docker-bootstrap.sh", "worker" ] + env_file: docker/.env-non-dev + restart: unless-stopped + depends_on: *superset-depends-on + user: "root" + volumes: *superset-volumes + + superset-worker-beat: + image: *superset-image + container_name: superset_worker_beat + command: [ "/app/docker/docker-bootstrap.sh", "beat" ] + env_file: docker/.env-non-dev + restart: unless-stopped + depends_on: *superset-depends-on + user: "root" + volumes: *superset-volumes + +volumes: + superset_home: + external: false + db_home: + external: false + redis: + external: false + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + 
external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/superset/docker/.env-non-dev b/integration_tests/superset/docker/.env-non-dev new file mode 100644 index 0000000000000..c6e4739e90a9e --- /dev/null +++ b/integration_tests/superset/docker/.env-non-dev @@ -0,0 +1,46 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +COMPOSE_PROJECT_NAME=superset + +# database configurations (do not modify) +DATABASE_DB=superset +DATABASE_HOST=db +DATABASE_PASSWORD=superset +DATABASE_USER=superset + +# database engine specific environment variables +# change the below if you prefers another database engine +DATABASE_PORT=5432 +DATABASE_DIALECT=postgresql +POSTGRES_DB=superset +POSTGRES_USER=superset +POSTGRES_PASSWORD=superset +#MYSQL_DATABASE=superset +#MYSQL_USER=superset +#MYSQL_PASSWORD=superset +#MYSQL_RANDOM_ROOT_PASSWORD=yes + +# Add the mapped in /app/pythonpath_docker which allows devs to override stuff +PYTHONPATH=/app/pythonpath:/app/docker/pythonpath_dev +REDIS_HOST=redis +REDIS_PORT=6379 + +FLASK_ENV=production +SUPERSET_ENV=production +SUPERSET_LOAD_EXAMPLES=no +CYPRESS_CONFIG=false +SUPERSET_PORT=8088 diff --git a/integration_tests/superset/docker/docker-bootstrap.sh b/integration_tests/superset/docker/docker-bootstrap.sh new file mode 100755 index 0000000000000..67e5294be5fdc --- /dev/null +++ b/integration_tests/superset/docker/docker-bootstrap.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -eo pipefail + +REQUIREMENTS_LOCAL="/app/docker/requirements-local.txt" +# If Cypress run – overwrite the password for admin and export env variables +if [ "$CYPRESS_CONFIG" == "true" ]; then + export SUPERSET_CONFIG=tests.integration_tests.superset_test_config + export SUPERSET_TESTENV=true + export ENABLE_REACT_CRUD_VIEWS=true + export SUPERSET__SQLALCHEMY_DATABASE_URI=postgresql+psycopg2://superset:superset@db:5432/superset +fi +# +# Make sure we have dev requirements installed +# +if [ -f "${REQUIREMENTS_LOCAL}" ]; then + echo "Installing local overrides at ${REQUIREMENTS_LOCAL}" + pip install -r "${REQUIREMENTS_LOCAL}" +else + echo "Skipping local overrides" +fi + +if [[ "${1}" == "worker" ]]; then + echo "Starting Celery worker..." + celery --app=superset.tasks.celery_app:app worker -Ofair -l INFO +elif [[ "${1}" == "beat" ]]; then + echo "Starting Celery beat..." + celery --app=superset.tasks.celery_app:app beat --pidfile /tmp/celerybeat.pid -l INFO -s "${SUPERSET_HOME}"/celerybeat-schedule +elif [[ "${1}" == "app" ]]; then + echo "Starting web app..." + flask run -p 8088 --with-threads --reload --debugger --host=0.0.0.0 +elif [[ "${1}" == "app-gunicorn" ]]; then + echo "Starting web app..." + /usr/bin/run-server.sh +fi diff --git a/integration_tests/superset/docker/docker-init.sh b/integration_tests/superset/docker/docker-init.sh new file mode 100755 index 0000000000000..c928c1ba505af --- /dev/null +++ b/integration_tests/superset/docker/docker-init.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +set -e + +# +# Always install local overrides first +# +/app/docker/docker-bootstrap.sh + +STEP_CNT=4 + +echo_step() { +cat < str: + """Get the environment variable or raise exception.""" + try: + return os.environ[var_name] + except KeyError: + if default is not None: + return default + else: + error_msg = "The environment variable {} was missing, abort...".format( + var_name + ) + raise EnvironmentError(error_msg) + + +DATABASE_DIALECT = get_env_variable("DATABASE_DIALECT") +DATABASE_USER = get_env_variable("DATABASE_USER") +DATABASE_PASSWORD = get_env_variable("DATABASE_PASSWORD") +DATABASE_HOST = get_env_variable("DATABASE_HOST") +DATABASE_PORT = get_env_variable("DATABASE_PORT") +DATABASE_DB = get_env_variable("DATABASE_DB") + +# The SQLAlchemy connection string. 
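+# With the values in docker/.env-non-dev this resolves to "postgresql://superset:superset@db:5432/superset".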
+SQLALCHEMY_DATABASE_URI = "%s://%s:%s@%s:%s/%s" % ( + DATABASE_DIALECT, + DATABASE_USER, + DATABASE_PASSWORD, + DATABASE_HOST, + DATABASE_PORT, + DATABASE_DB, +) + +REDIS_HOST = get_env_variable("REDIS_HOST") +REDIS_PORT = get_env_variable("REDIS_PORT") +REDIS_CELERY_DB = get_env_variable("REDIS_CELERY_DB", "0") +REDIS_RESULTS_DB = get_env_variable("REDIS_RESULTS_DB", "1") + +RESULTS_BACKEND = FileSystemCache("/app/superset_home/sqllab") + +CACHE_CONFIG = { + "CACHE_TYPE": "redis", + "CACHE_DEFAULT_TIMEOUT": 300, + "CACHE_KEY_PREFIX": "superset_", + "CACHE_REDIS_HOST": REDIS_HOST, + "CACHE_REDIS_PORT": REDIS_PORT, + "CACHE_REDIS_DB": REDIS_RESULTS_DB, +} +DATA_CACHE_CONFIG = CACHE_CONFIG + + +class CeleryConfig(object): + BROKER_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_CELERY_DB}" + CELERY_IMPORTS = ("superset.sql_lab", "superset.tasks") + CELERY_RESULT_BACKEND = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_RESULTS_DB}" + CELERYD_LOG_LEVEL = "DEBUG" + CELERYD_PREFETCH_MULTIPLIER = 1 + CELERY_ACKS_LATE = False + CELERYBEAT_SCHEDULE = { + "reports.scheduler": { + "task": "reports.scheduler", + "schedule": crontab(minute="*", hour="*"), + }, + "reports.prune_log": { + "task": "reports.prune_log", + "schedule": crontab(minute=10, hour=0), + }, + } + + +CELERY_CONFIG = CeleryConfig + +FEATURE_FLAGS = {"ALERT_REPORTS": True} +ALERT_REPORTS_NOTIFICATION_DRY_RUN = True +WEBDRIVER_BASEURL = "http://superset:8088/" +# The base URL for the email report hyperlinks. +WEBDRIVER_BASEURL_USER_FRIENDLY = WEBDRIVER_BASEURL + +SQLLAB_CTAS_NO_LIMIT = True + +# +# Optionally import superset_config_docker.py (which will have been included on +# the PYTHONPATH) in order to allow for local settings to be overridden +# +try: + import superset_config_docker + from superset_config_docker import * # noqa + + logger.info( + f"Loaded your Docker configuration at " f"[{superset_config_docker.__file__}]" + ) +except ImportError: + logger.info("Using default Docker config...") diff --git a/integration_tests/superset/docker/requirements-local.txt b/integration_tests/superset/docker/requirements-local.txt new file mode 100644 index 0000000000000..8af33f481367f --- /dev/null +++ b/integration_tests/superset/docker/requirements-local.txt @@ -0,0 +1 @@ +sqlalchemy-risingwave diff --git a/integration_tests/superset/docker/run-server.sh b/integration_tests/superset/docker/run-server.sh new file mode 100644 index 0000000000000..064f47b9c2cbc --- /dev/null +++ b/integration_tests/superset/docker/run-server.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# +HYPHEN_SYMBOL='-' + +gunicorn \ + --bind "${SUPERSET_BIND_ADDRESS:-0.0.0.0}:${SUPERSET_PORT:-8088}" \ + --access-logfile "${ACCESS_LOG_FILE:-$HYPHEN_SYMBOL}" \ + --error-logfile "${ERROR_LOG_FILE:-$HYPHEN_SYMBOL}" \ + --workers ${SERVER_WORKER_AMOUNT:-1} \ + --worker-class ${SERVER_WORKER_CLASS:-gthread} \ + --threads ${SERVER_THREADS_AMOUNT:-20} \ + --timeout ${GUNICORN_TIMEOUT:-60} \ + --keep-alive ${GUNICORN_KEEPALIVE:-2} \ + --limit-request-line ${SERVER_LIMIT_REQUEST_LINE:-0} \ + --limit-request-field_size ${SERVER_LIMIT_REQUEST_FIELD_SIZE:-0} \ + "${FLASK_APP}" diff --git a/integration_tests/superset/query.sql b/integration_tests/superset/query.sql new file mode 100644 index 0000000000000..16a7e1efa27a6 --- /dev/null +++ b/integration_tests/superset/query.sql @@ -0,0 +1,6 @@ +select + * +FROM + total_user_visit_1min +LIMIT + 1; \ No newline at end of file diff --git a/integration_tests/tidb-cdc-sink/config/changefeed.toml b/integration_tests/tidb-cdc-sink/config/changefeed.toml new file mode 100644 index 0000000000000..b84615c14e3df --- /dev/null +++ b/integration_tests/tidb-cdc-sink/config/changefeed.toml @@ -0,0 +1,4 @@ +[sink] +dispatchers = [ + {matcher = ['*.*'], topic = "ticdc_{schema}_{table}"}, +] diff --git a/integration_tests/tidb-cdc-sink/config/pd.toml b/integration_tests/tidb-cdc-sink/config/pd.toml new file mode 100644 index 0000000000000..19f8a1119f99c --- /dev/null +++ b/integration_tests/tidb-cdc-sink/config/pd.toml @@ -0,0 +1,86 @@ +# PD Configuration. + +name = "pd" +data-dir = "default.pd" + +client-urls = "http://127.0.0.1:2379" +# if not set, use ${client-urls} +advertise-client-urls = "" + +peer-urls = "http://127.0.0.1:2380" +# if not set, use ${peer-urls} +advertise-peer-urls = "" + +initial-cluster = "pd=http://127.0.0.1:2380" +initial-cluster-state = "new" + +lease = 3 +tso-save-interval = "3s" + +[security] +# Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty +cacert-path = "" +# Path of file that contains X509 certificate in PEM format. +cert-path = "" +# Path of file that contains X509 key in PEM format. +key-path = "" + +[log] +level = "error" + +# log format, one of json, text, console +#format = "text" + +# disable automatic timestamps in output +#disable-timestamp = false + +# file logging +[log.file] +#filename = "" +# max log file size in MB +#max-size = 300 +# max log file keep days +#max-days = 28 +# maximum number of old log files to retain +#max-backups = 7 +# rotate log by day +#log-rotate = true + +[metric] +# prometheus client push interval, set "0s" to disable prometheus. +interval = "15s" +# prometheus pushgateway address, leaves it empty will disable prometheus. +address = "prometheus-0:9091" + +[schedule] +max-merge-region-size = 0 +split-merge-interval = "1h" +max-snapshot-count = 3 +max-pending-peer-count = 16 +max-store-down-time = "30m" +leader-schedule-limit = 4 +region-schedule-limit = 4 +replica-schedule-limit = 8 +merge-schedule-limit = 8 +tolerant-size-ratio = 5.0 + +# customized schedulers, the format is as below +# if empty, it will use balance-leader, balance-region, hot-region as default +# [[schedule.schedulers]] +# type = "evict-leader" +# args = ["1"] + +[replication] +# The number of replicas for each region. +max-replicas = 3 +# The label keys specified the location of a store. +# The placement priorities is implied by the order of label keys. 
+# For example, ["zone", "rack"] means that we should place replicas to
+# different zones first, then to different racks if we don't have enough zones.
+location-labels = []
+
+[label-property]
+# Do not assign region leaders to stores that have these tags.
+# [[label-property.reject-leader]]
+# key = "zone"
+# value = "cn1"
diff --git a/integration_tests/tidb-cdc-sink/config/tidb.toml b/integration_tests/tidb-cdc-sink/config/tidb.toml
new file mode 100644
index 0000000000000..c58a052b6e6a3
--- /dev/null
+++ b/integration_tests/tidb-cdc-sink/config/tidb.toml
@@ -0,0 +1,239 @@
+# TiDB Configuration.
+
+# TiDB server host.
+host = "0.0.0.0"
+
+# TiDB server port.
+port = 4000
+
+# Registered store name, [tikv, mocktikv]
+store = "mocktikv"
+
+# TiDB storage path.
+path = "/tmp/tidb"
+
+# The socket file to use for connection.
+socket = ""
+
+# Run ddl worker on this tidb-server.
+run-ddl = true
+
+# Schema lease duration. Very dangerous to change; only do so if you know what you are doing.
+lease = "0"
+
+# When creating a table, split a separate region for it. It is recommended to
+# turn off this option if there will be a large number of tables created.
+split-table = true
+
+# The limit on concurrently executing sessions.
+token-limit = 1000
+
+# Only print a log when out of memory quota.
+# Valid options: ["log", "cancel"]
+oom-action = "log"
+
+# Set the memory quota for a query in bytes. Default: 32GB
+mem-quota-query = 34359738368
+
+# Enable coprocessor streaming.
+enable-streaming = false
+
+# Set system variable 'lower_case_table_names'
+lower-case-table-names = 2
+
+[log]
+# Log level: debug, info, warn, error, fatal.
+level = "error"
+
+# Log format, one of json, text, console.
+format = "text"
+
+# Disable automatic timestamp in output
+disable-timestamp = false
+
+# Stores slow query log into separate files.
+slow-query-file = ""
+
+# Queries with execution time greater than this value will be logged. (Milliseconds)
+slow-threshold = 300
+
+# Queries with internal result greater than this value will be logged.
+expensive-threshold = 10000
+
+# Maximum query length recorded in log.
+query-log-max-len = 2048
+
+# File logging.
+[log.file]
+# Log file name.
+filename = ""
+
+# Max log file size in MB (upper limit to 4096MB).
+max-size = 300
+
+# Max log file keep days. No clean up by default.
+max-days = 0
+
+# Maximum number of old log files to retain. No clean up by default.
+max-backups = 0
+
+# Rotate log by day
+log-rotate = true
+
+[security]
+# Path of file that contains the list of trusted SSL CAs for connection with mysql client.
+ssl-ca = ""
+
+# Path of file that contains X509 certificate in PEM format for connection with mysql client.
+ssl-cert = ""
+
+# Path of file that contains X509 key in PEM format for connection with mysql client.
+ssl-key = ""
+
+# Path of file that contains the list of trusted SSL CAs for connection with cluster components.
+cluster-ssl-ca = ""
+
+# Path of file that contains X509 certificate in PEM format for connection with cluster components.
+cluster-ssl-cert = ""
+
+# Path of file that contains X509 key in PEM format for connection with cluster components.
+cluster-ssl-key = ""
+
+[status]
+# Whether to enable the status report HTTP service.
+report-status = true
+
+# TiDB status port.
+status-port = 10080
+
+# Prometheus pushgateway address; leaving it empty will disable prometheus push.
+metrics-addr = "prometheus-0:9091"
+
+# Prometheus client push interval in seconds; set "0" to disable prometheus push.
+metrics-interval = 15
+
+[performance]
+# Max CPUs to use; 0 means use the number of CPUs in the machine.
+max-procs = 0
+# StmtCountLimit limits the max count of statements inside a transaction.
+stmt-count-limit = 5000
+
+# Set keep alive option for tcp connection.
+tcp-keep-alive = true
+
+# The maximum number of retries when committing a transaction.
+retry-limit = 10
+
+# Whether to support cartesian products.
+cross-join = true
+
+# Stats lease duration, which influences the time of analyze and stats load.
+stats-lease = "3s"
+
+# Run auto analyze worker on this tidb-server.
+run-auto-analyze = true
+
+# Probability to use the query feedback to update stats, 0 or 1 for always false/true.
+feedback-probability = 0.0
+
+# The max number of query feedback records cached in memory.
+query-feedback-limit = 1024
+
+# Pseudo stats will be used if the ratio between the modify count and
+# row count in statistics of a table is greater than it.
+pseudo-estimate-ratio = 0.7
+
+[proxy-protocol]
+# PROXY protocol acceptable client networks.
+# Empty string means disable PROXY protocol, * means all networks.
+networks = ""
+
+# PROXY protocol header read timeout, in seconds
+header-timeout = 5
+
+[plan-cache]
+enabled = false
+capacity = 2560
+shards = 256
+
+[prepared-plan-cache]
+enabled = false
+capacity = 100
+
+[opentracing]
+# Enable opentracing.
+enable = false
+
+# Whether to enable the rpc metrics.
+rpc-metrics = false
+
+[opentracing.sampler]
+# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
+type = "const"
+
+# Param is a value passed to the sampler.
+# Valid values for Param field are:
+# - for "const" sampler, 0 or 1 for always false/true respectively
+# - for "probabilistic" sampler, a probability between 0 and 1
+# - for "rateLimiting" sampler, the number of spans per second
+# - for "remote" sampler, param is the same as for "probabilistic"
+#   and indicates the initial sampling rate before the actual one
+#   is received from the mothership
+param = 1.0
+
+# SamplingServerURL is the address of jaeger-agent's HTTP sampling server
+sampling-server-url = ""
+
+# MaxOperations is the maximum number of operations that the sampler
+# will keep track of. If an operation is not tracked, a default probabilistic
+# sampler will be used rather than the per operation specific sampler.
+max-operations = 0
+
+# SamplingRefreshInterval controls how often the remotely controlled sampler will poll
+# jaeger-agent for the appropriate sampling strategy.
+sampling-refresh-interval = 0
+
+[opentracing.reporter]
+# QueueSize controls how many spans the reporter can keep in memory before it starts dropping
+# new spans. The queue is continuously drained by a background go-routine, as fast as spans
+# can be sent out of process.
+queue-size = 0
+
+# BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
+# It is generally not useful, as it only matters for very low traffic services.
+buffer-flush-interval = 0
+
+# LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
+# and logs all submitted spans. Main Configuration.Logger must be initialized in the code
+# for this option to have any effect.
+log-spans = false
+
+# LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address
+local-agent-host-port = ""
+
+[tikv-client]
+# Max gRPC connections that will be established with each tikv-server.
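+# (With the three TiKV nodes defined in this demo's docker-compose.yml, that
+# allows up to 48 gRPC connections per TiDB instance.)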
+grpc-connection-count = 16
+
+# After a duration of this time in seconds, if the client doesn't see any activity it pings
+# the server to see if the transport is still alive.
+grpc-keepalive-time = 10
+
+# After having pinged for a keepalive check, the client waits for a duration of Timeout in seconds,
+# and if no activity is seen even after that, the connection is closed.
+grpc-keepalive-timeout = 3
+
+# max time for a commit command; must be at least twice the raft election timeout.
+commit-timeout = "41s"
+
+[binlog]
+
+# Socket file to write binlog.
+binlog-socket = ""
+
+# WriteTimeout specifies how long it will wait for writing binlog to pump.
+write-timeout = "15s"
+
+# If IgnoreError is true, when writing binlog hits an error, TiDB stops writing binlog
+# but still provides service.
+ignore-error = false
diff --git a/integration_tests/tidb-cdc-sink/config/tikv.toml b/integration_tests/tidb-cdc-sink/config/tikv.toml
new file mode 100644
index 0000000000000..0757990597098
--- /dev/null
+++ b/integration_tests/tidb-cdc-sink/config/tikv.toml
@@ -0,0 +1,497 @@
+# TiKV config template
+# Human-readable big numbers:
+#   File size(based on byte): KB, MB, GB, TB, PB
+#   e.g.: 1_048_576 = "1MB"
+#   Time(based on ms): ms, s, m, h
+#   e.g.: 78_000 = "1.3m"
+
+# log level: trace, debug, info, warn, error, off.
+log-level = "error"
+# file to store log, write to stderr if it's empty.
+# log-file = ""
+
+[readpool.storage]
+# size of thread pool for high-priority operations
+# high-concurrency = 4
+# size of thread pool for normal-priority operations
+# normal-concurrency = 4
+# size of thread pool for low-priority operations
+# low-concurrency = 4
+# max running high-priority operations, reject if exceeded
+# max-tasks-high = 8000
+# max running normal-priority operations, reject if exceeded
+# max-tasks-normal = 8000
+# max running low-priority operations, reject if exceeded
+# max-tasks-low = 8000
+# stack size for each thread in the thread pool
+# stack-size = "10MB"
+
+[readpool.coprocessor]
+# Notice: if CPU_NUM > 8, default thread pool size for coprocessors
+# will be set to CPU_NUM * 0.8.
+
+# high-concurrency = 8
+# normal-concurrency = 8
+# low-concurrency = 8
+# max-tasks-high = 16000
+# max-tasks-normal = 16000
+# max-tasks-low = 16000
+# stack-size = "10MB"
+
+[server]
+# set listening address.
+# addr = "127.0.0.1:20160"
+# set advertise listening address for client communication; if not set, use addr instead.
+# advertise-addr = ""
+# notify capacity, 40960 is suitable for about 7000 regions.
+# notify-capacity = 40960
+# maximum number of messages that can be processed in one tick.
+# messages-per-tick = 4096
+
+# compression type for grpc channel, available values are no, deflate and gzip.
+# grpc-compression-type = "no"
+# size of thread pool for grpc server.
+# grpc-concurrency = 4
+# The number of max concurrent streams/requests on a client connection.
+# grpc-concurrent-stream = 1024
+# The number of connections with each tikv server to send raft messages.
+# grpc-raft-conn-num = 10
+# Amount to read ahead on individual grpc streams.
+# grpc-stream-initial-window-size = "2MB"
+
+# How many snapshots can be sent concurrently.
+# concurrent-send-snap-limit = 32
+# How many snapshots can be received concurrently.
+# concurrent-recv-snap-limit = 32
+
+# max count of tasks being handled; new tasks will be rejected.
+# end-point-max-tasks = 2000
+
+# max recursion level allowed when decoding dag expression
+# end-point-recursion-limit = 1000
+
+# max time to handle a coprocessor request before timeout
+# end-point-request-max-handle-duration = "60s"
+
+# the max bytes that a snapshot can be written to disk in one second,
+# should be set based on your disk performance
+# snap-max-write-bytes-per-sec = "100MB"
+
+# set attributes about this server, e.g. { zone = "us-west-1", disk = "ssd" }.
+# labels = {}
+
+[storage]
+# set the path to the rocksdb directory.
+# data-dir = "/tmp/tikv/store"
+
+# notify capacity of scheduler's channel
+# scheduler-notify-capacity = 10240
+
+# maximum number of messages that can be processed in one tick
+# scheduler-messages-per-tick = 1024
+
+# the number of slots in scheduler latches, concurrency control for write.
+# scheduler-concurrency = 2048000
+
+# scheduler's worker pool size; should be increased in heavy write cases,
+# and should be less than the total cpu cores.
+# scheduler-worker-pool-size = 4
+
+# When the pending write bytes exceed this threshold,
+# the "scheduler too busy" error is displayed.
+# scheduler-pending-write-threshold = "100MB"
+
+[pd]
+# pd endpoints
+# endpoints = []
+
+[metric]
+# the Prometheus client push interval. Setting the value to 0s stops Prometheus client from pushing.
+# interval = "15s"
+# the Prometheus pushgateway address. Leaving it empty stops Prometheus client from pushing.
+address = "prometheus-0:9091"
+# the Prometheus client push job name. Note: A node id will automatically be appended, e.g., "tikv_1".
+# job = "tikv"
+
+[raftstore]
+# true (default value) for high reliability; this can prevent data loss on power failure.
+# sync-log = true
+
+# set the path to the raftdb directory, default value is data-dir/raft
+# raftdb-path = ""
+
+# set store capacity; if not set, use disk capacity.
+# capacity = 0
+
+# notify capacity, 40960 is suitable for about 7000 regions.
+# notify-capacity = 40960
+
+# maximum number of messages that can be processed in one tick.
+# messages-per-tick = 4096
+
+# Region heartbeat tick interval for reporting to pd.
+# pd-heartbeat-tick-interval = "60s"
+# Store heartbeat tick interval for reporting to pd.
+# pd-store-heartbeat-tick-interval = "10s"
+
+# When the region size change exceeds region-split-check-diff, we should check
+# whether the region should be split or not.
+# region-split-check-diff = "6MB"
+
+# Interval to check whether a region needs to be split or not.
+# split-region-check-tick-interval = "10s"
+
+# When a raft entry exceeds the max size, reject proposing the entry.
+# raft-entry-max-size = "8MB"
+
+# Interval to gc unnecessary raft log.
+# raft-log-gc-tick-interval = "10s"
+# A threshold to gc stale raft log, must be >= 1.
+# raft-log-gc-threshold = 50
+# When the entry count exceeds this value, gc will be forcibly triggered.
+# raft-log-gc-count-limit = 72000
+# When the approximate size of raft log entries exceeds this value, gc will be forcibly triggered.
+# It's recommended to set it to 3/4 of region-split-size.
+# raft-log-gc-size-limit = "72MB"
+
+# When a peer hasn't been active for max-peer-down-duration,
+# we will consider this peer to be down and report it to pd.
+# max-peer-down-duration = "5m"
+
+# Interval to check whether to start a manual compaction for a region.
+# region-compact-check-interval = "5m"
+# Number of regions to check each time.
+# region-compact-check-step = 100
+# The minimum number of delete tombstones to trigger manual compaction.
+# region-compact-min-tombstones = 10000
+# Interval to check whether a manual compaction should be started for the lock column family;
+# if the written bytes for the lock column family reach lock-cf-compact-threshold,
+# a manual compaction of the lock column family will be fired.
+# lock-cf-compact-interval = "10m"
+# lock-cf-compact-bytes-threshold = "256MB"
+
+# Interval (s) at which to check whether region data is consistent.
+# consistency-check-interval = 0
+
+# Use delete range to drop a large number of continuous keys.
+# use-delete-range = false
+
+# delay time before deleting a stale peer
+# clean-stale-peer-delay = "10m"
+
+# Interval to clean up import sst files.
+# cleanup-import-sst-interval = "10m"
+
+[coprocessor]
+# When it is true, it will try to split a region with table prefix if
+# that region crosses tables. It is recommended to turn off this option
+# if there will be a large number of tables created.
+# split-region-on-table = true
+# When the region's size exceeds region-max-size, we will split the region
+# into two, where the left region's size will be region-split-size or a little
+# bit smaller.
+# region-max-size = "144MB"
+# region-split-size = "96MB"
+
+[rocksdb]
+# Maximum number of concurrent background jobs (compactions and flushes)
+# max-background-jobs = 8
+
+# This value represents the maximum number of threads that will concurrently perform a
+# compaction job by breaking it into multiple, smaller ones that are run simultaneously.
+# Default: 1 (i.e. no subcompactions)
+# max-sub-compactions = 1
+
+# Number of open files that can be used by the DB. You may need to
+# increase this if your database has a large working set. Value -1 means
+# files opened are always kept open. You can estimate the number of files based
+# on target_file_size_base and target_file_size_multiplier for level-based
+# compaction.
+# If max-open-files = -1, RocksDB will prefetch index and filter blocks into
+# block cache at startup, so if your database has a large working set, it will
+# take several minutes to open the db.
+max-open-files = 1024
+
+# Max size of rocksdb's MANIFEST file.
+# For a detailed explanation please refer to https://github.com/facebook/rocksdb/wiki/MANIFEST
+# max-manifest-file-size = "20MB"
+
+# If true, the database will be created if it is missing.
+# create-if-missing = true
+
+# rocksdb wal recovery mode
+# 0 : TolerateCorruptedTailRecords, tolerate incomplete record in trailing data on all logs;
+# 1 : AbsoluteConsistency, We don't expect to find any corruption in the WAL;
+# 2 : PointInTimeRecovery, Recover to point-in-time consistency;
+# 3 : SkipAnyCorruptedRecords, Recovery after a disaster;
+# wal-recovery-mode = 2
+
+# rocksdb write-ahead logs dir path
+# This specifies the absolute dir path for write-ahead logs (WAL).
+# If it is empty, the log files will be in the same dir as data.
+# When you set the path to the rocksdb directory in memory like in /dev/shm, you may want to set
+# wal-dir to a directory on a persistent storage.
+# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database
+# wal-dir = "/tmp/tikv/store"
+
+# The following two fields affect how archived write-ahead logs will be deleted.
+# 1. If both set to 0, logs will be deleted asap and will not get into the archive.
+# 2. If wal-ttl-seconds is 0 and wal-size-limit is not 0,
+#    WAL files will be checked every 10 min and if total size is greater
+#    than wal-size-limit, they will be deleted starting with the
+#    earliest until size_limit is met. All empty files will be deleted.
+# 3. If wal-ttl-seconds is not 0 and wal-size-limit is 0, then
+#    WAL files will be checked every wal-ttl-seconds / 2 and those that
+#    are older than wal-ttl-seconds will be deleted.
+# 4. If both are not 0, WAL files will be checked every 10 min and both
+#    checks will be performed with ttl being first.
+# When you set the path to the rocksdb directory in memory like in /dev/shm, you may want to set
+# wal-ttl-seconds to a value greater than 0 (like 86400) and back up your db on a regular basis.
+# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database
+# wal-ttl-seconds = 0
+# wal-size-limit = 0
+
+# rocksdb max total wal size
+# max-total-wal-size = "4GB"
+
+# Rocksdb Statistics provides cumulative stats over time.
+# Turning statistics on introduces about 5%-10% overhead for RocksDB,
+# but it is worth it to know the internal status of RocksDB.
+# enable-statistics = true
+
+# Dump statistics periodically in information logs.
+# Same as rocksdb's default value (10 min).
+# stats-dump-period = "10m"
+
+# Per the RocksDB FAQ (https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ),
+# if you want to use rocksdb on multiple disks or spinning disks, you should set this value to at
+# least 2MB;
+# compaction-readahead-size = 0
+
+# This is the maximum buffer size that is used by WritableFileWrite
+# writable-file-max-buffer-size = "1MB"
+
+# Use O_DIRECT for both reads and writes in background flush and compactions
+# use-direct-io-for-flush-and-compaction = false
+
+# Limit the disk IO of compaction and flush. Compaction and flush can cause
+# terrible spikes if they exceed a certain threshold. Consider setting this to
+# 50% ~ 80% of the disk throughput for a more stable result. However, in heavy
+# write workloads, limiting compaction and flush speed can cause write stalls too.
+# rate-bytes-per-sec = 0
+
+# Enable or disable the pipelined write
+# enable-pipelined-write = true
+
+# Allows OS to incrementally sync files to disk while they are being
+# written, asynchronously, in the background.
+# bytes-per-sync = "0MB"
+
+# Allows OS to incrementally sync WAL to disk while it is being written.
+# wal-bytes-per-sync = "0KB"
+
+# Specify the maximal size of the Rocksdb info log file. If the log file
+# is larger than `max_log_file_size`, a new info log file will be created.
+# If max_log_file_size == 0, all logs will be written to one log file.
+# Default: 1GB
+# info-log-max-size = "1GB"
+
+# Time for the Rocksdb info log file to roll (in seconds).
+# If specified with a non-zero value, the log file will be rolled
+# if it has been active longer than `log_file_time_to_roll`.
+# Default: 0 (disabled)
+# info-log-roll-time = "0"
+
+# Maximal Rocksdb info log files to be kept.
+# Default: 10
+# info-log-keep-log-file-num = 10
+
+# This specifies the Rocksdb info LOG dir.
+# If it is empty, the log files will be in the same dir as data.
+# If it is non-empty, the log files will be in the specified dir,
+# and the db data dir's absolute path will be used as the log file
+# name's prefix.
+# Default: empty
+# info-log-dir = ""
+
+# Column Family default, used to store the actual data of the database.
+[rocksdb.defaultcf]
+# compression method (if any) is used to compress a block.
+# no: kNoCompression
+# snappy: kSnappyCompression
+# zlib: kZlibCompression
+# bzip2: kBZip2Compression
+# lz4: kLZ4Compression
+# lz4hc: kLZ4HCCompression
+# zstd: kZSTD
+
+# per level compression
+# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
+
+# Approximate size of user data packed per block. Note that the
+# block size specified here corresponds to uncompressed data.
+# block-size = "64KB"
+
+# If you're doing point lookups you definitely want to turn bloom filters on; we use
+# bloom filters to avoid unnecessary disk reads. Default bits_per_key is 10, which
+# yields a ~1% false positive rate. Larger bits_per_key values will reduce the false positive
+# rate, but increase memory usage and space amplification.
+# bloom-filter-bits-per-key = 10
+
+# false means one bloom filter per sst file; true means every block has a corresponding bloom filter
+# block-based-bloom-filter = false
+
+# level0-file-num-compaction-trigger = 4
+
+# Soft limit on number of level-0 files. We start slowing down writes at this point.
+# level0-slowdown-writes-trigger = 20
+
+# Maximum number of level-0 files. We stop writes at this point.
+# level0-stop-writes-trigger = 36
+
+# Amount of data to build up in memory (backed by an unsorted log
+# on disk) before converting to a sorted on-disk file.
+# write-buffer-size = "128MB"
+
+# The maximum number of write buffers that are built up in memory.
+# max-write-buffer-number = 5
+
+# The minimum number of write buffers that will be merged together
+# before writing to storage.
+# min-write-buffer-number-to-merge = 1
+
+# Control maximum total data size for base level (level 1).
+# max-bytes-for-level-base = "512MB"
+
+# Target file size for compaction.
+# target-file-size-base = "8MB"
+
+# Max bytes for compaction.max_compaction_bytes
+# max-compaction-bytes = "2GB"
+
+# There are four different algorithms to pick files to compact.
+# 0 : ByCompensatedSize
+# 1 : OldestLargestSeqFirst
+# 2 : OldestSmallestSeqFirst
+# 3 : MinOverlappingRatio
+# compaction-pri = 3
+
+# The block cache is used to cache uncompressed blocks; a big block cache can speed up reads.
+# In normal cases, tune it to 30%-50% of the system's total memory.
+# block-cache-size = "1GB"
+
+# Indicates whether to put index/filter blocks into the block cache.
+# If not specified, each "table reader" object will pre-load the index/filter block
+# during table initialization.
+# cache-index-and-filter-blocks = true
+
+# Pin level0 filter and index blocks in cache.
+# pin-l0-filter-and-index-blocks = true
+
+# Enable read amplification statistics.
+# value  =>  memory usage (percentage of loaded blocks memory)
+# 1      =>  12.50 %
+# 2      =>  06.25 %
+# 4      =>  03.12 %
+# 8      =>  01.56 %
+# 16     =>  00.78 %
+# read-amp-bytes-per-bit = 0
+
+# Pick target size of each level dynamically.
+# dynamic-level-bytes = true
+
+# Options for Column Family write.
+# Column Family write is used to store commit information in the MVCC model.
+[rocksdb.writecf]
+# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
+# block-size = "64KB"
+# write-buffer-size = "128MB"
+# max-write-buffer-number = 5
+# min-write-buffer-number-to-merge = 1
+# max-bytes-for-level-base = "512MB"
+# target-file-size-base = "8MB"
+
+# In normal cases, tune it to 10%-30% of the system's total memory.
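+# (writecf only stores MVCC commit metadata, so it gets a much smaller block
+# cache than defaultcf, which holds the actual row data.)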
+# block-cache-size = "256MB"
+# level0-file-num-compaction-trigger = 4
+# level0-slowdown-writes-trigger = 20
+# level0-stop-writes-trigger = 36
+# cache-index-and-filter-blocks = true
+# pin-l0-filter-and-index-blocks = true
+# compaction-pri = 3
+# read-amp-bytes-per-bit = 0
+# dynamic-level-bytes = true
+
+[rocksdb.lockcf]
+# compression-per-level = ["no", "no", "no", "no", "no", "no", "no"]
+# block-size = "16KB"
+# write-buffer-size = "128MB"
+# max-write-buffer-number = 5
+# min-write-buffer-number-to-merge = 1
+# max-bytes-for-level-base = "128MB"
+# target-file-size-base = "8MB"
+# block-cache-size = "256MB"
+# level0-file-num-compaction-trigger = 1
+# level0-slowdown-writes-trigger = 20
+# level0-stop-writes-trigger = 36
+# cache-index-and-filter-blocks = true
+# pin-l0-filter-and-index-blocks = true
+# compaction-pri = 0
+# read-amp-bytes-per-bit = 0
+# dynamic-level-bytes = true
+
+[raftdb]
+# max-sub-compactions = 1
+max-open-files = 1024
+# max-manifest-file-size = "20MB"
+# create-if-missing = true
+
+# enable-statistics = true
+# stats-dump-period = "10m"
+
+# compaction-readahead-size = 0
+# writable-file-max-buffer-size = "1MB"
+# use-direct-io-for-flush-and-compaction = false
+# enable-pipelined-write = true
+# allow-concurrent-memtable-write = false
+# bytes-per-sync = "0MB"
+# wal-bytes-per-sync = "0KB"
+
+# info-log-max-size = "1GB"
+# info-log-roll-time = "0"
+# info-log-keep-log-file-num = 10
+# info-log-dir = ""
+
+[raftdb.defaultcf]
+# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
+# block-size = "64KB"
+# write-buffer-size = "128MB"
+# max-write-buffer-number = 5
+# min-write-buffer-number-to-merge = 1
+# max-bytes-for-level-base = "512MB"
+# target-file-size-base = "8MB"
+
+# should be tuned to 256MB~2GB.
+# block-cache-size = "256MB"
+# level0-file-num-compaction-trigger = 4
+# level0-slowdown-writes-trigger = 20
+# level0-stop-writes-trigger = 36
+# cache-index-and-filter-blocks = true
+# pin-l0-filter-and-index-blocks = true
+# compaction-pri = 0
+# read-amp-bytes-per-bit = 0
+# dynamic-level-bytes = true
+
+[security]
+# set the path for certificates. Empty string means disabling secure connections.
+# ca-path = ""
+# cert-path = ""
+# key-path = ""
+
+[import]
+# the directory to store importing kv data.
+# import-dir = "/tmp/tikv/import"
+# number of threads to handle RPC requests.
+# num-threads = 8
+# stream channel window size, stream will be blocked on channel full.
+# stream-channel-window = 128
diff --git a/integration_tests/tidb-cdc-sink/create_mv.sql b/integration_tests/tidb-cdc-sink/create_mv.sql
new file mode 100644
index 0000000000000..042da6cf585b3
--- /dev/null
+++ b/integration_tests/tidb-cdc-sink/create_mv.sql
@@ -0,0 +1,31 @@
+--
+-- Find the top 10 hottest hashtags.
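+-- Tweets and their authors arrive through the TiCDC -> Kafka (canal-json)
+-- tables defined in create_source.sql; they are joined, bucketed into
+-- 5-minute tumbling windows via TUMBLE(), counted per hashtag, and the
+-- result is written back to TiDB through the JDBC sink declared below.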
+-- +CREATE MATERIALIZED VIEW hot_hashtags AS WITH tags AS ( + SELECT + unnest(regexp_matches(tweet.text, '#\w+', 'g')) AS hashtag, + tweet.created_at AS created_at + FROM + tweet JOIN user + ON + tweet.author_id = user.id +) +SELECT + hashtag, + COUNT(*) AS hashtag_occurrences, + window_start +FROM + TUMBLE(tags, created_at, INTERVAL '5 minute') +GROUP BY + hashtag, + window_start +ORDER BY + hashtag_occurrences; + + +CREATE SINK hot_hashtags_sink FROM hot_hashtags +WITH ( + connector='jdbc', + jdbc.url='jdbc:mysql://tidb:4000/test?user=root&password=', + table.name='hot_hashtags' +); diff --git a/integration_tests/tidb-cdc-sink/create_source.sql b/integration_tests/tidb-cdc-sink/create_source.sql new file mode 100644 index 0000000000000..d58996f799d7d --- /dev/null +++ b/integration_tests/tidb-cdc-sink/create_source.sql @@ -0,0 +1,27 @@ +CREATE TABLE tweet ( + id varchar, + text varchar, + lang varchar, + created_at timestamp, + author_id varchar, + PRIMARY KEY (id) +) WITH ( + connector='kafka', + topic='ticdc_test_tweet', + properties.bootstrap.server='kafka:9092', + scan.startup.mode='earliest' +) ROW FORMAT CANAL_JSON; + +create table user ( + id varchar, + name varchar, + username varchar, + followers bigint, + created_at timestamp, + PRIMARY KEY (id) +) WITH ( + connector='kafka', + topic='ticdc_test_user', + properties.bootstrap.server='kafka:9092', + scan.startup.mode='earliest' +) ROW FORMAT CANAL_JSON; diff --git a/integration_tests/tidb-cdc-sink/data_check b/integration_tests/tidb-cdc-sink/data_check new file mode 100644 index 0000000000000..2f946d9fc6ae8 --- /dev/null +++ b/integration_tests/tidb-cdc-sink/data_check @@ -0,0 +1 @@ +tweet,user \ No newline at end of file diff --git a/integration_tests/tidb-cdc-sink/docker-compose.yml b/integration_tests/tidb-cdc-sink/docker-compose.yml new file mode 100644 index 0000000000000..e1b45c81a6bbf --- /dev/null +++ b/integration_tests/tidb-cdc-sink/docker-compose.yml @@ -0,0 +1,232 @@ +--- +version: '3' +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + connector-node: + extends: + file: ../../docker/docker-compose.yml + service: connector-node + + #=================== TiDB & TiCDC components ================== + ticdc-controller: + image: pingcap/ticdc:v6.6.0 + entrypoint: "/cdc cli" + command: + - changefeed + - create + - --server + - http://ticdc-capturer0:8300 + - --sink-uri + - "kafka://kafka:9092/ticdc_default?protocol=canal-json&kafka-version=2.4.0&partition-num=3&max-message-bytes=67108864&replication-factor=1" + - --changefeed-id + - "ticdc-replication-task" + - --config + - "/changefeed.toml" + volumes: + - ./config/changefeed.toml:/changefeed.toml:ro + depends_on: + - "pd" + - "kafka" + - "ticdc-capturer0" + restart: on-failure + + ticdc-capturer0: + image: pingcap/ticdc:v6.6.0 + entrypoint: "/cdc server" + ports: + - "8300:8300" + command: + - --addr=0.0.0.0:8300 + - 
--pd=http://pd:2379
+      - --advertise-addr=ticdc-capturer0:8300
+    depends_on:
+      - pd
+      - "tidb"
+      - "kafka"
+    restart: on-failure
+
+  pd:
+    image: pingcap/pd:v6.6.0
+    ports:
+      - "2379:2379"
+    volumes:
+      - ./config/pd.toml:/pd.toml:ro
+    command:
+      - --name=pd
+      - --client-urls=http://0.0.0.0:2379
+      - --peer-urls=http://0.0.0.0:2380
+      - --advertise-client-urls=http://pd:2379
+      - --advertise-peer-urls=http://pd:2380
+      - --initial-cluster=pd=http://pd:2380
+      - --data-dir=/data/pd
+      - --config=/pd.toml
+    restart: on-failure
+
+  tikv0:
+    image: pingcap/tikv:v6.6.0
+    volumes:
+      - ./config/tikv.toml:/tikv.toml:ro
+      - /data
+    command:
+      - --addr=0.0.0.0:20160
+      - --advertise-addr=tikv0:20160
+      - --data-dir=/data/tikv0
+      - --pd=pd:2379
+      - --config=/tikv.toml
+    depends_on:
+      - "pd"
+    restart: on-failure
+
+  tikv1:
+    image: pingcap/tikv:v6.6.0
+    volumes:
+      - ./config/tikv.toml:/tikv.toml:ro
+      - /data
+    command:
+      - --addr=0.0.0.0:20160
+      - --advertise-addr=tikv1:20160
+      - --data-dir=/data/tikv1
+      - --pd=pd:2379
+      - --config=/tikv.toml
+    depends_on:
+      - "pd"
+    restart: on-failure
+
+  tikv2:
+    image: pingcap/tikv:v6.6.0
+    volumes:
+      - ./config/tikv.toml:/tikv.toml:ro
+      - /data
+    command:
+      - --addr=0.0.0.0:20160
+      - --advertise-addr=tikv2:20160
+      - --data-dir=/data/tikv2
+      - --pd=pd:2379
+      - --config=/tikv.toml
+    depends_on:
+      - "pd"
+    restart: on-failure
+
+  tidb:
+    image: pingcap/tidb:v6.6.0
+    ports:
+      - "4000:4000"
+      - "10080:10080"
+    volumes:
+      - ./config/tidb.toml:/tidb.toml:ro
+    command:
+      - --store=tikv
+      - --path=pd:2379
+      - --config=/tidb.toml
+      - --advertise-address=tidb
+    depends_on:
+      - "tikv0"
+      - "tikv1"
+      - "tikv2"
+    restart: on-failure
+
+  #=================== Kafka ==================
+
+  # Adapted from https://github.com/confluentinc/demo-scene/blob/master/connect-jdbc/docker-compose.yml
+  zookeeper:
+    image: confluentinc/cp-zookeeper:5.5.1
+    container_name: zookeeper
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
+      ZOOKEEPER_TICK_TIME: 2000
+
+  kafka:
+    image: confluentinc/cp-enterprise-kafka:5.5.1
+    container_name: kafka
+    depends_on:
+      - "zookeeper"
+    ports:
+      # Exposes 9092 for external connections to the broker
+      # Use kafka:29092 for internal connections on the docker network
+      # See https://rmoff.net/2018/08/02/kafka-listeners-explained/ for details
+      - 9092:9092
+    environment:
+      KAFKA_BROKER_ID: 1
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://kafka:9092
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
+      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100
+      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:29092
+      CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
+      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
+      CONFLUENT_METRICS_ENABLE: 'true'
+      CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
+
+  #===================== Others ===================
+  datagen:
+    build: ../datagen
+    depends_on:
+      - tidb
+    command:
+      - /bin/sh
+      - -c
+      - /datagen --mode twitter --qps 2 mysql --host tidb --db test --port 4000 --user root --password ""
+    restart: always
+    container_name: datagen
+
+  init_tidb:
+    image: mysql:8.0
+    depends_on:
+      - tidb
+    command:
+      - /bin/sh
+      - -c
+      - "mysql --password= -h tidb --port 4000 -u root test < tidb_create_tables.sql"
+    volumes:
+      - 
"./tidb_create_tables.sql:/tidb_create_tables.sql" + container_name: init_tidb + restart: on-failure + +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false \ No newline at end of file diff --git a/integration_tests/tidb-cdc-sink/query.sql b/integration_tests/tidb-cdc-sink/query.sql new file mode 100644 index 0000000000000..3e64784a04aa7 --- /dev/null +++ b/integration_tests/tidb-cdc-sink/query.sql @@ -0,0 +1,8 @@ +SELECT + * +FROM + hot_hashtags +ORDER BY + hashtag_occurrences DESC +LIMIT + 10; \ No newline at end of file diff --git a/integration_tests/tidb-cdc-sink/tidb_create_tables.sql b/integration_tests/tidb-cdc-sink/tidb_create_tables.sql new file mode 100644 index 0000000000000..3a7e4d448bf6f --- /dev/null +++ b/integration_tests/tidb-cdc-sink/tidb_create_tables.sql @@ -0,0 +1,25 @@ +CREATE TABLE `tweet` ( + `id` varchar(20) NOT NULL, + `text` varchar(1024) DEFAULT NULL, + `lang` varchar(20) DEFAULT NULL, + `created_at` timestamp NULL DEFAULT NULL, + `author_id` varchar(20) NOT NULL, + PRIMARY KEY (`id`) +); + +create table user ( + `id` varchar(20) NOT NULL, + `name` varchar(100) DEFAULT NULL, + `username` varchar(100) DEFAULT NULL, + `followers` bigint not null, + `created_at` timestamp NULL DEFAULT NULL, + PRIMARY KEY (`id`) +); + + +create table hot_hashtags ( + `window_start` timestamp not null, + `hashtag` varchar(100) not null, + `hashtag_occurrences` bigint not null, + PRIMARY KEY (window_start, hashtag) +); diff --git a/integration_tests/twitter-pulsar/create_mv.sql b/integration_tests/twitter-pulsar/create_mv.sql new file mode 100644 index 0000000000000..4f776516bd215 --- /dev/null +++ b/integration_tests/twitter-pulsar/create_mv.sql @@ -0,0 +1,34 @@ +-- +-- Find the influencers +-- +CREATE MATERIALIZED VIEW influencer_tweets AS +SELECT + (author).id as author_id, + (data).text as tweet +FROM + twitter +WHERE + (author).followers > 5000 + AND (data).lang = 'English'; + +-- +-- Find the top10 hotest hashtags. 
+-- +CREATE MATERIALIZED VIEW hot_hashtags AS WITH tags AS ( + SELECT + unnest(regexp_matches((data).text, '#\w+', 'g')) AS hashtag, + (data).created_at AS created_at + FROM + twitter +) +SELECT + hashtag, + COUNT(*) AS hashtag_occurrences, + window_start +FROM + TUMBLE(tags, created_at, INTERVAL '1 day') +GROUP BY + hashtag, + window_start +ORDER BY + hashtag_occurrences; diff --git a/integration_tests/twitter-pulsar/create_source.sql b/integration_tests/twitter-pulsar/create_source.sql new file mode 100644 index 0000000000000..98492b15f4e1b --- /dev/null +++ b/integration_tests/twitter-pulsar/create_source.sql @@ -0,0 +1,19 @@ +-- +-- The Pulsar source version +-- +CREATE SOURCE twitter ( + data STRUCT < created_at TIMESTAMPTZ, + id VARCHAR, + text VARCHAR, + lang VARCHAR >, + author STRUCT < created_at TIMESTAMPTZ, + id VARCHAR, + name VARCHAR, + username VARCHAR, + followers INT > +) WITH ( + connector = 'pulsar', + pulsar.topic = 'twitter', + pulsar.admin.url = 'http://message_queue:8080', + pulsar.service.url = 'pulsar://message_queue:6650' +) ROW FORMAT JSON; diff --git a/integration_tests/twitter-pulsar/docker-compose.yml b/integration_tests/twitter-pulsar/docker-compose.yml new file mode 100644 index 0000000000000..cbc3df529533d --- /dev/null +++ b/integration_tests/twitter-pulsar/docker-compose.yml @@ -0,0 +1,67 @@ +--- +version: "3" +services: + compactor-0: + extends: + file: ../../docker/docker-compose.yml + service: compactor-0 + compute-node-0: + extends: + file: ../../docker/docker-compose.yml + service: compute-node-0 + etcd-0: + extends: + file: ../../docker/docker-compose.yml + service: etcd-0 + frontend-node-0: + extends: + file: ../../docker/docker-compose.yml + service: frontend-node-0 + grafana-0: + extends: + file: ../../docker/docker-compose.yml + service: grafana-0 + meta-node-0: + extends: + file: ../../docker/docker-compose.yml + service: meta-node-0 + minio-0: + extends: + file: ../../docker/docker-compose.yml + service: minio-0 + prometheus-0: + extends: + file: ../../docker/docker-compose.yml + service: prometheus-0 + message_queue: + image: "apachepulsar/pulsar:2.9.1" + command: bin/pulsar standalone + ports: + - 8080:8080 + - 6650:6650 + hostname: message_queue + container_name: message_queue + stop_grace_period: 2s + datagen: + build: ../datagen + depends_on: [message_queue] + command: + - /bin/sh + - -c + - /datagen --mode twitter --qps 2 pulsar --brokers message_queue:6650 + restart: always + container_name: datagen +volumes: + compute-node-0: + external: false + etcd-0: + external: false + grafana-0: + external: false + minio-0: + external: false + prometheus-0: + external: false + message_queue: + external: false +name: risingwave-compose diff --git a/integration_tests/twitter-pulsar/query.sql b/integration_tests/twitter-pulsar/query.sql new file mode 100644 index 0000000000000..3e64784a04aa7 --- /dev/null +++ b/integration_tests/twitter-pulsar/query.sql @@ -0,0 +1,8 @@ +SELECT + * +FROM + hot_hashtags +ORDER BY + hashtag_occurrences DESC +LIMIT + 10; \ No newline at end of file diff --git a/integration_tests/twitter/avro.json b/integration_tests/twitter/avro.json new file mode 100644 index 0000000000000..650685d3ee03a --- /dev/null +++ b/integration_tests/twitter/avro.json @@ -0,0 +1,27 @@ +{ + "type": "record", + "name": "Event", + "fields": [ + { + "name": "data", + "type": "record", + "fields": [ + { "name": "id", "type": "string" }, + { "name": "text", "type": "string" }, + { "name": "lang", "type": "string" }, + { "name": "created_at", "type": 
"string" } + ] + }, + { + "name": "author", + "type": "record", + "fields": [ + { "name": "id", "type": "string" }, + { "name": "name", "type": "string" }, + { "name": "username", "type": "string" }, + { "name": "created_at", "type": "string" }, + { "name": "followers", "type": "long" } + ] + } + ] +} diff --git a/integration_tests/twitter/avro/create_mv.sql b/integration_tests/twitter/avro/create_mv.sql new file mode 100644 index 0000000000000..06d2eb14e4074 --- /dev/null +++ b/integration_tests/twitter/avro/create_mv.sql @@ -0,0 +1,21 @@ +-- +-- Find the top10 hotest hashtags. +-- +CREATE MATERIALIZED VIEW hot_hashtags AS WITH tags AS ( + SELECT + unnest(regexp_matches((data).text, '#\w+', 'g')) AS hashtag, + (data).created_at :: timestamptz AS created_at + FROM + twitter +) +SELECT + hashtag, + COUNT(*) AS hashtag_occurrences, + window_start +FROM + TUMBLE(tags, created_at, INTERVAL '1 day') +GROUP BY + hashtag, + window_start +ORDER BY + hashtag_occurrences; \ No newline at end of file diff --git a/integration_tests/twitter/avro/create_source.sql b/integration_tests/twitter/avro/create_source.sql new file mode 100644 index 0000000000000..b0e53b3c1a54a --- /dev/null +++ b/integration_tests/twitter/avro/create_source.sql @@ -0,0 +1,6 @@ +CREATE SOURCE twitter WITH ( + connector = 'kafka', + topic = 'twitter', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT AVRO MESSAGE 'Event' ROW SCHEMA LOCATION 'http://file_server:8080/avro.json'; \ No newline at end of file diff --git a/integration_tests/twitter/create_mv.sql b/integration_tests/twitter/create_mv.sql new file mode 100644 index 0000000000000..5ad3f4fc3d92e --- /dev/null +++ b/integration_tests/twitter/create_mv.sql @@ -0,0 +1,21 @@ +-- +-- Find the top10 hotest hashtags. 
+--
+CREATE MATERIALIZED VIEW hot_hashtags AS WITH tags AS (
+  SELECT
+    unnest(regexp_matches((data).text, '#\w+', 'g')) AS hashtag,
+    (data).created_at AS created_at
+  FROM
+    twitter
+)
+SELECT
+  hashtag,
+  COUNT(*) AS hashtag_occurrences,
+  window_start
+FROM
+  TUMBLE(tags, created_at, INTERVAL '1 day')
+GROUP BY
+  hashtag,
+  window_start
+ORDER BY
+  hashtag_occurrences;
\ No newline at end of file
diff --git a/integration_tests/twitter/create_source.sql b/integration_tests/twitter/create_source.sql
new file mode 100644
index 0000000000000..943ca51fd01f6
--- /dev/null
+++ b/integration_tests/twitter/create_source.sql
@@ -0,0 +1,19 @@
+--
+-- The Kafka source version
+--
+CREATE SOURCE twitter (
+  data STRUCT < created_at TIMESTAMPTZ,
+  id VARCHAR,
+  text VARCHAR,
+  lang VARCHAR >,
+  author STRUCT < created_at TIMESTAMPTZ,
+  id VARCHAR,
+  name VARCHAR,
+  username VARCHAR,
+  followers INT >
+) WITH (
+  connector = 'kafka',
+  topic = 'twitter',
+  properties.bootstrap.server = 'message_queue:29092',
+  scan.startup.mode = 'earliest'
+) ROW FORMAT JSON;
\ No newline at end of file
diff --git a/integration_tests/twitter/data_check b/integration_tests/twitter/data_check
new file mode 100644
index 0000000000000..fae1c0be8dc4b
--- /dev/null
+++ b/integration_tests/twitter/data_check
@@ -0,0 +1 @@
+twitter,hot_hashtags
\ No newline at end of file
diff --git a/integration_tests/twitter/docker-compose.yml b/integration_tests/twitter/docker-compose.yml
new file mode 100644
index 0000000000000..7ac085e0a17ee
--- /dev/null
+++ b/integration_tests/twitter/docker-compose.yml
@@ -0,0 +1,62 @@
+---
+version: "3"
+services:
+  compactor-0:
+    extends:
+      file: ../../docker/docker-compose.yml
+      service: compactor-0
+  compute-node-0:
+    extends:
+      file: ../../docker/docker-compose.yml
+      service: compute-node-0
+  etcd-0:
+    extends:
+      file: ../../docker/docker-compose.yml
+      service: etcd-0
+  frontend-node-0:
+    extends:
+      file: ../../docker/docker-compose.yml
+      service: frontend-node-0
+  grafana-0:
+    extends:
+      file: ../../docker/docker-compose.yml
+      service: grafana-0
+  meta-node-0:
+    extends:
+      file: ../../docker/docker-compose.yml
+      service: meta-node-0
+  minio-0:
+    extends:
+      file: ../../docker/docker-compose.yml
+      service: minio-0
+  prometheus-0:
+    extends:
+      file: ../../docker/docker-compose.yml
+      service: prometheus-0
+  message_queue:
+    extends:
+      file: ../../docker/docker-compose.yml
+      service: message_queue
+  datagen:
+    build: ../datagen
+    depends_on: [message_queue]
+    command:
+      - /bin/sh
+      - -c
+      - /datagen --mode twitter --qps 2 kafka --brokers message_queue:29092
+    restart: always
+    container_name: datagen
+volumes:
+  compute-node-0:
+    external: false
+  etcd-0:
+    external: false
+  grafana-0:
+    external: false
+  minio-0:
+    external: false
+  prometheus-0:
+    external: false
+  message_queue:
+    external: false
+name: risingwave-compose
diff --git a/integration_tests/twitter/pb/create_mv.sql b/integration_tests/twitter/pb/create_mv.sql
new file mode 100644
index 0000000000000..c08722bacdbb3
--- /dev/null
+++ b/integration_tests/twitter/pb/create_mv.sql
@@ -0,0 +1,21 @@
+--
+-- Find the top 10 hottest hashtags.
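+-- The protobuf schema carries created_at as a string, so this variant casts
+-- it to timestamp before windowing.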
+-- +CREATE MATERIALIZED VIEW hot_hashtags AS WITH tags AS ( + SELECT + unnest(regexp_matches((data).text, '#\w+', 'g')) AS hashtag, + (data).created_at :: timestamp AS created_at + FROM + twitter +) +SELECT + hashtag, + COUNT(*) AS hashtag_occurrences, + window_start +FROM + TUMBLE(tags, created_at, INTERVAL '1 day') +GROUP BY + hashtag, + window_start +ORDER BY + hashtag_occurrences; \ No newline at end of file diff --git a/integration_tests/twitter/pb/create_source.sql b/integration_tests/twitter/pb/create_source.sql new file mode 100644 index 0000000000000..de6e2c2320bf0 --- /dev/null +++ b/integration_tests/twitter/pb/create_source.sql @@ -0,0 +1,6 @@ +CREATE SOURCE twitter WITH ( + connector = 'kafka', + topic = 'twitter', + properties.bootstrap.server = 'message_queue:29092', + scan.startup.mode = 'earliest' +) ROW FORMAT PROTOBUF MESSAGE 'twitter.schema.Event' ROW SCHEMA LOCATION 'http://file_server:8080/schema'; \ No newline at end of file diff --git a/integration_tests/twitter/query.sql b/integration_tests/twitter/query.sql new file mode 100644 index 0000000000000..3e64784a04aa7 --- /dev/null +++ b/integration_tests/twitter/query.sql @@ -0,0 +1,8 @@ +SELECT + * +FROM + hot_hashtags +ORDER BY + hashtag_occurrences DESC +LIMIT + 10; \ No newline at end of file diff --git a/integration_tests/twitter/schema b/integration_tests/twitter/schema new file mode 100644 index 0000000000000..e4c57548e9a34 --- /dev/null +++ b/integration_tests/twitter/schema @@ -0,0 +1,19 @@ + + + twitter.prototwitter.schema"d +Event- +data ( 2.twitter.schema.TweetDataRdata, +author ( 2.twitter.schema.UserRauthor"b + TweetData +id ( Rid +text ( Rtext +lang ( Rlang + +created_at ( R createdAt" +User +id ( Rid +name ( Rname + user_name ( RuserName + +created_at ( R createdAt + followers (R followersBZ twitter/protobproto3 \ No newline at end of file diff --git a/integration_tests/twitter/twitter.proto b/integration_tests/twitter/twitter.proto new file mode 100644 index 0000000000000..14df4a932c714 --- /dev/null +++ b/integration_tests/twitter/twitter.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package twitter.schema; +option go_package = "twitter/proto"; + +message Event { + TweetData data = 1; + User author = 2; +} + +message TweetData { + string id = 1; + string text = 2; + string lang = 3; + string created_at = 4; +} + +message User { + string id = 1; + string name = 2; + string user_name = 3; + string created_at = 4; + int64 followers = 5; +} diff --git a/java/com_risingwave_java_binding_Binding.h b/java/com_risingwave_java_binding_Binding.h index 0f4181f2cefb7..bd03892223a6d 100644 --- a/java/com_risingwave_java_binding_Binding.h +++ b/java/com_risingwave_java_binding_Binding.h @@ -17,26 +17,26 @@ JNIEXPORT jint JNICALL Java_com_risingwave_java_binding_Binding_vnodeCount /* * Class: com_risingwave_java_binding_Binding - * Method: iteratorNew + * Method: hummockIteratorNew * Signature: ([B)J */ -JNIEXPORT jlong JNICALL Java_com_risingwave_java_binding_Binding_iteratorNew +JNIEXPORT jlong JNICALL Java_com_risingwave_java_binding_Binding_hummockIteratorNew (JNIEnv *, jclass, jbyteArray); /* * Class: com_risingwave_java_binding_Binding - * Method: iteratorNext + * Method: hummockIteratorNext * Signature: (J)J */ -JNIEXPORT jlong JNICALL Java_com_risingwave_java_binding_Binding_iteratorNext +JNIEXPORT jlong JNICALL Java_com_risingwave_java_binding_Binding_hummockIteratorNext (JNIEnv *, jclass, jlong); /* * Class: com_risingwave_java_binding_Binding - * Method: iteratorClose + * Method: hummockIteratorClose * 
Signature: (J)V */ -JNIEXPORT void JNICALL Java_com_risingwave_java_binding_Binding_iteratorClose +JNIEXPORT void JNICALL Java_com_risingwave_java_binding_Binding_hummockIteratorClose (JNIEnv *, jclass, jlong); /* @@ -47,6 +47,14 @@ JNIEXPORT void JNICALL Java_com_risingwave_java_binding_Binding_iteratorClose JNIEXPORT jbyteArray JNICALL Java_com_risingwave_java_binding_Binding_rowGetKey (JNIEnv *, jclass, jlong); +/* + * Class: com_risingwave_java_binding_Binding + * Method: rowGetOp + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_com_risingwave_java_binding_Binding_rowGetOp + (JNIEnv *, jclass, jlong); + /* * Class: com_risingwave_java_binding_Binding * Method: rowIsNull @@ -119,6 +127,30 @@ JNIEXPORT jstring JNICALL Java_com_risingwave_java_binding_Binding_rowGetStringV JNIEXPORT void JNICALL Java_com_risingwave_java_binding_Binding_rowClose (JNIEnv *, jclass, jlong); +/* + * Class: com_risingwave_java_binding_Binding + * Method: streamChunkIteratorNew + * Signature: ([B)J + */ +JNIEXPORT jlong JNICALL Java_com_risingwave_java_binding_Binding_streamChunkIteratorNew + (JNIEnv *, jclass, jbyteArray); + +/* + * Class: com_risingwave_java_binding_Binding + * Method: streamChunkIteratorNext + * Signature: (J)J + */ +JNIEXPORT jlong JNICALL Java_com_risingwave_java_binding_Binding_streamChunkIteratorNext + (JNIEnv *, jclass, jlong); + +/* + * Class: com_risingwave_java_binding_Binding + * Method: streamChunkIteratorClose + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_com_risingwave_java_binding_Binding_streamChunkIteratorClose + (JNIEnv *, jclass, jlong); + #ifdef __cplusplus } #endif diff --git a/java/common-utils/src/main/java/com/risingwave/java/utils/MetaClient.java b/java/common-utils/src/main/java/com/risingwave/java/utils/MetaClient.java index d8cde5c6d300e..2bc3411920bfa 100644 --- a/java/common-utils/src/main/java/com/risingwave/java/utils/MetaClient.java +++ b/java/common-utils/src/main/java/com/risingwave/java/utils/MetaClient.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.java.utils; import com.risingwave.proto.*; diff --git a/java/common-utils/src/main/java/com/risingwave/java/utils/MinioUrlParser.java b/java/common-utils/src/main/java/com/risingwave/java/utils/MinioUrlParser.java index a68f26aa3841e..02b71ce32afc0 100644 --- a/java/common-utils/src/main/java/com/risingwave/java/utils/MinioUrlParser.java +++ b/java/common-utils/src/main/java/com/risingwave/java/utils/MinioUrlParser.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.java.utils; import static io.grpc.Status.INVALID_ARGUMENT; diff --git a/java/common-utils/src/main/java/com/risingwave/java/utils/VnodeHelper.java b/java/common-utils/src/main/java/com/risingwave/java/utils/VnodeHelper.java index eb02c4d88ec52..1b55b234d2e3e 100644 --- a/java/common-utils/src/main/java/com/risingwave/java/utils/VnodeHelper.java +++ b/java/common-utils/src/main/java/com/risingwave/java/utils/VnodeHelper.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.java.utils; import java.util.ArrayList; diff --git a/java/common-utils/src/test/java/com/risingwave/connector/utils/MinioUrlParserTest.java b/java/common-utils/src/test/java/com/risingwave/connector/utils/MinioUrlParserTest.java index f7907b69e9c7b..1d2d1be14d0ba 100644 --- a/java/common-utils/src/test/java/com/risingwave/connector/utils/MinioUrlParserTest.java +++ b/java/common-utils/src/test/java/com/risingwave/connector/utils/MinioUrlParserTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector.utils; import static org.junit.Assert.assertEquals; diff --git a/java/common-utils/src/test/java/com/risingwave/connector/utils/VnodeHelperTest.java b/java/common-utils/src/test/java/com/risingwave/connector/utils/VnodeHelperTest.java index a39ea04757751..fcb56a0203035 100644 --- a/java/common-utils/src/test/java/com/risingwave/connector/utils/VnodeHelperTest.java +++ b/java/common-utils/src/test/java/com/risingwave/connector/utils/VnodeHelperTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector.utils; import com.risingwave.java.utils.VnodeHelper; diff --git a/java/connector-node/assembly/scripts/start-service.sh b/java/connector-node/assembly/scripts/start-service.sh index 75792b7457627..86b59386c43a3 100755 --- a/java/connector-node/assembly/scripts/start-service.sh +++ b/java/connector-node/assembly/scripts/start-service.sh @@ -15,7 +15,8 @@ while getopts ":h:p:" o; do done shift $((OPTIND-1)) -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +DIR="$( cd "$( dirname "$0" )" && pwd )" MAIN='com.risingwave.connector.ConnectorService' PORT=50051 @@ -24,4 +25,4 @@ if [ -z "${port}" ]; then port=$PORT fi -java -classpath "${DIR}/libs/*" $MAIN --port ${port} +java -classpath "${DIR}/libs/*" -Djava.library.path="${RW_JAVA_BINDING_LIB_PATH}" $MAIN --port ${port} diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/PkComparator.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/PkComparator.java index 5daced8f90fad..6c93996d3d333 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/PkComparator.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/PkComparator.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector.api; import io.grpc.Status; diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/TableSchema.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/TableSchema.java index d10a913f75a50..053ba1e329920 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/TableSchema.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/TableSchema.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package com.risingwave.connector.api; import com.google.common.collect.Lists; @@ -91,4 +105,18 @@ public static TableSchema fromProto(ConnectorServiceProto.TableSchema tableSchem public List getPrimaryKeys() { return primaryKeys; } + + @Override + public String toString() { + return "TableSchema{" + + "columnNames=" + + columnNames + + ", columns=" + + columns + + ", columnIndices=" + + columnIndices + + ", primaryKeys=" + + primaryKeys + + '}'; + } } diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkRow.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkRow.java new file mode 100644 index 0000000000000..e443a7d3e286e --- /dev/null +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkRow.java @@ -0,0 +1,45 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector.api.sink; + +import com.risingwave.proto.Data; + +public class ArraySinkRow implements SinkRow { + public final Object[] values; + public final Data.Op op; + + public ArraySinkRow(Data.Op op, Object... value) { + this.op = op; + this.values = value; + } + + @Override + public Object get(int index) { + return values[index]; + } + + @Override + public Data.Op getOp() { + return op; + } + + @Override + public int size() { + return values.length; + } + + @Override + public void close() throws Exception {} +} diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkrow.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkrow.java deleted file mode 100644 index 43d013dd069ad..0000000000000 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/ArraySinkrow.java +++ /dev/null @@ -1,28 +0,0 @@ -package com.risingwave.connector.api.sink; - -import com.risingwave.proto.Data; - -public class ArraySinkrow implements SinkRow { - public final Object[] values; - public final Data.Op op; - - public ArraySinkrow(Data.Op op, Object... value) { - this.op = op; - this.values = value; - } - - @Override - public Object get(int index) { - return values[index]; - } - - @Override - public Data.Op getOp() { - return op; - } - - @Override - public int size() { - return values.length; - } -} diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/CloseableIterator.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/CloseableIterator.java new file mode 100644 index 0000000000000..b866e6dddcf55 --- /dev/null +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/CloseableIterator.java @@ -0,0 +1,21 @@ +/* + * Copyright 2023 RisingWave Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.risingwave.connector.api.sink; + +import java.util.Iterator; + +public interface CloseableIterator extends AutoCloseable, Iterator {} diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/Deserializer.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/Deserializer.java new file mode 100644 index 0000000000000..36bd9386f51e2 --- /dev/null +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/Deserializer.java @@ -0,0 +1,22 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector.api.sink; + +import com.risingwave.proto.ConnectorServiceProto; + +public interface Deserializer { + CloseableIterator deserialize( + ConnectorServiceProto.SinkStreamRequest.WriteBatch writeBatch); +} diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/Sink.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/Sink.java index d560a78630b10..4c02c0cc59cbc 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/Sink.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/Sink.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
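// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): how the new Deserializer and
// CloseableIterator contracts defined above fit together. A deserializer turns
// a WriteBatch into an iterator of SinkRow, and both the iterator and each row
// are AutoCloseable so natively backed payloads can release memory promptly.
// The class name `DeserializerUsageSketch` is hypothetical, and the generic
// parameters (stripped elsewhere in this patch by formatting) are assumed to
// be `CloseableIterator<SinkRow>`.

import com.risingwave.connector.api.sink.CloseableIterator;
import com.risingwave.connector.api.sink.Deserializer;
import com.risingwave.connector.api.sink.SinkRow;
import com.risingwave.proto.ConnectorServiceProto;

class DeserializerUsageSketch {
    static void drain(
            Deserializer deserializer,
            ConnectorServiceProto.SinkStreamRequest.WriteBatch batch)
            throws Exception {
        // try-with-resources closes the iterator even if handling a row fails
        try (CloseableIterator<SinkRow> rows = deserializer.deserialize(batch)) {
            while (rows.hasNext()) {
                // each row is closed as soon as the consumer is done with it
                try (SinkRow row = rows.next()) {
                    // hand `row` to a sink here
                }
            }
        }
    }
}
// ---------------------------------------------------------------------------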
+ package com.risingwave.connector.api.sink; import java.util.Iterator; diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkBase.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkBase.java index 2a6d048d39ef9..81b7207b49dd1 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkBase.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkBase.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector.api.sink; import com.risingwave.connector.api.TableSchema; diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkFactory.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkFactory.java index ff12363466cc8..723681a1d362f 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkFactory.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkFactory.java @@ -1,10 +1,25 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector.api.sink; import com.risingwave.connector.api.TableSchema; +import com.risingwave.proto.Catalog.SinkType; import java.util.Map; public interface SinkFactory { SinkBase create(TableSchema tableSchema, Map tableProperties); - void validate(TableSchema tableSchema, Map tableProperties); + void validate(TableSchema tableSchema, Map tableProperties, SinkType sinkType); } diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkRow.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkRow.java index ece1a881d269f..0ae0aa3facf7e 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkRow.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/SinkRow.java @@ -1,11 +1,25 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector.api.sink; import com.risingwave.proto.Data; -public interface SinkRow { - public Object get(int index); +public interface SinkRow extends AutoCloseable { + Object get(int index); - public Data.Op getOp(); + Data.Op getOp(); - public int size(); + int size(); } diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/TrivialCloseIterator.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/TrivialCloseIterator.java new file mode 100644 index 0000000000000..e89b9330bd9c2 --- /dev/null +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/sink/TrivialCloseIterator.java @@ -0,0 +1,41 @@ +/* + * Copyright 2023 RisingWave Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.risingwave.connector.api.sink; + +import java.util.Iterator; + +public class TrivialCloseIterator implements CloseableIterator { + + private final Iterator inner; + + public TrivialCloseIterator(Iterator inner) { + this.inner = inner; + } + + @Override + public void close() throws Exception {} + + @Override + public boolean hasNext() { + return inner.hasNext(); + } + + @Override + public E next() { + return inner.next(); + } +} diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/CdcEngine.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/CdcEngine.java index d66dd9d1a7674..443cf7411c262 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/CdcEngine.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/CdcEngine.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
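// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): ArraySinkRow is the plain
// heap-backed SinkRow, so wrapping an ordinary iterator in TrivialCloseIterator
// satisfies the new AutoCloseable contract above with no-op close() calls.
// The class and method names below are hypothetical.

import com.risingwave.connector.api.sink.ArraySinkRow;
import com.risingwave.connector.api.sink.SinkRow;
import com.risingwave.connector.api.sink.TrivialCloseIterator;
import com.risingwave.proto.Data;
import java.util.List;

class TrivialCloseIteratorSketch {
    static TrivialCloseIterator<SinkRow> singleInsert() {
        // one INSERT row with two columns; closing the row or iterator does nothing
        SinkRow row = new ArraySinkRow(Data.Op.INSERT, 1, "foo");
        return new TrivialCloseIterator<>(List.of(row).iterator());
    }
}
// ---------------------------------------------------------------------------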
+ package com.risingwave.connector.api.source; import com.risingwave.proto.ConnectorServiceProto; diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/CdcEngineRunner.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/CdcEngineRunner.java index 941f4591be13c..92648fa228081 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/CdcEngineRunner.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/CdcEngineRunner.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector.api.source; public interface CdcEngineRunner { diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/ConnectorConfig.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/ConnectorConfig.java deleted file mode 100644 index 8d9cf56ca8bcf..0000000000000 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/ConnectorConfig.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.risingwave.connector.api.source; - -import java.util.HashMap; -import java.util.Map; - -public class ConnectorConfig { - /* Common configs */ - public static final String HOST = "hostname"; - public static final String PORT = "port"; - public static final String USER = "username"; - public static final String PASSWORD = "password"; - - public static final String DB_NAME = "database.name"; - public static final String TABLE_NAME = "table.name"; - - /* MySQL specified configs */ - public static final String MYSQL_SERVER_ID = "server.id"; - - /* Postgres specified configs */ - public static final String PG_SLOT_NAME = "slot.name"; - public static final String PG_SCHEMA_NAME = "schema.name"; - - public static Map extractDebeziumProperties(Map properties) { - // retain only debezium properties if any - var userProps = new HashMap<>(properties); - userProps.remove(ConnectorConfig.HOST); - userProps.remove(ConnectorConfig.PORT); - userProps.remove(ConnectorConfig.USER); - userProps.remove(ConnectorConfig.PASSWORD); - userProps.remove(ConnectorConfig.DB_NAME); - userProps.remove(ConnectorConfig.TABLE_NAME); - userProps.remove(ConnectorConfig.MYSQL_SERVER_ID); - userProps.remove(ConnectorConfig.PG_SLOT_NAME); - userProps.remove(ConnectorConfig.PG_SCHEMA_NAME); - return userProps; - } -} diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceConfig.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceConfig.java index 8a231c21bb2ec..60dea58d8bbb0 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceConfig.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceConfig.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector.api.source; import java.util.Properties; diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceHandler.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceHandler.java index 379d2462a2d00..d37730635a60b 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceHandler.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceHandler.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector.api.source; import com.risingwave.proto.ConnectorServiceProto; @@ -5,7 +19,7 @@ /** Handler for RPC request */ public interface SourceHandler { - void handle( + void startSource( ServerCallStreamObserver responseObserver); } diff --git a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceTypeE.java b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceTypeE.java index 79a1434304c36..29cf05206187a 100644 --- a/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceTypeE.java +++ b/java/connector-node/connector-api/src/main/java/com/risingwave/connector/api/source/SourceTypeE.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package com.risingwave.connector.api.source; import com.risingwave.proto.ConnectorServiceProto; diff --git a/java/connector-node/python-client/integration_tests.py b/java/connector-node/python-client/integration_tests.py index 7d45569040843..7480902519e61 100644 --- a/java/connector-node/python-client/integration_tests.py +++ b/java/connector-node/python-client/integration_tests.py @@ -1,3 +1,18 @@ +# Copyright 2023 RisingWave Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + import os import argparse import json @@ -31,8 +46,9 @@ def test_upsert_sink(type, prop, input_file): stub = connector_service_pb2_grpc.ConnectorServiceStub(channel) request_list = [ connector_service_pb2.SinkStreamRequest(start=connector_service_pb2.SinkStreamRequest.StartSink( + format=connector_service_pb2.SinkPayloadFormat.JSON, sink_config=connector_service_pb2.SinkConfig( - sink_type=type, + connector_type=type, properties=prop, table_schema=make_mock_schema() ) @@ -70,8 +86,9 @@ def test_sink(type, prop, input_file): stub = connector_service_pb2_grpc.ConnectorServiceStub(channel) request_list = [ connector_service_pb2.SinkStreamRequest(start=connector_service_pb2.SinkStreamRequest.StartSink( + format=connector_service_pb2.SinkPayloadFormat.JSON, sink_config=connector_service_pb2.SinkConfig( - sink_type=type, + connector_type=type, properties=prop, table_schema=make_mock_schema() ) @@ -145,18 +162,22 @@ def test_print_sink(input_file): def test_iceberg_sink(input_file): test_sink("iceberg", - {"sink.mode":"append-only", - "location.type":"minio", - "warehouse.path":"minio://minioadmin:minioadmin@127.0.0.1:9000/bucket", + {"type":"append-only", + "warehouse.path":"s3a://bucket", + "s3.endpoint": "http://127.0.0.1:9000", + "s3.access.key": "minioadmin", + "s3.secret.key": "minioadmin", "database.name":"demo_db", "table.name":"demo_table"}, input_file) def test_upsert_iceberg_sink(input_file): test_upsert_sink("iceberg", - {"sink.mode":"upsert", - "location.type":"minio", - "warehouse.path":"minio://minioadmin:minioadmin@127.0.0.1:9000/bucket", + {"type":"upsert", + "warehouse.path":"s3a://bucket", + "s3.endpoint": "http://127.0.0.1:9000", + "s3.access.key": "minioadmin", + "s3.secret.key": "minioadmin", "database.name":"demo_db", "table.name":"demo_table"}, input_file) diff --git a/java/connector-node/python-client/pyspark-util.py b/java/connector-node/python-client/pyspark-util.py index 1591c646de58c..7b749ce95e773 100644 --- a/java/connector-node/python-client/pyspark-util.py +++ b/java/connector-node/python-client/pyspark-util.py @@ -1,3 +1,18 @@ +# Copyright 2023 RisingWave Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + import argparse import json from pyspark.sql import SparkSession, Row diff --git a/java/connector-node/risingwave-connector-service/pom.xml b/java/connector-node/risingwave-connector-service/pom.xml index 89a02597ef352..581085f43b30a 100644 --- a/java/connector-node/risingwave-connector-service/pom.xml +++ b/java/connector-node/risingwave-connector-service/pom.xml @@ -25,6 +25,10 @@ com.risingwave.java proto + + com.risingwave.java + java-binding + com.risingwave.java connector-api @@ -32,8 +36,8 @@ - org.slf4j - slf4j-api + org.apache.logging.log4j + log4j-api org.apache.logging.log4j @@ -47,7 +51,14 @@ com.google.code.gson gson - + + org.apache.commons + commons-text + + + commons-io + commons-io + commons-cli commons-cli diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/ConnectorService.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/ConnectorService.java index eebc0d2d16e92..2e3d08c5c6346 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/ConnectorService.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/ConnectorService.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import com.risingwave.metrics.ConnectorNodeMetrics; diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/ConnectorServiceImpl.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/ConnectorServiceImpl.java index eb3744357a7ae..12b7b00a7eb32 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/ConnectorServiceImpl.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/ConnectorServiceImpl.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package com.risingwave.connector; import com.risingwave.proto.ConnectorServiceGrpc; diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/Deserializer.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/Deserializer.java deleted file mode 100644 index 37adb0d08da08..0000000000000 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/Deserializer.java +++ /dev/null @@ -1,8 +0,0 @@ -package com.risingwave.connector; - -import com.risingwave.connector.api.sink.SinkRow; -import java.util.Iterator; - -public interface Deserializer { - Iterator deserialize(Object payload); -} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSink.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSink.java index a001934bb7eec..085ae800bdb0f 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSink.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSink.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package com.risingwave.connector; import static io.grpc.Status.*; @@ -43,25 +57,28 @@ public FileSink(String sinkPath, TableSchema tableSchema) { @Override public void write(Iterator rows) { while (rows.hasNext()) { - SinkRow row = rows.next(); - switch (row.getOp()) { - case INSERT: - String buf = - new Gson() - .toJson( - IntStream.range(0, row.size()) - .mapToObj(row::get) - .toArray()); - try { - sinkWriter.write(buf + System.lineSeparator()); - } catch (IOException e) { - throw INTERNAL.withCause(e).asRuntimeException(); - } - break; - default: - throw UNIMPLEMENTED - .withDescription("unsupported operation: " + row.getOp()) - .asRuntimeException(); + try (SinkRow row = rows.next()) { + switch (row.getOp()) { + case INSERT: + String buf = + new Gson() + .toJson( + IntStream.range(0, row.size()) + .mapToObj(row::get) + .toArray()); + try { + sinkWriter.write(buf + System.lineSeparator()); + } catch (IOException e) { + throw INTERNAL.withCause(e).asRuntimeException(); + } + break; + default: + throw UNIMPLEMENTED + .withDescription("unsupported operation: " + row.getOp()) + .asRuntimeException(); + } + } catch (Exception e) { + throw new RuntimeException(e); } } } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSinkFactory.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSinkFactory.java index 127796820eb56..52cc1f125a48c 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSinkFactory.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/FileSinkFactory.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static io.grpc.Status.*; @@ -5,6 +19,7 @@ import com.risingwave.connector.api.TableSchema; import com.risingwave.connector.api.sink.SinkBase; import com.risingwave.connector.api.sink.SinkFactory; +import com.risingwave.proto.Catalog.SinkType; import java.util.Map; public class FileSinkFactory implements SinkFactory { @@ -12,15 +27,13 @@ public class FileSinkFactory implements SinkFactory { @Override public SinkBase create(TableSchema tableSchema, Map tableProperties) { - // TODO: Remove this call to `validate` after supporting sink validation in risingwave. 
- validate(tableSchema, tableProperties); - String sinkPath = tableProperties.get(OUTPUT_PATH_PROP); return new FileSink(sinkPath, tableSchema); } @Override - public void validate(TableSchema tableSchema, Map tableProperties) { + public void validate( + TableSchema tableSchema, Map tableProperties, SinkType sinkType) { if (!tableProperties.containsKey(OUTPUT_PATH_PROP)) { throw INVALID_ARGUMENT .withDescription(String.format("%s is not specified", OUTPUT_PATH_PROP)) diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JsonDeserializer.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JsonDeserializer.java index 1b627d48aa8ec..837989120eab3 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JsonDeserializer.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/JsonDeserializer.java @@ -1,14 +1,29 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static io.grpc.Status.INVALID_ARGUMENT; import com.google.gson.Gson; import com.risingwave.connector.api.TableSchema; -import com.risingwave.connector.api.sink.ArraySinkrow; -import com.risingwave.connector.api.sink.SinkRow; +import com.risingwave.connector.api.sink.*; +import com.risingwave.proto.ConnectorServiceProto; import com.risingwave.proto.ConnectorServiceProto.SinkStreamRequest.WriteBatch.JsonPayload; import com.risingwave.proto.Data; -import java.util.Iterator; +import java.math.BigDecimal; +import java.sql.Timestamp; import java.util.Map; public class JsonDeserializer implements Deserializer { @@ -18,94 +33,144 @@ public JsonDeserializer(TableSchema tableSchema) { this.tableSchema = tableSchema; } + // Encoding here should be consistent with `datum_to_json_object()` in + // src/connector/src/sink/mod.rs @Override - public Iterator deserialize(Object payload) { - if (!(payload instanceof JsonPayload)) { + public CloseableIterator deserialize( + ConnectorServiceProto.SinkStreamRequest.WriteBatch writeBatch) { + if (!writeBatch.hasJsonPayload()) { throw INVALID_ARGUMENT - .withDescription("expected JsonPayload, got " + payload.getClass().getName()) + .withDescription("expected JsonPayload, got " + writeBatch.getPayloadCase()) .asRuntimeException(); } - JsonPayload jsonPayload = (JsonPayload) payload; - return jsonPayload.getRowOpsList().stream() - .map( - rowOp -> { - Map columnValues = new Gson().fromJson(rowOp.getLine(), Map.class); - Object[] values = new Object[columnValues.size()]; - for (String columnName : tableSchema.getColumnNames()) { - if (!columnValues.containsKey(columnName)) { - throw INVALID_ARGUMENT - .withDescription( - "column " + columnName + " not found in json") - .asRuntimeException(); - } - Data.DataType.TypeName typeName = - tableSchema.getColumnType(columnName); - values[tableSchema.getColumnIndex(columnName)] = - validateJsonDataTypes( - 
typeName, columnValues.get(columnName));
- }
- return (SinkRow) new ArraySinkrow(rowOp.getOpType(), values);
- })
- .iterator();
+ JsonPayload jsonPayload = writeBatch.getJsonPayload();
+ return new TrivialCloseIterator<>(
+ jsonPayload.getRowOpsList().stream()
+ .map(
+ rowOp -> {
+ Map columnValues =
+ new Gson().fromJson(rowOp.getLine(), Map.class);
+ Object[] values = new Object[columnValues.size()];
+ for (String columnName : tableSchema.getColumnNames()) {
+ if (!columnValues.containsKey(columnName)) {
+ throw INVALID_ARGUMENT
+ .withDescription(
+ "column "
+ + columnName
+ + " not found in json")
+ .asRuntimeException();
+ }
+ Data.DataType.TypeName typeName =
+ tableSchema.getColumnType(columnName);
+ values[tableSchema.getColumnIndex(columnName)] =
+ validateJsonDataTypes(
+ typeName, columnValues.get(columnName));
+ }
+ return (SinkRow) new ArraySinkRow(rowOp.getOpType(), values);
+ })
+ .iterator());
}
- private static Object validateJsonDataTypes(Data.DataType.TypeName typeName, Object value) {
- if (value instanceof Double
- && (Double) value % 1 == 0
- && typeName != Data.DataType.TypeName.DOUBLE
- && typeName != Data.DataType.TypeName.FLOAT) {
- return (int) (double) value;
+ private static Long castLong(Object value) {
+ if (value instanceof Integer) {
+ return ((Integer) value).longValue();
+ } else if (value instanceof Double) {
+ double d = (Double) value;
+ if (d % 1.0 != 0.0) {
+
+ throw io.grpc.Status.INVALID_ARGUMENT
+ .withDescription(
+ "unable to cast into long from non-integer double value: " + d)
+ .asRuntimeException();
+ }
+ return ((Double) value).longValue();
+ } else if (value instanceof Long) {
+ return (Long) value;
+ } else if (value instanceof Short) {
+ return ((Short) value).longValue();
+ } else if (value instanceof Float) {
+ double f = (Float) value;
+ if (f % 1.0 != 0.0) {
+
+ throw io.grpc.Status.INVALID_ARGUMENT
+ .withDescription(
+ "unable to cast into long from non-integer float value: " + f)
+ .asRuntimeException();
+ }
+ return ((Float) value).longValue();
+ } else {
+ throw io.grpc.Status.INVALID_ARGUMENT
+ .withDescription("unable to cast into long from " + value.getClass())
+ .asRuntimeException();
}
+ }
+
+ private static Double castDouble(Object value) {
+ if (value instanceof Double) {
+ return (Double) value;
+ } else if (value instanceof Float) {
+ return ((Float) value).doubleValue();
+ } else {
+ throw io.grpc.Status.INVALID_ARGUMENT
+ .withDescription("unable to cast into double from " + value.getClass())
+ .asRuntimeException();
+ }
+ }
+
+ private static BigDecimal castDecimal(Object value) {
+ if (value instanceof String) {
+ // FIXME(eric): See `datum_to_json_object()` in src/connector/src/sink/mod.rs
+ return new BigDecimal((String) value);
+ } else if (value instanceof BigDecimal) {
+ return (BigDecimal) value;
+ } else {
+ throw io.grpc.Status.INVALID_ARGUMENT
+ .withDescription("unable to cast into decimal from " + value.getClass())
+ .asRuntimeException();
+ }
+ }
+
+ private static Object validateJsonDataTypes(Data.DataType.TypeName typeName, Object value) {
switch (typeName) {
+ case INT16:
+ return castLong(value).shortValue();
case INT32:
+ return castLong(value).intValue();
case INT64:
- case INT16:
- if (!(value instanceof Integer)) {
- throw io.grpc.Status.INVALID_ARGUMENT
- .withDescription("Expected int, got " + value.getClass())
- .asRuntimeException();
- }
- break;
+ return castLong(value);
case VARCHAR:
if (!(value instanceof String)) {
throw io.grpc.Status.INVALID_ARGUMENT
.withDescription("Expected string,
got " + value.getClass()) .asRuntimeException(); } - break; + return value; case DOUBLE: - if (!(value instanceof Double)) { - throw io.grpc.Status.INVALID_ARGUMENT - .withDescription("Expected double, got " + value.getClass()) - .asRuntimeException(); - } - break; + return castDouble(value); case FLOAT: - if (!(value instanceof Float)) { - throw io.grpc.Status.INVALID_ARGUMENT - .withDescription("Expected float, got " + value.getClass()) - .asRuntimeException(); - } - break; + return castDouble(value).floatValue(); case DECIMAL: - if (!(value instanceof Float || value instanceof Double)) { - throw io.grpc.Status.INVALID_ARGUMENT - .withDescription("Expected float, got " + value.getClass()) - .asRuntimeException(); - } - break; + return castDecimal(value); case BOOLEAN: if (!(value instanceof Boolean)) { throw io.grpc.Status.INVALID_ARGUMENT .withDescription("Expected boolean, got " + value.getClass()) .asRuntimeException(); } - break; + return value; + case TIMESTAMP: + case TIMESTAMPTZ: + if (!(value instanceof String)) { + throw io.grpc.Status.INVALID_ARGUMENT + .withDescription( + "Expected timestamp in string, got " + value.getClass()) + .asRuntimeException(); + } + return Timestamp.valueOf((String) value); default: throw io.grpc.Status.INVALID_ARGUMENT .withDescription("unsupported type " + typeName) .asRuntimeException(); } - return value; } } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/PrintSink.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/PrintSink.java index 2ae3c80f85151..3c9483b844a38 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/PrintSink.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/PrintSink.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package com.risingwave.connector; import com.risingwave.connector.api.TableSchema; @@ -26,13 +40,18 @@ public PrintSink(Map properties, TableSchema tableSchema, PrintS @Override public void write(Iterator rows) { while (rows.hasNext()) { - SinkRow row = rows.next(); - out.println( - "PrintSink: " - + row.getOp().name() - + " values " - + Arrays.toString( - IntStream.range(0, row.size()).mapToObj(row::get).toArray())); + try (SinkRow row = rows.next()) { + out.println( + "PrintSink: " + + row.getOp().name() + + " values " + + Arrays.toString( + IntStream.range(0, row.size()) + .mapToObj(row::get) + .toArray())); + } catch (Exception e) { + throw new RuntimeException(e); + } } } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/PrintSinkFactory.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/PrintSinkFactory.java index ff5269a1b2c9b..72d16141e6f7a 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/PrintSinkFactory.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/PrintSinkFactory.java @@ -1,8 +1,23 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import com.risingwave.connector.api.TableSchema; import com.risingwave.connector.api.sink.SinkBase; import com.risingwave.connector.api.sink.SinkFactory; +import com.risingwave.proto.Catalog.SinkType; import java.util.Map; public class PrintSinkFactory implements SinkFactory { @@ -13,5 +28,6 @@ public SinkBase create(TableSchema tableSchema, Map tablePropert } @Override - public void validate(TableSchema tableSchema, Map tableProperties) {} + public void validate( + TableSchema tableSchema, Map tableProperties, SinkType sinkType) {} } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkStreamObserver.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkStreamObserver.java index 3117b4d251410..c0f59ac002e9a 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkStreamObserver.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkStreamObserver.java @@ -1,11 +1,24 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package com.risingwave.connector; import static io.grpc.Status.*; import com.risingwave.connector.api.TableSchema; -import com.risingwave.connector.api.sink.SinkBase; -import com.risingwave.connector.api.sink.SinkFactory; -import com.risingwave.connector.api.sink.SinkRow; +import com.risingwave.connector.api.sink.*; +import com.risingwave.connector.deserializer.StreamChunkDeserializer; import com.risingwave.metrics.ConnectorNodeMetrics; import com.risingwave.metrics.MonitoredRowIterator; import com.risingwave.proto.ConnectorServiceProto; @@ -14,7 +27,6 @@ import com.risingwave.proto.ConnectorServiceProto.SinkResponse.SyncResponse; import com.risingwave.proto.ConnectorServiceProto.SinkResponse.WriteResponse; import io.grpc.stub.StreamObserver; -import java.util.Iterator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,7 +61,7 @@ public void onNext(ConnectorServiceProto.SinkStreamRequest sinkTask) { .withDescription("Sink is already initialized") .asRuntimeException(); } - bindSink(sinkTask.getStart().getSinkConfig()); + bindSink(sinkTask.getStart().getSinkConfig(), sinkTask.getStart().getFormat()); LOG.debug("Sink initialized"); responseObserver.onNext( ConnectorServiceProto.SinkResponse.newBuilder() @@ -108,28 +120,10 @@ public void onNext(ConnectorServiceProto.SinkStreamRequest sinkTask) { .asRuntimeException(); } - Iterator rows; - switch (sinkTask.getWrite().getPayloadCase()) { - case JSON_PAYLOAD: - if (deserializer == null) { - deserializer = new JsonDeserializer(tableSchema); - } - - if (deserializer instanceof JsonDeserializer) { - rows = deserializer.deserialize(sinkTask.getWrite().getJsonPayload()); - } else { - throw INTERNAL.withDescription( - "invalid payload type: expected JSON, got " - + deserializer.getClass().getName()) - .asRuntimeException(); - } - break; - default: - throw INVALID_ARGUMENT - .withDescription("invalid payload type") - .asRuntimeException(); + try (CloseableIterator rowIter = + deserializer.deserialize(sinkTask.getWrite())) { + sink.write(new MonitoredRowIterator(rowIter)); } - sink.write(new MonitoredRowIterator(rows)); currentBatchId = sinkTask.getWrite().getBatchId(); LOG.debug( @@ -179,25 +173,40 @@ public void onNext(ConnectorServiceProto.SinkStreamRequest sinkTask) { @Override public void onError(Throwable throwable) { LOG.error("sink task error: ", throwable); - if (sink != null) { - sink.drop(); - } + cleanup(); responseObserver.onError(throwable); } @Override public void onCompleted() { LOG.debug("sink task completed"); + cleanup(); + responseObserver.onCompleted(); + } + + private void cleanup() { if (sink != null) { sink.drop(); } - responseObserver.onCompleted(); } - private void bindSink(SinkConfig sinkConfig) { + private void bindSink(SinkConfig sinkConfig, ConnectorServiceProto.SinkPayloadFormat format) { tableSchema = TableSchema.fromProto(sinkConfig.getTableSchema()); - SinkFactory sinkFactory = SinkUtils.getSinkFactory(sinkConfig.getSinkType()); + SinkFactory sinkFactory = SinkUtils.getSinkFactory(sinkConfig.getConnectorType()); sink = sinkFactory.create(tableSchema, sinkConfig.getPropertiesMap()); - ConnectorNodeMetrics.incActiveConnections(sinkConfig.getSinkType(), "node1"); + switch (format) { + case FORMAT_UNSPECIFIED: + case UNRECOGNIZED: + throw INVALID_ARGUMENT + .withDescription("should specify payload format in request") + .asRuntimeException(); + case JSON: + deserializer = new JsonDeserializer(tableSchema); + break; + case STREAM_CHUNK: + deserializer = new StreamChunkDeserializer(tableSchema); + break; + } + 
ConnectorNodeMetrics.incActiveConnections(sinkConfig.getConnectorType(), "node1"); } } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkUtils.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkUtils.java index e4133a6ba45d7..3aaf643e169ca 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkUtils.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkUtils.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static io.grpc.Status.*; diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkValidationHandler.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkValidationHandler.java index f1a30f931f0fd..9c6cbfcfa5a39 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkValidationHandler.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/SinkValidationHandler.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
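// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): what a SinkFactory looks like
// after this change. validate() now receives the catalog SinkType in addition
// to the schema and properties, so a connector can reject sink modes it does
// not support at CREATE SINK time; the SinkValidationHandler below feeds
// request.getSinkType() into this new parameter. The factory class is
// hypothetical, and the Map generics (stripped elsewhere by formatting) are
// assumed to be Map<String, String>.

import com.risingwave.connector.api.TableSchema;
import com.risingwave.connector.api.sink.SinkBase;
import com.risingwave.connector.api.sink.SinkFactory;
import com.risingwave.proto.Catalog.SinkType;
import java.util.Map;

class ValidatingSinkFactorySketch implements SinkFactory {
    @Override
    public SinkBase create(TableSchema tableSchema, Map<String, String> tableProperties) {
        throw new UnsupportedOperationException("validation-only sketch");
    }

    @Override
    public void validate(
            TableSchema tableSchema, Map<String, String> tableProperties, SinkType sinkType) {
        // a real factory can branch on sinkType here, e.g. refuse an upsert
        // sink when the schema declares no primary key
        if (tableSchema.getPrimaryKeys().isEmpty()) {
            throw new IllegalArgumentException("this sketch requires a primary key");
        }
    }
}
// ---------------------------------------------------------------------------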
+
package com.risingwave.connector;
import com.risingwave.connector.api.TableSchema;
@@ -21,18 +35,23 @@ public void handle(ConnectorServiceProto.ValidateSinkRequest request) {
try {
SinkConfig sinkConfig = request.getSinkConfig();
TableSchema tableSchema = TableSchema.fromProto(sinkConfig.getTableSchema());
- SinkFactory sinkFactory = SinkUtils.getSinkFactory(sinkConfig.getSinkType());
- sinkFactory.validate(tableSchema, sinkConfig.getPropertiesMap());
+ SinkFactory sinkFactory = SinkUtils.getSinkFactory(sinkConfig.getConnectorType());
+ sinkFactory.validate(tableSchema, sinkConfig.getPropertiesMap(), request.getSinkType());
} catch (Exception e) {
LOG.error("sink validation failed", e);
responseObserver.onNext(
ConnectorServiceProto.ValidateSinkResponse.newBuilder()
.setError(
ConnectorServiceProto.ValidationError.newBuilder()
- .setErrorMessage(e.toString())
+ .setErrorMessage(e.getMessage())
.build())
.build());
responseObserver.onCompleted();
+ // return early: the stream is already completed on the failure path
+ return;
}
+
+ responseObserver.onNext(ConnectorServiceProto.ValidateSinkResponse.newBuilder().build());
+ responseObserver.onCompleted();
}
}
diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/deserializer/StreamChunkDeserializer.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/deserializer/StreamChunkDeserializer.java
new file mode 100644
index 0000000000000..b6be0af1d9394
--- /dev/null
+++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/connector/deserializer/StreamChunkDeserializer.java
@@ -0,0 +1,215 @@
+// Copyright 2023 RisingWave Labs
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package com.risingwave.connector.deserializer; + +import static io.grpc.Status.INVALID_ARGUMENT; + +import com.risingwave.connector.api.TableSchema; +import com.risingwave.connector.api.sink.CloseableIterator; +import com.risingwave.connector.api.sink.Deserializer; +import com.risingwave.connector.api.sink.SinkRow; +import com.risingwave.java.binding.StreamChunkIterator; +import com.risingwave.java.binding.StreamChunkRow; +import com.risingwave.proto.ConnectorServiceProto; +import com.risingwave.proto.ConnectorServiceProto.SinkStreamRequest.WriteBatch.StreamChunkPayload; +import com.risingwave.proto.Data; + +public class StreamChunkDeserializer implements Deserializer { + interface ValueGetter { + Object get(StreamChunkRow row); + } + + private final ValueGetter[] valueGetters; + + public StreamChunkDeserializer(TableSchema tableSchema) { + this.valueGetters = buildValueGetter(tableSchema); + } + + static ValueGetter[] buildValueGetter(TableSchema tableSchema) { + String[] colNames = tableSchema.getColumnNames(); + ValueGetter[] ret = new ValueGetter[colNames.length]; + for (int i = 0; i < colNames.length; i++) { + int index = i; + Data.DataType.TypeName typeName = tableSchema.getColumnType(colNames[i]); + switch (typeName) { + case INT16: + ret[i] = + row -> { + if (row.isNull(index)) { + return null; + } + return row.getShort(index); + }; + break; + case INT32: + ret[i] = + row -> { + if (row.isNull(index)) { + return null; + } + return row.getInt(index); + }; + break; + case INT64: + ret[i] = + row -> { + if (row.isNull(index)) { + return null; + } + return row.getLong(index); + }; + break; + case FLOAT: + ret[i] = + row -> { + if (row.isNull(index)) { + return null; + } + return row.getFloat(index); + }; + break; + case DOUBLE: + ret[i] = + row -> { + if (row.isNull(index)) { + return null; + } + return row.getDouble(index); + }; + break; + case BOOLEAN: + ret[i] = + row -> { + if (row.isNull(index)) { + return null; + } + return row.getBoolean(index); + }; + break; + case VARCHAR: + ret[i] = + row -> { + if (row.isNull(index)) { + return null; + } + return row.getString(index); + }; + break; + default: + throw io.grpc.Status.INVALID_ARGUMENT + .withDescription("unsupported type " + typeName) + .asRuntimeException(); + } + } + return ret; + } + + @Override + public CloseableIterator deserialize( + ConnectorServiceProto.SinkStreamRequest.WriteBatch writeBatch) { + if (!writeBatch.hasStreamChunkPayload()) { + throw INVALID_ARGUMENT + .withDescription( + "expected StreamChunkPayload, got " + writeBatch.getPayloadCase()) + .asRuntimeException(); + } + StreamChunkPayload streamChunkPayload = writeBatch.getStreamChunkPayload(); + return new StreamChunkIteratorWrapper( + new StreamChunkIterator(streamChunkPayload.getBinaryData().toByteArray()), + valueGetters); + } + + static class StreamChunkRowWrapper implements SinkRow { + + private boolean isClosed; + private final StreamChunkRow inner; + private final ValueGetter[] valueGetters; + + StreamChunkRowWrapper(StreamChunkRow inner, ValueGetter[] valueGetters) { + this.inner = inner; + this.valueGetters = valueGetters; + this.isClosed = false; + } + + @Override + public Object get(int index) { + return valueGetters[index].get(inner); + } + + @Override + public Data.Op getOp() { + return inner.getOp(); + } + + @Override + public int size() { + return valueGetters.length; + } + + @Override + public void close() { + if (!isClosed) { + this.isClosed = true; + inner.close(); + } + } + } + + static class StreamChunkIteratorWrapper implements 
CloseableIterator { + private final StreamChunkIterator iter; + private final ValueGetter[] valueGetters; + private StreamChunkRowWrapper row; + + public StreamChunkIteratorWrapper(StreamChunkIterator iter, ValueGetter[] valueGetters) { + this.iter = iter; + this.valueGetters = valueGetters; + this.row = null; + } + + @Override + public void close() { + iter.close(); + try { + if (row != null) { + row.close(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Override + public boolean hasNext() { + if (this.row != null) { + throw new RuntimeException( + "cannot call hasNext again when there is row not consumed by next"); + } + StreamChunkRow row = iter.next(); + if (row == null) { + return false; + } + this.row = new StreamChunkRowWrapper(row, valueGetters); + return true; + } + + @Override + public SinkRow next() { + // Move the sink row outside + SinkRow ret = this.row; + this.row = null; + return ret; + } + } +} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/metrics/ConnectorNodeMetrics.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/metrics/ConnectorNodeMetrics.java index 8944150e66e35..51009c356b505 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/metrics/ConnectorNodeMetrics.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/metrics/ConnectorNodeMetrics.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.metrics; import static io.grpc.Status.INTERNAL; diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/metrics/MonitoredRowIterator.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/metrics/MonitoredRowIterator.java index f8ad375953f11..5c5673c905442 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/metrics/MonitoredRowIterator.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/metrics/MonitoredRowIterator.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
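// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the ValueGetter pattern that
// StreamChunkDeserializer uses above, in miniature. The per-column type switch
// runs once, at construction; reading a row afterwards is just an array of
// lambda calls with no per-row type dispatch. The Row interface and the names
// here are hypothetical stand-ins for StreamChunkRow.

import java.util.function.Function;

class ValueGetterPatternSketch {
    interface Row {
        boolean isNull(int index);

        Object getRaw(int index);
    }

    static Function<Row, Object>[] build(int columnCount) {
        @SuppressWarnings("unchecked")
        Function<Row, Object>[] getters = new Function[columnCount];
        for (int i = 0; i < columnCount; i++) {
            final int index = i; // capture a final copy for the lambda
            getters[i] = row -> row.isNull(index) ? null : row.getRaw(index);
        }
        return getters;
    }
}
// ---------------------------------------------------------------------------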
+ package com.risingwave.metrics; import com.risingwave.connector.api.sink.SinkRow; diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/SourceRequestHandler.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/SourceRequestHandler.java index 14f715aa4f7db..c94ab2677873e 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/SourceRequestHandler.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/SourceRequestHandler.java @@ -1,9 +1,23 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.sourcenode; -import com.risingwave.connector.api.source.ConnectorConfig; import com.risingwave.connector.api.source.SourceTypeE; import com.risingwave.proto.ConnectorServiceProto; import com.risingwave.proto.Data.DataType; +import com.risingwave.sourcenode.common.DbzConnectorConfig; import com.risingwave.sourcenode.core.SourceHandlerFactory; import io.grpc.Status; import io.grpc.StatusException; @@ -32,19 +46,19 @@ public void handle(ConnectorServiceProto.GetEventStreamRequest request) { break; case START: var startRequest = request.getStart(); - var handler = - SourceHandlerFactory.createSourceHandler( - SourceTypeE.valueOf(startRequest.getSourceType()), - startRequest.getSourceId(), - startRequest.getStartOffset(), - startRequest.getPropertiesMap()); - if (handler == null) { - LOG.error("failed to create source handler"); - responseObserver.onCompleted(); - } else { - handler.handle( + try { + var handler = + SourceHandlerFactory.createSourceHandler( + SourceTypeE.valueOf(startRequest.getSourceType()), + startRequest.getSourceId(), + startRequest.getStartOffset(), + startRequest.getPropertiesMap()); + handler.startSource( (ServerCallStreamObserver) responseObserver); + } catch (Throwable t) { + LOG.error("failed to start source", t); + responseObserver.onError(t); } break; case REQUEST_NOT_SET: @@ -61,11 +75,11 @@ private void validateDbProperties( String jdbcUrl = toJdbcPrefix(validate.getSourceType()) + "://" - + props.get(ConnectorConfig.HOST) + + props.get(DbzConnectorConfig.HOST) + ":" - + props.get(ConnectorConfig.PORT) + + props.get(DbzConnectorConfig.PORT) + "/" - + props.get(ConnectorConfig.DB_NAME); + + props.get(DbzConnectorConfig.DB_NAME); LOG.debug("validate jdbc url: {}", jdbcUrl); var sqlStmts = new Properties(); @@ -79,8 +93,8 @@ private void validateDbProperties( try (var conn = DriverManager.getConnection( jdbcUrl, - props.get(ConnectorConfig.USER), - props.get(ConnectorConfig.PASSWORD))) { + props.get(DbzConnectorConfig.USER), + props.get(DbzConnectorConfig.PASSWORD))) { // username and password are correct var dbMeta = conn.getMetaData(); @@ -124,8 +138,8 @@ private void validateDbProperties( } // check whether table exists try (var stmt = conn.prepareStatement(sqlStmts.getProperty("mysql.table"))) { - stmt.setString(1,
props.get(ConnectorConfig.DB_NAME)); - stmt.setString(2, props.get(ConnectorConfig.TABLE_NAME)); + stmt.setString(1, props.get(DbzConnectorConfig.DB_NAME)); + stmt.setString(2, props.get(DbzConnectorConfig.TABLE_NAME)); var res = stmt.executeQuery(); while (res.next()) { var ret = res.getInt(1); @@ -138,8 +152,8 @@ private void validateDbProperties( try (var stmt = conn.prepareStatement(sqlStmts.getProperty("mysql.table_schema"))) { var sourceSchema = validate.getTableSchema(); - stmt.setString(1, props.get(ConnectorConfig.DB_NAME)); - stmt.setString(2, props.get(ConnectorConfig.TABLE_NAME)); + stmt.setString(1, props.get(DbzConnectorConfig.DB_NAME)); + stmt.setString(2, props.get(DbzConnectorConfig.TABLE_NAME)); var res = stmt.executeQuery(); var pkFields = new HashSet(); int index = 0; @@ -189,8 +203,8 @@ private void validateDbProperties( } // check schema name and table name try (var stmt = conn.prepareStatement(sqlStmts.getProperty("postgres.table"))) { - stmt.setString(1, props.get(ConnectorConfig.PG_SCHEMA_NAME)); - stmt.setString(2, props.get(ConnectorConfig.TABLE_NAME)); + stmt.setString(1, props.get(DbzConnectorConfig.PG_SCHEMA_NAME)); + stmt.setString(2, props.get(DbzConnectorConfig.TABLE_NAME)); var res = stmt.executeQuery(); while (res.next()) { var ret = res.getString(1); @@ -205,9 +219,9 @@ private void validateDbProperties( try (var stmt = conn.prepareStatement(sqlStmts.getProperty("postgres.pk"))) { stmt.setString( 1, - props.get(ConnectorConfig.PG_SCHEMA_NAME) + props.get(DbzConnectorConfig.PG_SCHEMA_NAME) + "." - + props.get(ConnectorConfig.TABLE_NAME)); + + props.get(DbzConnectorConfig.TABLE_NAME)); var res = stmt.executeQuery(); var pkFields = new HashSet(); @@ -223,8 +237,8 @@ private void validateDbProperties( // check whether source schema match table schema on upstream try (var stmt = conn.prepareStatement(sqlStmts.getProperty("postgres.table_schema"))) { - stmt.setString(1, props.get(ConnectorConfig.PG_SCHEMA_NAME)); - stmt.setString(2, props.get(ConnectorConfig.TABLE_NAME)); + stmt.setString(1, props.get(DbzConnectorConfig.PG_SCHEMA_NAME)); + stmt.setString(2, props.get(DbzConnectorConfig.TABLE_NAME)); var res = stmt.executeQuery(); var sourceSchema = validate.getTableSchema(); int index = 0; @@ -249,6 +263,159 @@ private void validateDbProperties( } } } + // check whether user is superuser or replication role + try (var stmt = + conn.prepareStatement(sqlStmts.getProperty("postgres.role.check"))) { + stmt.setString(1, props.get(DbzConnectorConfig.USER)); + var res = stmt.executeQuery(); + while (res.next()) { + if (!res.getBoolean(1)) { + throw new StatusException( + Status.INTERNAL.withDescription( + "Postgres user must be superuser or replication role to start walsender.")); + } + } + } + // check whether user has select privilege on table for initial snapshot + try (var stmt = + conn.prepareStatement( + sqlStmts.getProperty("postgres.table_privilege.check"))) { + stmt.setString(1, props.get(DbzConnectorConfig.TABLE_NAME)); + stmt.setString(2, props.get(DbzConnectorConfig.USER)); + var res = stmt.executeQuery(); + while (res.next()) { + if (!res.getBoolean(1)) { + throw new StatusException( + Status.INTERNAL.withDescription( + "Postgres user must have select privilege on table " + + props.get( + DbzConnectorConfig.TABLE_NAME))); + } + } + } + // check whether publication exists + boolean publicationExists = false; + boolean partialPublication = false; + try (var stmt = conn.createStatement()) { + var res = + stmt.executeQuery( + 
sqlStmts.getProperty("postgres.publication_att_exists")); + while (res.next()) { + partialPublication = res.getBoolean(1); + } + } + // pg 15 and up supports partial publication of table + // check whether publication covers all columns + if (partialPublication) { + try (var stmt = + conn.prepareStatement( + sqlStmts.getProperty("postgres.publication_att"))) { + stmt.setString(1, props.get(DbzConnectorConfig.PG_SCHEMA_NAME)); + stmt.setString(2, props.get(DbzConnectorConfig.TABLE_NAME)); + var res = stmt.executeQuery(); + while (res.next()) { + String[] columnsPub = + (String[]) res.getArray("attnames").getArray(); + var sourceSchema = validate.getTableSchema(); + for (int i = 0; i < sourceSchema.getColumnsCount(); i++) { + String columnName = sourceSchema.getColumns(i).getName(); + if (Arrays.stream(columnsPub).noneMatch(columnName::equals)) { + throw new StatusException( + Status.INTERNAL.withDescription( + "The publication 'dbz_publication' does not cover all necessary columns in table " + + props.get( + DbzConnectorConfig + .TABLE_NAME))); + } + if (i == sourceSchema.getColumnsCount() - 1) { + publicationExists = true; + } + } + if (publicationExists) { + LOG.info("publication exists"); + break; + } + } + } + } else { // check directly whether publication exists + try (var stmt = + conn.prepareStatement( + sqlStmts.getProperty("postgres.publication_cnt"))) { + stmt.setString(1, props.get(DbzConnectorConfig.PG_SCHEMA_NAME)); + stmt.setString(2, props.get(DbzConnectorConfig.TABLE_NAME)); + var res = stmt.executeQuery(); + while (res.next()) { + if (res.getInt("count") > 0) { + publicationExists = true; + LOG.info("publication exists"); + break; + } + } + } + } + // if publication does not exist, check permission to create publication + if (!publicationExists) { + // check create privilege on database + try (var stmt = + conn.prepareStatement( + sqlStmts.getProperty( + "postgres.database_privilege.check"))) { + stmt.setString(1, props.get(DbzConnectorConfig.USER)); + stmt.setString(2, props.get(DbzConnectorConfig.DB_NAME)); + stmt.setString(3, props.get(DbzConnectorConfig.USER)); + var res = stmt.executeQuery(); + while (res.next()) { + if (!res.getBoolean(1)) { + throw new StatusException( + Status.INTERNAL.withDescription( + "Postgres user must have create privilege on database" + + props.get( + DbzConnectorConfig.DB_NAME))); + } + } + } + // check ownership on table + boolean isTableOwner = false; + String owner = null; + // check if user is owner + try (var stmt = + conn.prepareStatement( + sqlStmts.getProperty("postgres.table_owner"))) { + stmt.setString(1, props.get(DbzConnectorConfig.PG_SCHEMA_NAME)); + stmt.setString(2, props.get(DbzConnectorConfig.TABLE_NAME)); + var res = stmt.executeQuery(); + while (res.next()) { + owner = res.getString("tableowner"); + if (owner.equals(props.get(DbzConnectorConfig.USER))) { + isTableOwner = true; + break; + } + } + } + // if user is not owner, check if user belongs to owner group + if (!isTableOwner && !owner.isEmpty()) { + try (var stmt = + conn.prepareStatement( + sqlStmts.getProperty("postgres.users_of_group"))) { + stmt.setString(1, owner); + var res = stmt.executeQuery(); + while (res.next()) { + String[] users = (String[]) res.getArray("members").getArray(); + if (Arrays.stream(users) + .anyMatch(props.get(DbzConnectorConfig.USER)::equals)) { + isTableOwner = true; + break; + } + } + } + } + if (!isTableOwner) { + throw new StatusException( + Status.INTERNAL.withDescription( + "Postgres user must be owner of table " + + 
props.get(DbzConnectorConfig.TABLE_NAME))); + } + } break; default: break; diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/common/DbzConnectorConfig.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/common/DbzConnectorConfig.java new file mode 100644 index 0000000000000..62a719841521a --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/common/DbzConnectorConfig.java @@ -0,0 +1,133 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.sourcenode.common; + +import com.risingwave.connector.api.source.SourceTypeE; +import com.risingwave.connector.cdc.debezium.internal.ConfigurableOffsetBackingStore; +import java.io.IOException; +import java.io.StringReader; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import org.apache.commons.io.IOUtils; +import org.apache.commons.text.StringSubstitutor; + +public class DbzConnectorConfig { + + /* Common configs */ + public static final String HOST = "hostname"; + public static final String PORT = "port"; + public static final String USER = "username"; + public static final String PASSWORD = "password"; + + public static final String DB_NAME = "database.name"; + public static final String TABLE_NAME = "table.name"; + + /* MySQL specified configs */ + public static final String MYSQL_SERVER_ID = "server.id"; + + /* Postgres specified configs */ + public static final String PG_SLOT_NAME = "slot.name"; + public static final String PG_SCHEMA_NAME = "schema.name"; + + private static final String MYSQL_CONFIG_FILE = "mysql.properties"; + private static final String POSTGRES_CONFIG_FILE = "postgres.properties"; + + public static Map extractDebeziumProperties(Map properties) { + // retain only debezium properties if any + var userProps = new HashMap<>(properties); + userProps.remove(DbzConnectorConfig.HOST); + userProps.remove(DbzConnectorConfig.PORT); + userProps.remove(DbzConnectorConfig.USER); + userProps.remove(DbzConnectorConfig.PASSWORD); + userProps.remove(DbzConnectorConfig.DB_NAME); + userProps.remove(DbzConnectorConfig.TABLE_NAME); + userProps.remove(DbzConnectorConfig.MYSQL_SERVER_ID); + userProps.remove(DbzConnectorConfig.PG_SLOT_NAME); + userProps.remove(DbzConnectorConfig.PG_SCHEMA_NAME); + return userProps; + } + + private final long sourceId; + private final Properties resolvedDbzProps; + + public long getSourceId() { + return sourceId; + } + + public Properties getResolvedDebeziumProps() { + return resolvedDbzProps; + } + + public DbzConnectorConfig( + SourceTypeE source, long sourceId, String startOffset, Map userProps) { + var dbzProps = new Properties(); + try (var input = getClass().getClassLoader().getResourceAsStream("debezium.properties")) { + assert input != null; + dbzProps.load(input); + } catch (IOException e) { + throw new 
RuntimeException("failed to load debezium.properties", e); + } + + StringSubstitutor substitutor = new StringSubstitutor(userProps); + if (source == SourceTypeE.MYSQL) { + var mysqlProps = initiateDbConfig(MYSQL_CONFIG_FILE, substitutor); + // if offset is specified, we will continue binlog reading from the specified offset + if (null != startOffset && !startOffset.isBlank()) { + // 'snapshot.mode=schema_only_recovery' must be configured if binlog offset is + // specified. + // It only snapshots the schemas, not the data, and continue binlog reading from the + // specified offset + mysqlProps.setProperty("snapshot.mode", "schema_only_recovery"); + mysqlProps.setProperty( + ConfigurableOffsetBackingStore.OFFSET_STATE_VALUE, startOffset); + } + + dbzProps.putAll(mysqlProps); + } else if (source == SourceTypeE.POSTGRES) { + var postgresProps = initiateDbConfig(POSTGRES_CONFIG_FILE, substitutor); + + // if offset is specified, we will continue reading changes from the specified offset + if (null != startOffset && !startOffset.isBlank()) { + postgresProps.setProperty("snapshot.mode", "never"); + postgresProps.setProperty( + ConfigurableOffsetBackingStore.OFFSET_STATE_VALUE, startOffset); + } + dbzProps.putAll(postgresProps); + } else { + throw new RuntimeException("unsupported source type: " + source); + } + + var otherProps = extractDebeziumProperties(userProps); + dbzProps.putAll(otherProps); + + this.sourceId = sourceId; + this.resolvedDbzProps = dbzProps; + } + + private Properties initiateDbConfig(String fileName, StringSubstitutor substitutor) { + var dbProps = new Properties(); + try (var input = getClass().getClassLoader().getResourceAsStream(fileName)) { + assert input != null; + var inputStr = IOUtils.toString(input, StandardCharsets.UTF_8); + var resolvedStr = substitutor.replace(inputStr); + dbProps.load(new StringReader(resolvedStr)); + } catch (IOException e) { + throw new RuntimeException("failed to load " + fileName, e); + } + return dbProps; + } +} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/common/DebeziumCdcUtils.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/common/DebeziumCdcUtils.java deleted file mode 100644 index 38c02d1d857f6..0000000000000 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/common/DebeziumCdcUtils.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.risingwave.sourcenode.common; - -import java.util.Properties; - -public class DebeziumCdcUtils { - - /** Common config properties for Debeizum CDC connectors */ - public static Properties createCommonConfig() { - var props = new Properties(); - // capture decimal type in doule values, which may result in a loss of precision but is - // easier to use - // https://debezium.io/documentation/reference/stable/connectors/mysql.html#mysql-property-decimal-handling-mode - props.setProperty("decimal.handling.mode", "double"); - - // Add a converter for `Date` data type, which convert `Date` into a string - props.setProperty("converters", "datetime"); - props.setProperty( - "datetime.type", - "com.risingwave.connector.cdc.debezium.converters.DatetimeTypeConverter"); - props.setProperty("max.batch.size", "1024"); - props.setProperty("max.queue.size", "8192"); - return props; - } -} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DefaultCdcEngine.java 
b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzCdcEngine.java similarity index 50% rename from java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DefaultCdcEngine.java rename to java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzCdcEngine.java index 5cabced5ad5f7..b9f85ec33b1b2 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DefaultCdcEngine.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzCdcEngine.java @@ -1,37 +1,48 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.sourcenode.core; import com.risingwave.connector.api.source.CdcEngine; -import com.risingwave.connector.api.source.SourceConfig; import com.risingwave.proto.ConnectorServiceProto; import io.debezium.embedded.Connect; import io.debezium.engine.DebeziumEngine; import io.debezium.heartbeat.Heartbeat; +import java.util.Properties; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; -public class DefaultCdcEngine implements CdcEngine { +public class DbzCdcEngine implements CdcEngine { static final int DEFAULT_QUEUE_CAPACITY = 16; private final DebeziumEngine engine; - private final CdcEventConsumer consumer; - private final SourceConfig config; + private final DbzCdcEventConsumer consumer; + private final long id; /** If config is not valid will throw exceptions */ - public DefaultCdcEngine(SourceConfig config, DebeziumEngine.CompletionCallback callback) { - var dbzHeartbeatPrefix = - config.getProperties().getProperty(Heartbeat.HEARTBEAT_TOPICS_PREFIX.name()); + public DbzCdcEngine(long id, Properties config, DebeziumEngine.CompletionCallback callback) { + var dbzHeartbeatPrefix = config.getProperty(Heartbeat.HEARTBEAT_TOPICS_PREFIX.name()); var consumer = - new CdcEventConsumer( - config.getId(), - dbzHeartbeatPrefix, - new ArrayBlockingQueue<>(DEFAULT_QUEUE_CAPACITY)); + new DbzCdcEventConsumer( + id, dbzHeartbeatPrefix, new ArrayBlockingQueue<>(DEFAULT_QUEUE_CAPACITY)); // Builds a debezium engine but not start it - this.config = config; + this.id = id; this.consumer = consumer; this.engine = DebeziumEngine.create(Connect.class) - .using(config.getProperties()) + .using(config) .using(callback) .notifying(consumer) .build(); @@ -45,7 +56,7 @@ public void run() { @Override public long getId() { - return config.getId(); + return id; } public void stop() throws Exception { diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DefaultCdcEngineRunner.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzCdcEngineRunner.java similarity index 52% rename from 
java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DefaultCdcEngineRunner.java rename to java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzCdcEngineRunner.java index 744a735a3fe74..68104c226924d 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DefaultCdcEngineRunner.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzCdcEngineRunner.java @@ -1,8 +1,22 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.sourcenode.core; import com.risingwave.connector.api.source.*; import com.risingwave.proto.ConnectorServiceProto; -import io.debezium.engine.DebeziumEngine; +import com.risingwave.sourcenode.common.DbzConnectorConfig; import io.grpc.stub.StreamObserver; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -11,44 +25,40 @@ import org.slf4j.LoggerFactory; /** Single-thread engine runner */ -public class DefaultCdcEngineRunner implements CdcEngineRunner { - static final Logger LOG = LoggerFactory.getLogger(DefaultCdcEngineRunner.class); +public class DbzCdcEngineRunner implements CdcEngineRunner { + static final Logger LOG = LoggerFactory.getLogger(DbzCdcEngineRunner.class); private final ExecutorService executor; private final AtomicBoolean running = new AtomicBoolean(false); private final CdcEngine engine; - public DefaultCdcEngineRunner(CdcEngine engine) { + public DbzCdcEngineRunner(CdcEngine engine) { this.executor = Executors.newSingleThreadExecutor(); this.engine = engine; } public static CdcEngineRunner newCdcEngineRunner( - long sourceId, - SourceConfig config, + DbzConnectorConfig config, StreamObserver responseObserver) { - DefaultCdcEngineRunner runner = null; + DbzCdcEngineRunner runner = null; try { var engine = - new DefaultCdcEngine( - config, - new DebeziumEngine.CompletionCallback() { - @Override - public void handle( - boolean success, String message, Throwable error) { - if (!success) { - responseObserver.onError(error); - LOG.error( - "failed to run the engine. message: {}", - message, - error); - } else { - responseObserver.onCompleted(); - } + new DbzCdcEngine( + config.getSourceId(), + config.getResolvedDebeziumProps(), + (success, message, error) -> { + if (!success) { + responseObserver.onError(error); + LOG.error( + "the engine terminated with error. 
message: {}", + message, + error); + } else { + responseObserver.onCompleted(); } }); - runner = new DefaultCdcEngineRunner(engine); + runner = new DbzCdcEngineRunner(engine); } catch (Exception e) { LOG.error("failed to create the CDC engine", e); } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/CdcEventConsumer.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzCdcEventConsumer.java similarity index 85% rename from java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/CdcEventConsumer.java rename to java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzCdcEventConsumer.java index e41e38964ff48..499e97b48b704 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/CdcEventConsumer.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzCdcEventConsumer.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.sourcenode.core; import com.risingwave.connector.cdc.debezium.internal.DebeziumOffset; @@ -19,16 +33,16 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class CdcEventConsumer +public class DbzCdcEventConsumer implements DebeziumEngine.ChangeConsumer> { - static final Logger LOG = LoggerFactory.getLogger(CdcEventConsumer.class); + static final Logger LOG = LoggerFactory.getLogger(DbzCdcEventConsumer.class); private final BlockingQueue outputChannel; private final long sourceId; private final JsonConverter converter; private final String heartbeatTopicPrefix; - CdcEventConsumer( + DbzCdcEventConsumer( long sourceId, String heartbeatTopicPrefix, BlockingQueue store) { diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DefaultSourceHandler.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzSourceHandler.java similarity index 59% rename from java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DefaultSourceHandler.java rename to java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzSourceHandler.java index 22ac840136ae7..777c0c77cc674 100644 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DefaultSourceHandler.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/DbzSourceHandler.java @@ -1,32 +1,41 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.sourcenode.core; -import com.risingwave.connector.api.source.SourceConfig; import com.risingwave.connector.api.source.SourceHandler; import com.risingwave.proto.ConnectorServiceProto.GetEventStreamResponse; +import com.risingwave.sourcenode.common.DbzConnectorConfig; import io.grpc.Context; import io.grpc.stub.ServerCallStreamObserver; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** Default handler for RPC request */ -public class DefaultSourceHandler implements SourceHandler { - static final Logger LOG = LoggerFactory.getLogger(DefaultSourceHandler.class); +/** Handler for starting a Debezium source connector */ +public class DbzSourceHandler implements SourceHandler { + static final Logger LOG = LoggerFactory.getLogger(DbzSourceHandler.class); - private final SourceConfig config; + private final DbzConnectorConfig config; - private DefaultSourceHandler(SourceConfig config) { + public DbzSourceHandler(DbzConnectorConfig config) { this.config = config; } - public static DefaultSourceHandler newWithConfig(SourceConfig config) { - return new DefaultSourceHandler(config); - } - @Override - public void handle(ServerCallStreamObserver responseObserver) { - var runner = - DefaultCdcEngineRunner.newCdcEngineRunner(config.getId(), config, responseObserver); + public void startSource(ServerCallStreamObserver responseObserver) { + var runner = DbzCdcEngineRunner.newCdcEngineRunner(config, responseObserver); if (runner == null) { responseObserver.onCompleted(); return; @@ -35,7 +44,7 @@ public void handle(ServerCallStreamObserver responseObse try { // Start the engine runner.start(); - LOG.info("Start consuming events of table {}", config.getId()); + LOG.info("Start consuming events of table {}", config.getSourceId()); while (runner.isRunning()) { try { // Thread will block on the channel to get output from engine @@ -51,7 +60,7 @@ public void handle(ServerCallStreamObserver responseObse LOG.debug( "Engine#{}: emit one chunk {} events to network ", - config.getId(), + config.getSourceId(), resp.getEventsCount()); responseObserver.onNext(resp); } @@ -59,7 +68,7 @@ public void handle(ServerCallStreamObserver responseObse if (Context.current().isCancelled()) { LOG.info( "Engine#{}: Connection broken detected, stop the engine", - config.getId()); + config.getSourceId()); runner.stop(); } } catch (InterruptedException e) { @@ -71,9 +80,9 @@ public void handle(ServerCallStreamObserver responseObse try { runner.stop(); } catch (Exception e) { - LOG.warn("Failed to stop Engine#{}", config.getId(), e); + LOG.warn("Failed to stop Engine#{}", config.getSourceId(), e); } } - LOG.info("End consuming events of table {}", config.getId()); + LOG.info("End consuming events of table {}", config.getSourceId()); } } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/SourceHandlerFactory.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/SourceHandlerFactory.java index 9b3a6239126d5..8692f25da25dd 100644 ---
a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/SourceHandlerFactory.java +++ b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/core/SourceHandlerFactory.java @@ -1,9 +1,22 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.sourcenode.core; import com.risingwave.connector.api.source.SourceHandler; import com.risingwave.connector.api.source.SourceTypeE; -import com.risingwave.sourcenode.mysql.MySqlSourceConfig; -import com.risingwave.sourcenode.postgres.PostgresSourceConfig; +import com.risingwave.sourcenode.common.DbzConnectorConfig; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -12,17 +25,9 @@ public abstract class SourceHandlerFactory { static final Logger LOG = LoggerFactory.getLogger(SourceHandlerFactory.class); public static SourceHandler createSourceHandler( - SourceTypeE type, long sourceId, String startOffset, Map userProps) { - switch (type) { - case MYSQL: - return DefaultSourceHandler.newWithConfig( - new MySqlSourceConfig(sourceId, startOffset, userProps)); - case POSTGRES: - return DefaultSourceHandler.newWithConfig( - new PostgresSourceConfig(sourceId, startOffset, userProps)); - default: - LOG.warn("unknown source type: {}", type); - return null; - } + SourceTypeE source, long sourceId, String startOffset, Map userProps) { + var config = new DbzConnectorConfig(source, sourceId, startOffset, userProps); + LOG.info("resolved config for source#{}: {}", sourceId, config.getResolvedDebeziumProps()); + return new DbzSourceHandler(config); } } diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/mysql/MySqlSourceConfig.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/mysql/MySqlSourceConfig.java deleted file mode 100644 index 2939e3e5d367d..0000000000000 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/mysql/MySqlSourceConfig.java +++ /dev/null @@ -1,95 +0,0 @@ -package com.risingwave.sourcenode.mysql; - -import com.risingwave.connector.api.source.ConnectorConfig; -import com.risingwave.connector.api.source.SourceConfig; -import com.risingwave.connector.api.source.SourceTypeE; -import com.risingwave.connector.cdc.debezium.internal.ConfigurableOffsetBackingStore; -import com.risingwave.sourcenode.common.DebeziumCdcUtils; -import java.util.Map; -import java.util.Properties; - -/** MySQL Source Config */ -public class MySqlSourceConfig implements SourceConfig { - static final String DB_SERVER_NAME_PREFIX = "RW_CDC_"; - private final Properties props = DebeziumCdcUtils.createCommonConfig(); - private final long id; - private final String sourceName; - - public MySqlSourceConfig(long sourceId, String startOffset, Map userProps) { - id = sourceId; - props.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector"); - props.setProperty( - 
"offset.storage", ConfigurableOffsetBackingStore.class.getCanonicalName()); - - props.setProperty( - "database.history", "io.debezium.relational.history.MemoryDatabaseHistory"); - // if offset is specified, we will continue binlog reading from the specified offset - if (null != startOffset && !startOffset.isBlank()) { - // 'snapshot.mode=schema_only_recovery' must be configured if binlog offset is - // specified. - // It only snapshots the schemas, not the data, and continue binlog reading from the - // specified offset - props.setProperty("snapshot.mode", "schema_only_recovery"); - props.setProperty(ConfigurableOffsetBackingStore.OFFSET_STATE_VALUE, startOffset); - } - - // Begin of connector configs - props.setProperty("database.hostname", userProps.get(ConnectorConfig.HOST)); - props.setProperty("database.port", userProps.get(ConnectorConfig.PORT)); - props.setProperty("database.user", userProps.get(ConnectorConfig.USER)); - props.setProperty("database.password", userProps.get(ConnectorConfig.PASSWORD)); - - props.setProperty("database.include.list", userProps.get(ConnectorConfig.DB_NAME)); - // only captures data of the specified table - String tableFilter = - userProps.get(ConnectorConfig.DB_NAME) - + "." - + userProps.get(ConnectorConfig.TABLE_NAME); - props.setProperty("table.include.list", tableFilter); - - // disable schema change events for current stage - props.setProperty("include.schema.changes", "false"); - - // ServerId must be unique since the connector will join the mysql cluster as a client. - // By default, we generate serverId by adding a fixed number to the sourceId generated by - // Meta. We may allow user to specify the ID in the future. - props.setProperty("database.server.id", userProps.get(ConnectorConfig.MYSQL_SERVER_ID)); - props.setProperty("database.server.name", DB_SERVER_NAME_PREFIX + tableFilter); - - // host:port:database.table - sourceName = - userProps.get(ConnectorConfig.HOST) - + ":" - + userProps.get(ConnectorConfig.PORT) - + ":" - + userProps.get(ConnectorConfig.DB_NAME) - + "." 
- + userProps.get(ConnectorConfig.TABLE_NAME); - - props.setProperty("name", sourceName); - - // pass through debezium properties if any - var dbzProperties = ConnectorConfig.extractDebeziumProperties(userProps); - props.putAll(dbzProperties); - } - - @Override - public long getId() { - return id; - } - - @Override - public String getSourceName() { - return sourceName; - } - - @Override - public SourceTypeE getSourceType() { - return SourceTypeE.MYSQL; - } - - @Override - public Properties getProperties() { - return props; - } -} diff --git a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/postgres/PostgresSourceConfig.java b/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/postgres/PostgresSourceConfig.java deleted file mode 100644 index a314f037e8ae1..0000000000000 --- a/java/connector-node/risingwave-connector-service/src/main/java/com/risingwave/sourcenode/postgres/PostgresSourceConfig.java +++ /dev/null @@ -1,110 +0,0 @@ -package com.risingwave.sourcenode.postgres; - -import com.risingwave.connector.api.source.ConnectorConfig; -import com.risingwave.connector.api.source.SourceConfig; -import com.risingwave.connector.api.source.SourceTypeE; -import com.risingwave.connector.cdc.debezium.internal.ConfigurableOffsetBackingStore; -import com.risingwave.sourcenode.common.DebeziumCdcUtils; -import io.debezium.heartbeat.Heartbeat; -import java.time.Duration; -import java.util.Map; -import java.util.Properties; - -/** Postgres Source Config */ -public class PostgresSourceConfig implements SourceConfig { - static final String DB_SERVER_NAME_PREFIX = "RW_CDC_"; - private final Properties props = DebeziumCdcUtils.createCommonConfig(); - private final long id; - private final String sourceName; - private static final long DEFAULT_HEARTBEAT_MS = Duration.ofMinutes(5).toMillis(); - - public PostgresSourceConfig(long sourceId, String startOffset, Map userProps) { - id = sourceId; - props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector"); - props.setProperty( - "offset.storage", ConfigurableOffsetBackingStore.class.getCanonicalName()); - props.setProperty( - "database.history", "io.debezium.relational.history.MemoryDatabaseHistory"); - - // if offset is specified, we will continue reading changes from the specified offset - if (null != startOffset && !startOffset.isBlank()) { - props.setProperty("snapshot.mode", "never"); - props.setProperty(ConfigurableOffsetBackingStore.OFFSET_STATE_VALUE, startOffset); - } - - // Begin of connector configs - props.setProperty("database.hostname", userProps.get(ConnectorConfig.HOST)); - props.setProperty("database.port", userProps.get(ConnectorConfig.PORT)); - props.setProperty("database.user", userProps.get(ConnectorConfig.USER)); - props.setProperty("database.password", userProps.get(ConnectorConfig.PASSWORD)); - props.setProperty("database.dbname", userProps.get(ConnectorConfig.DB_NAME)); - // The name of the PostgreSQL logical decoding plug-in installed on the PostgreSQL server. - // Supported values are decoderbufs, and pgoutput. - // The wal2json plug-in is deprecated and scheduled for removal. - // see - // https://debezium.io/documentation/reference/1.9/connectors/postgresql.html#postgresql-property-plugin-name - props.setProperty("plugin.name", "pgoutput"); - - // The name of the PostgreSQL logical decoding slot that was created for streaming changes - // from a particular plug-in for a particular database/schema. 
The server uses this slot to - // stream events - // to the Debezium connector that you are configuring. - // Slot names must conform to PostgreSQL replication slot naming rules, - // which state: "Each replication slot has a name, which can contain lower-case letters, - // numbers, and the underscore character." - props.setProperty("slot.name", userProps.get(ConnectorConfig.PG_SLOT_NAME)); - - // Sending heartbeat messages enables the connector to send the latest retrieved LSN to the - // database, which allows the database to reclaim disk space being - // used by no longer needed WAL files. - // https://debezium.io/documentation/reference/1.9/connectors/postgresql.html#postgresql-property-heartbeat-interval-ms - props.setProperty("heartbeat.interval.ms", String.valueOf(DEFAULT_HEARTBEAT_MS)); - props.setProperty( - Heartbeat.HEARTBEAT_TOPICS_PREFIX.name(), - Heartbeat.HEARTBEAT_TOPICS_PREFIX.defaultValueAsString()); - - String tableFilter = - userProps.get(ConnectorConfig.PG_SCHEMA_NAME) - + "." - + userProps.get(ConnectorConfig.TABLE_NAME); - props.setProperty("table.include.list", tableFilter); - props.setProperty("database.server.name", DB_SERVER_NAME_PREFIX + tableFilter); - - // host:port:database.schema.table - sourceName = - userProps.get(ConnectorConfig.HOST) - + ":" - + userProps.get(ConnectorConfig.PORT) - + ":" - + userProps.get(ConnectorConfig.DB_NAME) - + "." - + userProps.get(ConnectorConfig.PG_SCHEMA_NAME) - + "." - + userProps.get(ConnectorConfig.TABLE_NAME); - props.setProperty("name", sourceName); - - // pass through debezium properties if any - var dbzProperties = ConnectorConfig.extractDebeziumProperties(userProps); - props.putAll(dbzProperties); - } - - @Override - public long getId() { - return id; - } - - @Override - public String getSourceName() { - return sourceName; - } - - @Override - public SourceTypeE getSourceType() { - return SourceTypeE.POSTGRES; - } - - @Override - public Properties getProperties() { - return props; - } -} diff --git a/java/connector-node/risingwave-connector-service/src/main/resources/debezium.properties b/java/connector-node/risingwave-connector-service/src/main/resources/debezium.properties new file mode 100644 index 0000000000000..666e547188d9a --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/resources/debezium.properties @@ -0,0 +1,8 @@ +# Store common debezium configs shared by all connectors +decimal.handling.mode=double +converters=datetime +datetime.type=com.risingwave.connector.cdc.debezium.converters.DatetimeTypeConverter +max.batch.size=1024 +max.queue.size=8192 + +time.precision.mode=adaptive_time_microseconds diff --git a/java/connector-node/risingwave-connector-service/src/main/resources/log4j.properties b/java/connector-node/risingwave-connector-service/src/main/resources/log4j.properties deleted file mode 100644 index e9584531b5ed3..0000000000000 --- a/java/connector-node/risingwave-connector-service/src/main/resources/log4j.properties +++ /dev/null @@ -1,14 +0,0 @@ -log4j.rootLogger=info, stdout, fout -log4j.logger.com.risingwave=info - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.EnhancedPatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%t] %c{2}:%L - %m%n - -log4j.appender.fout=org.apache.log4j.DailyRollingFileAppender -log4j.appender.fout.File=connector-node.log -log4j.appender.fout.ImmediateFlush=true -log4j.appender.fout.Append=true -log4j.appender.fout.DatePattern='.'yyyy-MM-dd 
-log4j.appender.fout.layout=org.apache.log4j.EnhancedPatternLayout -log4j.appender.fout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%t] %c{2}:%L - %m%n diff --git a/java/connector-node/risingwave-connector-service/src/main/resources/log4j2.properties b/java/connector-node/risingwave-connector-service/src/main/resources/log4j2.properties new file mode 100644 index 0000000000000..55c02dece2c36 --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/resources/log4j2.properties @@ -0,0 +1,12 @@ +rootLogger.level = INFO +# declare the appender to use +appenders = console + +# appender properties +appender.console.type = Console +appender.console.name = stdout +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%t] %c{2}:%L - %m%n + +rootLogger.appenderRefs = console +rootLogger.appenderRef.console.ref = stdout diff --git a/java/connector-node/risingwave-connector-service/src/main/resources/mysql.properties b/java/connector-node/risingwave-connector-service/src/main/resources/mysql.properties new file mode 100644 index 0000000000000..d7b9b384ca4b1 --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/resources/mysql.properties @@ -0,0 +1,21 @@ +# configs for mysql connector +connector.class=io.debezium.connector.mysql.MySqlConnector +offset.storage=com.risingwave.connector.cdc.debezium.internal.ConfigurableOffsetBackingStore +database.history=io.debezium.relational.history.MemoryDatabaseHistory +# default snapshot mode to initial +snapshot.mode=${debezium.snapshot.mode:-initial} + +database.hostname=${hostname} +database.port=${port} +database.user=${username} +database.password=${password} + +database.include.list=${database.name} +table.include.list=${database.name}.${table.name} + +# default to disable schema change events +include.schema.changes=${debezium.include.schema.changes:-false} +database.server.id=${server.id} +database.server.name=RW_CDC_${database.name}.${table.name} + +name=${hostname}:${port}:${database.name}.${table.name} diff --git a/java/connector-node/risingwave-connector-service/src/main/resources/postgres.properties b/java/connector-node/risingwave-connector-service/src/main/resources/postgres.properties new file mode 100644 index 0000000000000..05e10d44f0eb9 --- /dev/null +++ b/java/connector-node/risingwave-connector-service/src/main/resources/postgres.properties @@ -0,0 +1,30 @@ +# configs for postgres connector +connector.class=io.debezium.connector.postgresql.PostgresConnector +offset.storage=com.risingwave.connector.cdc.debezium.internal.ConfigurableOffsetBackingStore +database.history=io.debezium.relational.history.MemoryDatabaseHistory +# default snapshot mode to initial +snapshot.mode=${debezium.snapshot.mode:-initial} +
database.hostname=${hostname} +database.port=${port} +database.user=${username} +database.password=${password} + +database.dbname=${database.name} +table.include.list=${schema.name}.${table.name} + +# The name of the PostgreSQL replication slot +slot.name=${slot.name} + +# default plugin name is 'pgoutput' +plugin.name=${debezium.plugin.name:-pgoutput} + +# allow to auto create publication for given tables +publication.autocreate.mode=${debezium.publication.autocreate.mode:-filtered} + +# default heartbeat interval 5 mins +heartbeat.interval.ms=${debezium.heartbeat.interval.ms:-300000} +heartbeat.topics.prefix=${debezium.heartbeat.topics.prefix:-RW_CDC_HeartBeat_} +
+database.server.name=RW_CDC_${database.name}.${table.name} +name=${hostname}:${port}:${database.name}.${schema.name}.${table.name} diff --git a/java/connector-node/risingwave-connector-service/src/main/resources/validate_sql.properties b/java/connector-node/risingwave-connector-service/src/main/resources/validate_sql.properties index 0f84c4401baa9..bff8753a85d06 100644 --- a/java/connector-node/risingwave-connector-service/src/main/resources/validate_sql.properties +++ b/java/connector-node/risingwave-connector-service/src/main/resources/validate_sql.properties @@ -8,3 +8,28 @@ postgres.table=SELECT EXISTS ( SELECT FROM pg_tables WHERE schemaname = ? AND ta postgres.pk=SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type FROM pg_index i JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey) WHERE i.indrelid = ?::regclass AND i.indisprimary postgres.table_schema=SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = ? AND table_name = ? ORDER BY ordinal_position postgres.slot.check=SELECT slot_name FROM pg_replication_slots WHERE slot_name = ? +postgres.role.check=SELECT rolreplication OR rolsuper FROM pg_roles WHERE rolname = ? +postgres.database_privilege.check=SELECT has_database_privilege(?, ?, 'create') FROM pg_roles WHERE rolname = ? +postgres.table_privilege.check=SELECT (COUNT(*) = 1) FROM information_schema.role_table_grants WHERE table_name = ? AND grantee = ? and privilege_type = 'SELECT' +postgres.table_owner=SELECT tableowner FROM pg_tables WHERE schemaname = ? and tablename = ? +postgres.publication_att_exists=SELECT count(*) > 0 FROM information_schema.columns WHERE table_name = 'pg_publication_tables' AND column_name = 'attnames' +postgres.publication_att=SELECT attnames FROM pg_publication_tables WHERE schemaname = ? AND tablename = ? AND pubname = 'dbz_publication' +postgres.publication_cnt=SELECT COUNT(*) AS count FROM pg_publication_tables WHERE schemaname = ? AND tablename = ? AND pubname = 'dbz_publication' +postgres.users_of_group=WITH RECURSIVE base (g, m) AS (( \ +SELECT r1.rolname as group, ARRAY_AGG(DISTINCT(r2.rolname)) as members FROM pg_auth_members am \ +INNER JOIN pg_roles r1 ON r1.oid = am.roleid \ +INNER JOIN pg_roles r2 ON r2.oid = am.member \ +WHERE r1.rolname = ? \ +GROUP BY r1.rolname \ +) \ +UNION ALL ( \ +WITH groups AS (SELECT DISTINCT(UNNEST(m)) AS g FROM base) \ +SELECT r1.rolname as group, ARRAY_AGG(DISTINCT(r2.rolname)) as members FROM pg_auth_members am \ +INNER JOIN pg_roles r1 ON r1.oid = am.roleid \ +INNER JOIN pg_roles r2 ON r2.oid = am.member \ +INNER JOIN groups ON r1.rolname = groups.g \ +GROUP BY r1.rolname \ +) \ +), \ +tmp AS (SELECT DISTINCT(UNNEST(m)) AS members FROM base) \ +SELECT ARRAY_AGG(members) AS members FROM tmp diff --git a/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/DeserializerTest.java b/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/DeserializerTest.java index 8841b4039b6f6..2ee86ff95ad76 100644 --- a/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/DeserializerTest.java +++ b/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/DeserializerTest.java @@ -1,7 +1,22 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
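Each validation query added to validate_sql.properties above is issued as a single parameterized statement, so user-supplied identifiers are bound as JDBC parameters rather than concatenated into the SQL text. A sketch of how one such check might be run, with hypothetical connection details and assuming a PostgreSQL JDBC driver on the classpath:

import java.sql.DriverManager;

public class RoleCheckSketch {
    public static void main(String[] args) throws Exception {
        // The role check from validate_sql.properties above.
        String sql = "SELECT rolreplication OR rolsuper FROM pg_roles WHERE rolname = ?";
        // Hypothetical connection parameters, for illustration only.
        try (var conn = DriverManager.getConnection(
                        "jdbc:postgresql://localhost:5432/mydb", "dbz_user", "secret");
                var stmt = conn.prepareStatement(sql)) {
            stmt.setString(1, "dbz_user"); // bound as a parameter, never concatenated
            try (var res = stmt.executeQuery()) {
                while (res.next()) {
                    if (!res.getBoolean(1)) {
                        throw new IllegalStateException(
                                "user lacks superuser/replication role; walsender will be refused");
                    }
                }
            }
        }
    }
}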
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import com.risingwave.connector.api.TableSchema; import com.risingwave.connector.api.sink.SinkRow; +import com.risingwave.proto.ConnectorServiceProto; import com.risingwave.proto.ConnectorServiceProto.SinkStreamRequest.WriteBatch.JsonPayload; import com.risingwave.proto.Data; import junit.framework.TestCase; @@ -17,7 +32,11 @@ public void testJsonDeserializer() { .setLine("{\"id\": 1, \"name\": \"John\"}") .build()) .build(); - SinkRow outcome = deserializer.deserialize(jsonPayload).next(); + ConnectorServiceProto.SinkStreamRequest.WriteBatch writeBatch = + ConnectorServiceProto.SinkStreamRequest.WriteBatch.newBuilder() + .setJsonPayload(jsonPayload) + .build(); + SinkRow outcome = deserializer.deserialize(writeBatch).next(); assertEquals(outcome.get(0), 1); assertEquals(outcome.get(1), "John"); } diff --git a/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/FileSinkTest.java b/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/FileSinkTest.java index 9f71cbabc195f..1ec9eda586c9f 100644 --- a/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/FileSinkTest.java +++ b/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/FileSinkTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
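The DeserializerTest change above reflects the new request shape: the payload is a oneof inside WriteBatch, so a deserializer branches on which variant is set instead of receiving a JsonPayload directly. A hedged sketch of that dispatch, assuming the generated protobuf classes from this diff are on the classpath:

import com.risingwave.proto.ConnectorServiceProto;
import com.risingwave.proto.ConnectorServiceProto.SinkStreamRequest.WriteBatch;

public class PayloadDispatchSketch {
    // Exactly one payload variant is set per WriteBatch; branch on the oneof.
    static String describe(WriteBatch writeBatch) {
        if (writeBatch.hasStreamChunkPayload()) {
            return "stream chunk, "
                    + writeBatch.getStreamChunkPayload().getBinaryData().size()
                    + " bytes";
        }
        return "other payload case: " + writeBatch.getPayloadCase();
    }

    public static void main(String[] args) {
        var jsonPayload = WriteBatch.JsonPayload.newBuilder().build();
        var batch = WriteBatch.newBuilder().setJsonPayload(jsonPayload).build();
        System.out.println(describe(batch)); // falls through to the payload-case branch
    }
}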
+ package com.risingwave.connector; import static com.risingwave.proto.Data.*; @@ -5,7 +19,7 @@ import com.google.common.collect.Iterators; import com.risingwave.connector.api.TableSchema; -import com.risingwave.connector.api.sink.ArraySinkrow; +import com.risingwave.connector.api.sink.ArraySinkRow; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -26,7 +40,7 @@ public void testSync() throws IOException { Path file = Paths.get(filePath); try { - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 1, "Alice"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 1, "Alice"))); sink.sync(); String[] expectedA = {"[1,\"Alice\"]"}; String[] actualA = Files.lines(file).toArray(String[]::new); @@ -34,7 +48,7 @@ public void testSync() throws IOException { IntStream.range(0, expectedA.length) .forEach(i -> assertEquals(expectedA[i], actualA[i])); - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 2, "Bob"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 2, "Bob"))); String[] expectedB = new String[] {"[1,\"Alice\"]"}; String[] actualB = Files.lines(file).toArray(String[]::new); assertEquals(expectedB.length, actualB.length); @@ -70,8 +84,8 @@ public void testWrite() throws IOException { String[] expected = {"[1,\"Alice\"]", "[2,\"Bob\"]"}; sink.write( Iterators.forArray( - new ArraySinkrow(Op.INSERT, 1, "Alice"), - new ArraySinkrow(Op.INSERT, 2, "Bob"))); + new ArraySinkRow(Op.INSERT, 1, "Alice"), + new ArraySinkRow(Op.INSERT, 2, "Bob"))); sink.sync(); String[] actual = Files.lines(Paths.get(filePath)).toArray(String[]::new); diff --git a/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/PrintSinkTest.java b/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/PrintSinkTest.java index eca9a026090a6..5d3e8a9c4cf0e 100644 --- a/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/PrintSinkTest.java +++ b/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/PrintSinkTest.java @@ -1,10 +1,24 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
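The FileSinkTest flow above treats sync() as the durability point: rows written after the last sync are not yet visible in the file. A self-contained sketch of that write/sync split using a plain buffered writer (stand-in for the sink, not the FileSink implementation):

import java.io.BufferedWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class BufferedSinkSketch {
    public static void main(String[] args) throws IOException {
        Path file = Files.createTempFile("sink-sketch", ".txt");
        try (BufferedWriter writer = Files.newBufferedWriter(file)) {
            writer.write("[1,\"Alice\"]\n"); // buffered, not yet durable
            writer.flush();                  // the "sync": now visible to readers
            writer.write("[2,\"Bob\"]\n");   // buffered again, invisible until the next flush
            // Prints 1: only the synced row is observable.
            System.out.println("lines visible after sync: " + Files.readAllLines(file).size());
        }
    }
}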
+ package com.risingwave.connector; import static com.risingwave.proto.Data.*; import com.google.common.collect.Iterators; import com.risingwave.connector.api.TableSchema; -import com.risingwave.connector.api.sink.ArraySinkrow; +import com.risingwave.connector.api.sink.ArraySinkRow; import java.io.PrintStream; import java.util.HashMap; import java.util.Iterator; @@ -52,10 +66,10 @@ public void print(String x) { sink.write( Iterators.forArray( - new ArraySinkrow(Op.INSERT, 1, "Alice"), - new ArraySinkrow(Op.UPDATE_DELETE, 1, "Alice"), - new ArraySinkrow(Op.UPDATE_INSERT, 2, "Bob"), - new ArraySinkrow(Op.DELETE, 2, "Bob"))); + new ArraySinkRow(Op.INSERT, 1, "Alice"), + new ArraySinkRow(Op.UPDATE_DELETE, 1, "Alice"), + new ArraySinkRow(Op.UPDATE_INSERT, 2, "Bob"), + new ArraySinkRow(Op.DELETE, 2, "Bob"))); if (!writeCalled[0]) { fail("write batch did not print messages"); } diff --git a/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/SinkStreamObserverTest.java b/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/SinkStreamObserverTest.java index af86cfacae82f..739c906b3f1da 100644 --- a/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/SinkStreamObserverTest.java +++ b/java/connector-node/risingwave-connector-service/src/test/java/com/risingwave/connector/SinkStreamObserverTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
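PrintSinkTest above injects a custom PrintStream to observe what the sink prints. The same effect can be achieved generically by swapping System.out for a capturing stream; a minimal sketch of that test technique (not the test itself):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;

public class CaptureStdoutSketch {
    public static void main(String[] args) {
        PrintStream original = System.out;
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        System.setOut(new PrintStream(buffer, true, StandardCharsets.UTF_8));
        try {
            System.out.println("INSERT (1, Alice)"); // the code under test prints here
        } finally {
            System.setOut(original); // always restore, or later output lands in the buffer
        }
        System.out.println("captured: " + buffer.toString(StandardCharsets.UTF_8).trim());
    }
}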
+ package com.risingwave.connector; import com.risingwave.connector.api.TableSchema; @@ -14,7 +28,7 @@ public class SinkStreamObserverTest { public SinkConfig fileSinkConfig = SinkConfig.newBuilder() .setTableSchema(TableSchema.getMockTableProto()) - .setSinkType("file") + .setConnectorType("file") .putAllProperties(Map.of("output.path", "/tmp/rw-connector")) .build(); @@ -78,6 +92,7 @@ public void testOnNext_syncValidation() { .setStart( ConnectorServiceProto.SinkStreamRequest.StartSink.newBuilder() .setSinkConfig(fileSinkConfig) + .setFormat(ConnectorServiceProto.SinkPayloadFormat.JSON) .build()) .build(); ConnectorServiceProto.SinkStreamRequest firstSync = @@ -119,6 +134,7 @@ public void testOnNext_startEpochValidation() { .setStart( ConnectorServiceProto.SinkStreamRequest.StartSink.newBuilder() .setSinkConfig(fileSinkConfig) + .setFormat(ConnectorServiceProto.SinkPayloadFormat.JSON) .build()) .build(); ConnectorServiceProto.SinkStreamRequest firstSync = @@ -183,6 +199,7 @@ public void testOnNext_writeValidation() { ConnectorServiceProto.SinkStreamRequest.newBuilder() .setStart( ConnectorServiceProto.SinkStreamRequest.StartSink.newBuilder() + .setFormat(ConnectorServiceProto.SinkPayloadFormat.JSON) .setSinkConfig(fileSinkConfig)) .build(); ConnectorServiceProto.SinkStreamRequest firstStartEpoch = diff --git a/java/connector-node/risingwave-sink-deltalake/pom.xml b/java/connector-node/risingwave-sink-deltalake/pom.xml index 71e4f182d1a44..cbb28db6e2ce2 100644 --- a/java/connector-node/risingwave-sink-deltalake/pom.xml +++ b/java/connector-node/risingwave-sink-deltalake/pom.xml @@ -68,8 +68,8 @@ - org.slf4j - slf4j-api + org.apache.logging.log4j + log4j-api org.apache.logging.log4j diff --git a/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSink.java b/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSink.java index adbce8193cae9..f4ceded0a055f 100644 --- a/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSink.java +++ b/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSink.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
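+// Note: write() below drains each row inside try-with-resources, which
+// suggests SinkRow is now AutoCloseable in the connector API. Sketch of the
+// pattern applied across the sinks in this patch:
+//
+//   while (rows.hasNext()) {
+//       try (SinkRow row = rows.next()) {
+//           // ... dispatch on row.getOp(), read columns via row.get(i) ...
+//       } catch (Exception e) {
+//           throw new RuntimeException(e);
+//       }
+//   }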
+ package com.risingwave.connector; import static io.grpc.Status.*; @@ -61,24 +75,27 @@ public void write(Iterator rows) { } } while (rows.hasNext()) { - SinkRow row = rows.next(); - switch (row.getOp()) { - case INSERT: - GenericRecord record = new GenericData.Record(this.sinkSchema); - for (int i = 0; i < this.sinkSchema.getFields().size(); i++) { - record.put(i, row.get(i)); - } - try { - this.parquetWriter.write(record); - this.numOutputRows += 1; - } catch (IOException ioException) { - throw INTERNAL.withCause(ioException).asRuntimeException(); - } - break; - default: - throw UNIMPLEMENTED - .withDescription("unsupported operation: " + row.getOp()) - .asRuntimeException(); + try (SinkRow row = rows.next()) { + switch (row.getOp()) { + case INSERT: + GenericRecord record = new GenericData.Record(this.sinkSchema); + for (int i = 0; i < this.sinkSchema.getFields().size(); i++) { + record.put(i, row.get(i)); + } + try { + this.parquetWriter.write(record); + this.numOutputRows += 1; + } catch (IOException ioException) { + throw INTERNAL.withCause(ioException).asRuntimeException(); + } + break; + default: + throw UNIMPLEMENTED + .withDescription("unsupported operation: " + row.getOp()) + .asRuntimeException(); + } + } catch (Exception e) { + throw new RuntimeException(e); } } } diff --git a/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSinkFactory.java b/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSinkFactory.java index e29c522112949..57b83bf8d0ca2 100644 --- a/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSinkFactory.java +++ b/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSinkFactory.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static io.grpc.Status.*; @@ -6,8 +20,10 @@ import com.risingwave.connector.api.sink.SinkBase; import com.risingwave.connector.api.sink.SinkFactory; import com.risingwave.java.utils.MinioUrlParser; +import com.risingwave.proto.Catalog.SinkType; import io.delta.standalone.DeltaLog; import io.delta.standalone.types.StructType; +import io.grpc.Status; import java.nio.file.Paths; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -22,9 +38,6 @@ public class DeltaLakeSinkFactory implements SinkFactory { @Override public SinkBase create(TableSchema tableSchema, Map tableProperties) { - // TODO: Remove this call to `validate` after supporting sink validation in risingwave. 
- validate(tableSchema, tableProperties); - String location = tableProperties.get(LOCATION_PROP); String locationType = tableProperties.get(LOCATION_TYPE_PROP); @@ -38,7 +51,14 @@ public SinkBase create(TableSchema tableSchema, Map tablePropert } @Override - public void validate(TableSchema tableSchema, Map tableProperties) { + public void validate( + TableSchema tableSchema, Map tableProperties, SinkType sinkType) { + if (sinkType != SinkType.APPEND_ONLY && sinkType != SinkType.FORCE_APPEND_ONLY) { + throw Status.INVALID_ARGUMENT + .withDescription("only append-only delta lake sink is supported") + .asRuntimeException(); + } + if (!tableProperties.containsKey(LOCATION_PROP) || !tableProperties.containsKey(LOCATION_TYPE_PROP)) { throw INVALID_ARGUMENT diff --git a/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSinkUtil.java b/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSinkUtil.java index 1bc3918e11efc..4548b816a131c 100644 --- a/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSinkUtil.java +++ b/java/connector-node/risingwave-sink-deltalake/src/main/java/com/risingwave/connector/DeltaLakeSinkUtil.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static io.grpc.Status.*; diff --git a/java/connector-node/risingwave-sink-deltalake/src/test/java/com/risingwave/connector/DeltaLakeLocalSinkTest.java b/java/connector-node/risingwave-sink-deltalake/src/test/java/com/risingwave/connector/DeltaLakeLocalSinkTest.java index 5b8b4bdca9162..a3c614bdac308 100644 --- a/java/connector-node/risingwave-sink-deltalake/src/test/java/com/risingwave/connector/DeltaLakeLocalSinkTest.java +++ b/java/connector-node/risingwave-sink-deltalake/src/test/java/com/risingwave/connector/DeltaLakeLocalSinkTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
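+// Note: SinkFactory.validate now receives the SinkType from the catalog proto,
+// and the delta lake factory rejects anything that is not append-only. Guard
+// sketch, copied from DeltaLakeSinkFactory above:
+//
+//   if (sinkType != SinkType.APPEND_ONLY && sinkType != SinkType.FORCE_APPEND_ONLY) {
+//       throw Status.INVALID_ARGUMENT
+//               .withDescription("only append-only delta lake sink is supported")
+//               .asRuntimeException();
+//   }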
+ package com.risingwave.connector; import static com.risingwave.connector.DeltaLakeSinkFactoryTest.*; @@ -6,7 +20,7 @@ import com.google.common.collect.Iterators; import com.risingwave.connector.api.TableSchema; -import com.risingwave.connector.api.sink.ArraySinkrow; +import com.risingwave.connector.api.sink.ArraySinkRow; import io.delta.standalone.DeltaLog; import java.io.IOException; import java.nio.file.Files; @@ -52,8 +66,8 @@ public void testWrite() throws IOException { sink.write( Iterators.forArray( - new ArraySinkrow(Op.INSERT, 1, "Alice"), - new ArraySinkrow(Op.INSERT, 2, "Bob"))); + new ArraySinkRow(Op.INSERT, 1, "Alice"), + new ArraySinkRow(Op.INSERT, 2, "Bob"))); sink.sync(); List rows = List.of(RowFactory.create(1, "Alice"), RowFactory.create(2, "Bob")); @@ -79,14 +93,14 @@ public void testSync() throws IOException { createStructField("name", StringType, false), }); - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 1, "Alice"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 1, "Alice"))); validateTableWithSpark(location, List.of(), schema); sink.sync(); List rows = List.of(RowFactory.create(1, "Alice")); validateTableWithSpark(location, rows, schema); - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 2, "Bob"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 2, "Bob"))); sink.sync(); rows = List.of(RowFactory.create(1, "Alice"), RowFactory.create(2, "Bob")); validateTableWithSpark(location, rows, schema); diff --git a/java/connector-node/risingwave-sink-deltalake/src/test/java/com/risingwave/connector/DeltaLakeSinkFactoryTest.java b/java/connector-node/risingwave-sink-deltalake/src/test/java/com/risingwave/connector/DeltaLakeSinkFactoryTest.java index af5a314dec373..0c1d685eb236e 100644 --- a/java/connector-node/risingwave-sink-deltalake/src/test/java/com/risingwave/connector/DeltaLakeSinkFactoryTest.java +++ b/java/connector-node/risingwave-sink-deltalake/src/test/java/com/risingwave/connector/DeltaLakeSinkFactoryTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package com.risingwave.connector; import static io.grpc.Status.INVALID_ARGUMENT; diff --git a/java/connector-node/risingwave-sink-iceberg/pom.xml b/java/connector-node/risingwave-sink-iceberg/pom.xml index 0d8567d5a3136..af7478e35a6be 100644 --- a/java/connector-node/risingwave-sink-iceberg/pom.xml +++ b/java/connector-node/risingwave-sink-iceberg/pom.xml @@ -104,8 +104,8 @@ test - org.slf4j - slf4j-api + org.apache.logging.log4j + log4j-api org.apache.logging.log4j diff --git a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/IcebergSink.java b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/IcebergSink.java index 769db16ef7eb1..ab7965697b7a8 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/IcebergSink.java +++ b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/IcebergSink.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static io.grpc.Status.INTERNAL; @@ -57,60 +71,65 @@ public IcebergSink( @Override public void write(Iterator rows) { while (rows.hasNext()) { - SinkRow row = rows.next(); - switch (row.getOp()) { - case INSERT: - Record record = GenericRecord.create(rowSchema); - if (row.size() != getTableSchema().getColumnNames().length) { - throw INTERNAL.withDescription("row values do not match table schema") - .asRuntimeException(); - } - for (int i = 0; i < rowSchema.columns().size(); i++) { - record.set(i, row.get(i)); - } - PartitionKey partitionKey = - new PartitionKey( - transaction.table().spec(), transaction.table().schema()); - partitionKey.partition(record); - DataWriter dataWriter; - if (dataWriterMap.containsKey(partitionKey)) { - dataWriter = dataWriterMap.get(partitionKey); - } else { - try { - String filename = fileFormat.addExtension(UUID.randomUUID().toString()); - OutputFile outputFile = - transaction - .table() - .io() - .newOutputFile( - transaction.table().location() - + "/data/" - + transaction - .table() - .spec() - .partitionToPath(partitionKey) - + "/" - + filename); - dataWriter = - Parquet.writeData(outputFile) - .schema(rowSchema) - .withSpec(transaction.table().spec()) - .withPartition(partitionKey) - .createWriterFunc(GenericParquetWriter::buildWriter) - .overwrite() - .build(); - } catch (Exception e) { - throw INTERNAL.withDescription("failed to create dataWriter") + try (SinkRow row = rows.next()) { + switch (row.getOp()) { + case INSERT: + Record record = GenericRecord.create(rowSchema); + if (row.size() != getTableSchema().getColumnNames().length) { + throw INTERNAL.withDescription("row values do not match table schema") .asRuntimeException(); } - dataWriterMap.put(partitionKey, dataWriter); - } - dataWriter.write(record); - break; - default: - throw UNIMPLEMENTED - .withDescription("unsupported operation: " + row.getOp()) - .asRuntimeException(); + for (int i = 0; i < rowSchema.columns().size(); 
i++) { + record.set(i, row.get(i)); + } + PartitionKey partitionKey = + new PartitionKey( + transaction.table().spec(), transaction.table().schema()); + partitionKey.partition(record); + DataWriter dataWriter; + if (dataWriterMap.containsKey(partitionKey)) { + dataWriter = dataWriterMap.get(partitionKey); + } else { + try { + String filename = + fileFormat.addExtension(UUID.randomUUID().toString()); + OutputFile outputFile = + transaction + .table() + .io() + .newOutputFile( + transaction.table().location() + + "/data/" + + transaction + .table() + .spec() + .partitionToPath( + partitionKey) + + "/" + + filename); + dataWriter = + Parquet.writeData(outputFile) + .schema(rowSchema) + .withSpec(transaction.table().spec()) + .withPartition(partitionKey) + .createWriterFunc(GenericParquetWriter::buildWriter) + .overwrite() + .build(); + } catch (Exception e) { + throw INTERNAL.withDescription("failed to create dataWriter") + .asRuntimeException(); + } + dataWriterMap.put(partitionKey, dataWriter); + } + dataWriter.write(record); + break; + default: + throw UNIMPLEMENTED + .withDescription("unsupported operation: " + row.getOp()) + .asRuntimeException(); + } + } catch (Exception e) { + throw new RuntimeException(e); } } } diff --git a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/IcebergSinkFactory.java b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/IcebergSinkFactory.java index af8bf16c29c8d..1cdb1edf0e468 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/IcebergSinkFactory.java +++ b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/IcebergSinkFactory.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
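+// Note: the factory below now derives everything from the warehouse path URI:
+// an s3:// prefix is rewritten to s3a://, and the scheme selects the Hadoop
+// configuration. Normalization sketch, as in getWarehousePath below:
+//
+//   String warehousePath = tableProperties.get(WAREHOUSE_PATH_PROP);
+//   if (warehousePath.startsWith("s3://")) {
+//       warehousePath = warehousePath.replace("s3://", "s3a://");
+//   }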
+ package com.risingwave.connector; import static io.grpc.Status.INVALID_ARGUMENT; @@ -6,8 +20,10 @@ import com.risingwave.connector.api.TableSchema; import com.risingwave.connector.api.sink.SinkBase; import com.risingwave.connector.api.sink.SinkFactory; -import com.risingwave.java.utils.MinioUrlParser; +import com.risingwave.proto.Catalog.SinkType; import io.grpc.Status; +import java.net.URI; +import java.net.URISyntaxException; import java.util.Map; import java.util.Set; import org.apache.hadoop.conf.Configuration; @@ -23,12 +39,16 @@ public class IcebergSinkFactory implements SinkFactory { private static final Logger LOG = LoggerFactory.getLogger(IcebergSinkFactory.class); - public static final String SINK_MODE_PROP = "sink.mode"; - public static final String LOCATION_TYPE_PROP = "location.type"; + public static final String SINK_TYPE_PROP = "type"; public static final String WAREHOUSE_PATH_PROP = "warehouse.path"; public static final String DATABASE_NAME_PROP = "database.name"; public static final String TABLE_NAME_PROP = "table.name"; + public static final String S3_ACCESS_KEY_PROP = "s3.access.key"; + public static final String S3_SECRET_KEY_PROP = "s3.secret.key"; + public static final String S3_ENDPOINT_PROP = "s3.endpoint"; public static final FileFormat FILE_FORMAT = FileFormat.PARQUET; + + // hadoop catalog config private static final String confEndpoint = "fs.s3a.endpoint"; private static final String confKey = "fs.s3a.access.key"; private static final String confSecret = "fs.s3a.secret.key"; @@ -38,24 +58,24 @@ public class IcebergSinkFactory implements SinkFactory { @Override public SinkBase create(TableSchema tableSchema, Map tableProperties) { - // TODO: Remove this call to `validate` after supporting sink validation in risingwave. - validate(tableSchema, tableProperties); - - String mode = tableProperties.get(SINK_MODE_PROP); - String location = tableProperties.get(LOCATION_TYPE_PROP); - String warehousePath = tableProperties.get(WAREHOUSE_PATH_PROP); + String mode = tableProperties.get(SINK_TYPE_PROP); + String warehousePath = getWarehousePath(tableProperties); String databaseName = tableProperties.get(DATABASE_NAME_PROP); String tableName = tableProperties.get(TABLE_NAME_PROP); + String scheme = parseWarehousePathScheme(warehousePath); + TableIdentifier tableIdentifier = TableIdentifier.of(databaseName, tableName); - HadoopCatalog hadoopCatalog = createHadoopCatalog(location, warehousePath); + Configuration hadoopConf = createHadoopConf(scheme, tableProperties); + HadoopCatalog hadoopCatalog = new HadoopCatalog(hadoopConf, warehousePath); Table icebergTable; try { icebergTable = hadoopCatalog.loadTable(tableIdentifier); + hadoopCatalog.close(); } catch (Exception e) { - LOG.error("load table error: {}", e); throw Status.FAILED_PRECONDITION - .withDescription("failed to load iceberg table") + .withDescription( + String.format("failed to load iceberg table: %s", e.getMessage())) .withCause(e) .asRuntimeException(); } @@ -63,102 +83,153 @@ public SinkBase create(TableSchema tableSchema, Map tablePropert if (mode.equals("append-only")) { return new IcebergSink(tableSchema, hadoopCatalog, icebergTable, FILE_FORMAT); } else if (mode.equals("upsert")) { - return new UpsertIcebergSink(tableSchema, hadoopCatalog, icebergTable, FILE_FORMAT); + return new UpsertIcebergSink( + tableSchema, hadoopCatalog, + icebergTable, FILE_FORMAT); } throw UNIMPLEMENTED.withDescription("unsupported mode: " + mode).asRuntimeException(); } @Override - public void validate(TableSchema tableSchema, 
Map tableProperties) { - if (!tableProperties.containsKey(SINK_MODE_PROP) // only append-only, upsert - || !tableProperties.containsKey(LOCATION_TYPE_PROP) // only local, s3, minio + public void validate( + TableSchema tableSchema, Map tableProperties, SinkType sinkType) { + if (!tableProperties.containsKey(SINK_TYPE_PROP) // only append-only, upsert || !tableProperties.containsKey(WAREHOUSE_PATH_PROP) || !tableProperties.containsKey(DATABASE_NAME_PROP) || !tableProperties.containsKey(TABLE_NAME_PROP)) { throw INVALID_ARGUMENT .withDescription( String.format( - "%s, %s, %s, %s or %s is not specified", - SINK_MODE_PROP, - LOCATION_TYPE_PROP, + "%s, %s, %s or %s is not specified", + SINK_TYPE_PROP, WAREHOUSE_PATH_PROP, DATABASE_NAME_PROP, TABLE_NAME_PROP)) .asRuntimeException(); } - String mode = tableProperties.get(SINK_MODE_PROP); - String location = tableProperties.get(LOCATION_TYPE_PROP); - String warehousePath = tableProperties.get(WAREHOUSE_PATH_PROP); + String mode = tableProperties.get(SINK_TYPE_PROP); String databaseName = tableProperties.get(DATABASE_NAME_PROP); String tableName = tableProperties.get(TABLE_NAME_PROP); + String warehousePath = getWarehousePath(tableProperties); + + String schema = parseWarehousePathScheme(warehousePath); TableIdentifier tableIdentifier = TableIdentifier.of(databaseName, tableName); - HadoopCatalog hadoopCatalog = createHadoopCatalog(location, warehousePath); - Table icebergTable; - try { - icebergTable = hadoopCatalog.loadTable(tableIdentifier); + Configuration hadoopConf = createHadoopConf(schema, tableProperties); + + try (HadoopCatalog hadoopCatalog = new HadoopCatalog(hadoopConf, warehousePath); ) { + + Table icebergTable = hadoopCatalog.loadTable(tableIdentifier); + + // Check that all columns in tableSchema exist in the iceberg table. + for (String columnName : tableSchema.getColumnNames()) { + if (icebergTable.schema().findField(columnName) == null) { + throw Status.FAILED_PRECONDITION + .withDescription( + String.format( + "table schema does not match. Column %s not found in iceberg table", + columnName)) + .asRuntimeException(); + } + } + + // Check that all required columns in the iceberg table exist in tableSchema. 
+ Set columnNames = Set.of(tableSchema.getColumnNames()); + for (Types.NestedField column : icebergTable.schema().columns()) { + if (column.isRequired() && !columnNames.contains(column.name())) { + throw Status.FAILED_PRECONDITION + .withDescription( + String.format("missing a required field %s", column.name())) + .asRuntimeException(); + } + } + } catch (Exception e) { - LOG.error("load table error: {}", e); - throw Status.FAILED_PRECONDITION - .withDescription("failed to load iceberg table") + throw Status.INTERNAL + .withDescription( + String.format("failed to load iceberg table: %s", e.getMessage())) .withCause(e) .asRuntimeException(); } - // check that all columns in tableSchema exist in the iceberg table - for (String columnName : tableSchema.getColumnNames()) { - if (icebergTable.schema().findField(columnName) == null) { - LOG.error("column not found: {}", columnName); - throw Status.FAILED_PRECONDITION - .withDescription("table schema does not match") - .asRuntimeException(); - } - } - // check that all required columns in the iceberg table exist in tableSchema - Set columnNames = Set.of(tableSchema.getColumnNames()); - for (Types.NestedField column : icebergTable.schema().columns()) { - if (column.isRequired() && !columnNames.contains(column.name())) { - LOG.error("required column not found: {}", column.name()); - throw Status.FAILED_PRECONDITION - .withDescription( - String.format("missing a required field %s", column.name())) - .asRuntimeException(); - } - } if (!mode.equals("append-only") && !mode.equals("upsert")) { throw UNIMPLEMENTED.withDescription("unsupported mode: " + mode).asRuntimeException(); } - if (mode.equals("upsert")) { - if (tableSchema.getPrimaryKeys().isEmpty()) { - throw Status.FAILED_PRECONDITION - .withDescription("no primary keys for upsert mode") + switch (sinkType) { + case UPSERT: + // For upsert iceberg sink, the user must specify its primary key explicitly. + if (tableSchema.getPrimaryKeys().isEmpty()) { + throw Status.INVALID_ARGUMENT + .withDescription("please define primary key for upsert iceberg sink") + .asRuntimeException(); + } + break; + case APPEND_ONLY: + case FORCE_APPEND_ONLY: + break; + default: + throw Status.INTERNAL.asRuntimeException(); + } + } + + private static String getWarehousePath(Map tableProperties) { + String warehousePath = tableProperties.get(WAREHOUSE_PATH_PROP); + // unify s3 and s3a + if (warehousePath.startsWith("s3://")) { + return warehousePath.replace("s3://", "s3a://"); + } + return warehousePath; + } + + private static String parseWarehousePathScheme(String warehousePath) { + try { + URI uri = new URI(warehousePath); + String scheme = uri.getScheme(); + if (scheme == null) { + throw INVALID_ARGUMENT + .withDescription("warehouse path should set scheme (e.g. 
s3a://)") .asRuntimeException(); } + return scheme; + } catch (URISyntaxException e) { + throw INVALID_ARGUMENT + .withDescription( + String.format("invalid warehouse path uri: %s", e.getMessage())) + .withCause(e) + .asRuntimeException(); } } - private HadoopCatalog createHadoopCatalog(String location, String warehousePath) { - Configuration hadoopConf = new Configuration(); - switch (location) { - case "local": - return new HadoopCatalog(hadoopConf, warehousePath); - case "s3": + private Configuration createHadoopConf(String scheme, Map tableProperties) { + switch (scheme) { + case "file": + return new Configuration(); + case "s3a": + Configuration hadoopConf = new Configuration(); hadoopConf.set(confIoImpl, s3FileIOImpl); - String s3aPath = "s3a:" + warehousePath.substring(warehousePath.indexOf('/')); - return new HadoopCatalog(hadoopConf, s3aPath); - case "minio": - hadoopConf.set(confIoImpl, s3FileIOImpl); - MinioUrlParser minioUrlParser = new MinioUrlParser(warehousePath); - hadoopConf.set(confEndpoint, minioUrlParser.getEndpoint()); - hadoopConf.set(confKey, minioUrlParser.getKey()); - hadoopConf.set(confSecret, minioUrlParser.getSecret()); hadoopConf.setBoolean(confPathStyleAccess, true); - return new HadoopCatalog(hadoopConf, "s3a://" + minioUrlParser.getBucket()); + if (!tableProperties.containsKey(S3_ENDPOINT_PROP)) { + throw INVALID_ARGUMENT + .withDescription( + String.format( + "Should set %s for warehouse with scheme %s", + S3_ENDPOINT_PROP, scheme)) + .asRuntimeException(); + } + hadoopConf.set(confEndpoint, tableProperties.get(S3_ENDPOINT_PROP)); + if (tableProperties.containsKey(S3_ACCESS_KEY_PROP)) { + hadoopConf.set(confKey, tableProperties.get(S3_ACCESS_KEY_PROP)); + } + if (tableProperties.containsKey(S3_SECRET_KEY_PROP)) { + hadoopConf.set(confSecret, tableProperties.get(S3_SECRET_KEY_PROP)); + } + return hadoopConf; default: throw UNIMPLEMENTED - .withDescription("unsupported iceberg sink type: " + location) + .withDescription( + String.format("scheme %s not supported for warehouse path", scheme)) .asRuntimeException(); } } diff --git a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/SinkRowMap.java b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/SinkRowMap.java index 9ea10e4a7e456..a817e715a45d3 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/SinkRowMap.java +++ b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/SinkRowMap.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
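+// Note: SinkRowMap below now buffers Iceberg Records instead of SinkRows, so
+// callers convert each row up front (and may close it immediately).
+// Conversion sketch, matching the per-column copy used by the iceberg sinks
+// in this patch:
+//
+//   Record record = GenericRecord.create(rowSchema);
+//   for (int i = 0; i < rowSchema.columns().size(); i++) {
+//       record.set(i, row.get(i));
+//   }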
+ package com.risingwave.connector; import com.risingwave.connector.api.PkComparator; @@ -5,6 +19,7 @@ import io.grpc.Status; import java.util.List; import java.util.TreeMap; +import org.apache.iceberg.data.Record; public class SinkRowMap { TreeMap>, SinkRowOp> map = new TreeMap<>(new PkComparator()); @@ -13,7 +28,7 @@ public void clear() { map.clear(); } - public void insert(List> key, SinkRow row) { + public void insert(List> key, Record row) { if (!map.containsKey(key)) { map.put(key, SinkRowOp.insertOp(row)); } else { @@ -28,19 +43,20 @@ public void insert(List> key, SinkRow row) { } } - public void delete(List> key, SinkRow row) { + public void delete(List> key, Record row) { if (!map.containsKey(key)) { map.put(key, SinkRowOp.deleteOp(row)); } else { SinkRowOp sinkRowOp = map.get(key); - SinkRow insert = sinkRowOp.getInsert(); + Record insert = sinkRowOp.getInsert(); if (insert == null) { throw Status.FAILED_PRECONDITION .withDescription("try to double delete a primary key") .asRuntimeException(); } - assertRowValuesEqual(insert, row); - SinkRow delete = sinkRowOp.getDelete(); + // TODO: may enable it again + // assertRowValuesEqual(insert, row); + Record delete = sinkRowOp.getDelete(); if (delete != null) { map.put(key, SinkRowOp.deleteOp(delete)); } else { diff --git a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/SinkRowOp.java b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/SinkRowOp.java index 5a167c7bc2985..67b42b078914a 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/SinkRowOp.java +++ b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/SinkRowOp.java @@ -1,13 +1,27 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
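+// Note: SinkRowOp likewise holds Records now; an update is modeled as a
+// delete/insert pair. Construction sketch (oldRecord and newRecord are
+// placeholder names):
+//
+//   SinkRowOp op = SinkRowOp.updateOp(oldRecord, newRecord);
+//   Record del = op.getDelete();   // == oldRecord
+//   Record ins = op.getInsert();   // == newRecord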
+ package com.risingwave.connector; -import com.risingwave.connector.api.sink.SinkRow; import io.grpc.Status; +import org.apache.iceberg.data.Record; public class SinkRowOp { - private final SinkRow delete; - private final SinkRow insert; + private final Record delete; + private final Record insert; - public static SinkRowOp insertOp(SinkRow row) { + public static SinkRowOp insertOp(Record row) { if (row == null) { throw Status.FAILED_PRECONDITION .withDescription("row op must not be null to initialize insertOp") @@ -16,7 +30,7 @@ public static SinkRowOp insertOp(SinkRow row) { return new SinkRowOp(null, row); } - public static SinkRowOp deleteOp(SinkRow row) { + public static SinkRowOp deleteOp(Record row) { if (row == null) { throw Status.FAILED_PRECONDITION .withDescription("row op must not be null to initialize deleteOp") @@ -25,7 +39,7 @@ public static SinkRowOp deleteOp(SinkRow row) { return new SinkRowOp(row, null); } - public static SinkRowOp updateOp(SinkRow delete, SinkRow insert) { + public static SinkRowOp updateOp(Record delete, Record insert) { if (delete == null || insert == null) { throw Status.FAILED_PRECONDITION .withDescription("row ops must not be null initialize updateOp") @@ -34,7 +48,7 @@ public static SinkRowOp updateOp(SinkRow delete, SinkRow insert) { return new SinkRowOp(delete, insert); } - private SinkRowOp(SinkRow delete, SinkRow insert) { + private SinkRowOp(Record delete, Record insert) { this.delete = delete; this.insert = insert; } @@ -43,11 +57,11 @@ public boolean isDelete() { return insert == null && delete != null; } - public SinkRow getDelete() { + public Record getDelete() { return delete; } - public SinkRow getInsert() { + public Record getInsert() { return insert; } } diff --git a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/UpsertIcebergSink.java b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/UpsertIcebergSink.java index bf3a705cac380..8c26f7b3659e2 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/UpsertIcebergSink.java +++ b/java/connector-node/risingwave-sink-iceberg/src/main/java/com/risingwave/connector/UpsertIcebergSink.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
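+// Note: write() below enforces that an UPDATE_DELETE arrives immediately
+// before its matching UPDATE_INSERT, tracked by the updateBufferExists flag.
+// Expected input shape, taken from the tests later in this patch:
+//
+//   sink.write(Iterators.forArray(
+//           new ArraySinkRow(Op.UPDATE_DELETE, 1, "Alice"),
+//           new ArraySinkRow(Op.UPDATE_INSERT, 1, "Clare")));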
+ package com.risingwave.connector; import static io.grpc.Status.INTERNAL; @@ -61,7 +75,7 @@ public UpsertIcebergSink( .collect(Collectors.toList()); } - private Record newRecord(Schema schema, SinkRow row) { + private static Record newRecord(Schema schema, SinkRow row) { Record record = GenericRecord.create(schema); for (int i = 0; i < schema.columns().size(); i++) { record.set(i, row.get(i)); @@ -141,52 +155,57 @@ private List> getKeyFromRow(SinkRow row) { @Override public void write(Iterator rows) { while (rows.hasNext()) { - SinkRow row = rows.next(); - if (row.size() != getTableSchema().getColumnNames().length) { - throw Status.FAILED_PRECONDITION - .withDescription("row values do not match table schema") - .asRuntimeException(); - } - Record record = newRecord(rowSchema, row); - PartitionKey partitionKey = - new PartitionKey(transaction.table().spec(), transaction.table().schema()); - partitionKey.partition(record); - SinkRowMap sinkRowMap; - if (sinkRowMapByPartition.containsKey(partitionKey)) { - sinkRowMap = sinkRowMapByPartition.get(partitionKey); - } else { - sinkRowMap = new SinkRowMap(); - sinkRowMapByPartition.put(partitionKey, sinkRowMap); - } - switch (row.getOp()) { - case INSERT: - sinkRowMap.insert(getKeyFromRow(row), row); - break; - case DELETE: - sinkRowMap.delete(getKeyFromRow(row), row); - break; - case UPDATE_DELETE: - if (updateBufferExists) { - throw Status.FAILED_PRECONDITION - .withDescription("an UPDATE_INSERT should precede an UPDATE_DELETE") - .asRuntimeException(); - } - sinkRowMap.delete(getKeyFromRow(row), row); - updateBufferExists = true; - break; - case UPDATE_INSERT: - if (!updateBufferExists) { - throw Status.FAILED_PRECONDITION - .withDescription("an UPDATE_INSERT should precede an UPDATE_DELETE") - .asRuntimeException(); - } - sinkRowMap.insert(getKeyFromRow(row), row); - updateBufferExists = false; - break; - default: - throw UNIMPLEMENTED - .withDescription("unsupported operation: " + row.getOp()) + try (SinkRow row = rows.next()) { + if (row.size() != getTableSchema().getColumnNames().length) { + throw Status.FAILED_PRECONDITION + .withDescription("row values do not match table schema") .asRuntimeException(); + } + Record record = newRecord(rowSchema, row); + PartitionKey partitionKey = + new PartitionKey(transaction.table().spec(), transaction.table().schema()); + partitionKey.partition(record); + SinkRowMap sinkRowMap; + if (sinkRowMapByPartition.containsKey(partitionKey)) { + sinkRowMap = sinkRowMapByPartition.get(partitionKey); + } else { + sinkRowMap = new SinkRowMap(); + sinkRowMapByPartition.put(partitionKey, sinkRowMap); + } + switch (row.getOp()) { + case INSERT: + sinkRowMap.insert(getKeyFromRow(row), newRecord(rowSchema, row)); + break; + case DELETE: + sinkRowMap.delete(getKeyFromRow(row), newRecord(deleteRowSchema, row)); + break; + case UPDATE_DELETE: + if (updateBufferExists) { + throw Status.FAILED_PRECONDITION + .withDescription( + "an UPDATE_INSERT should precede an UPDATE_DELETE") + .asRuntimeException(); + } + sinkRowMap.delete(getKeyFromRow(row), newRecord(deleteRowSchema, row)); + updateBufferExists = true; + break; + case UPDATE_INSERT: + if (!updateBufferExists) { + throw Status.FAILED_PRECONDITION + .withDescription( + "an UPDATE_INSERT should precede an UPDATE_DELETE") + .asRuntimeException(); + } + sinkRowMap.insert(getKeyFromRow(row), newRecord(rowSchema, row)); + updateBufferExists = false; + break; + default: + throw UNIMPLEMENTED + .withDescription("unsupported operation: " + row.getOp()) + .asRuntimeException(); + 
} + } catch (Exception e) { + throw new RuntimeException(e); } } } @@ -198,13 +217,13 @@ public void sync() { newEqualityDeleteWriter(entry.getKey()); DataWriter dataWriter = newDataWriter(entry.getKey()); for (SinkRowOp sinkRowOp : entry.getValue().map.values()) { - SinkRow insert = sinkRowOp.getInsert(); - SinkRow delete = sinkRowOp.getDelete(); + Record insert = sinkRowOp.getInsert(); + Record delete = sinkRowOp.getDelete(); if (insert != null) { - dataWriter.write(newRecord(rowSchema, insert)); + dataWriter.write(insert); } if (delete != null) { - equalityDeleteWriter.write(newRecord(deleteRowSchema, delete)); + equalityDeleteWriter.write(delete); } } try { diff --git a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkFactoryTest.java b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkFactoryTest.java index 1c412cbf3b950..832ac2746f973 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkFactoryTest.java +++ b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkFactoryTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static org.junit.Assert.*; @@ -16,10 +30,9 @@ import org.junit.Test; public class IcebergSinkFactoryTest { - static String warehousePath = "/tmp/rw-sinknode/iceberg-sink/warehouse"; + static String warehousePath = "file:///tmp/rw-sinknode/iceberg-sink/warehouse"; static String databaseName = "demo_db"; static String tableName = "demo_table"; - static String locationType = "local"; static String sinkMode = "append-only"; static Schema icebergTableSchema = new Schema( @@ -51,10 +64,8 @@ public void testCreate() throws IOException { sinkFactory.create( TableSchema.getMockTableSchema(), Map.of( - IcebergSinkFactory.SINK_MODE_PROP, + IcebergSinkFactory.SINK_TYPE_PROP, sinkMode, - IcebergSinkFactory.LOCATION_TYPE_PROP, - locationType, IcebergSinkFactory.WAREHOUSE_PATH_PROP, warehousePath, IcebergSinkFactory.DATABASE_NAME_PROP, diff --git a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkLocalTest.java b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkLocalTest.java index ad813d13b85c8..78860f0801cef 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkLocalTest.java +++ b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkLocalTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static com.risingwave.proto.Data.*; @@ -7,7 +21,7 @@ import com.google.common.collect.Iterators; import com.google.common.collect.Sets; import com.risingwave.connector.api.TableSchema; -import com.risingwave.connector.api.sink.ArraySinkrow; +import com.risingwave.connector.api.sink.ArraySinkRow; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; @@ -101,7 +115,7 @@ public void testSync() throws IOException { FileFormat.PARQUET); try { - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 1, "Alice"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 1, "Alice"))); sink.sync(); Record record1 = GenericRecord.create(icebergTableSchema); @@ -111,7 +125,7 @@ public void testSync() throws IOException { validateTableWithIceberg(expected); validateTableWithSpark(expected); - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 2, "Bob"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 2, "Bob"))); validateTableWithIceberg(expected); validateTableWithSpark(expected); @@ -146,8 +160,8 @@ public void testWrite() throws IOException { try { sink.write( Iterators.forArray( - new ArraySinkrow(Op.INSERT, 1, "Alice"), - new ArraySinkrow(Op.INSERT, 2, "Bob"))); + new ArraySinkRow(Op.INSERT, 1, "Alice"), + new ArraySinkRow(Op.INSERT, 2, "Bob"))); sink.sync(); Record record1 = GenericRecord.create(icebergTableSchema); diff --git a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkPartitionTest.java b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkPartitionTest.java index 46b0a4356893c..54f2d47f154a7 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkPartitionTest.java +++ b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/IcebergSinkPartitionTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
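+// Note: partitioned writes route each record through a PartitionKey before
+// picking a writer, keeping one DataWriter per partition. Routing sketch,
+// following IcebergSink.write above:
+//
+//   PartitionKey partitionKey =
+//           new PartitionKey(transaction.table().spec(), transaction.table().schema());
+//   partitionKey.partition(record);
+//   DataWriter dataWriter = dataWriterMap.get(partitionKey);   // create and cache on miss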
+ package com.risingwave.connector; import static com.risingwave.proto.Data.*; @@ -8,7 +22,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.risingwave.connector.api.TableSchema; -import com.risingwave.connector.api.sink.ArraySinkrow; +import com.risingwave.connector.api.sink.ArraySinkRow; import com.risingwave.proto.Data; import java.io.IOException; import java.nio.file.Files; @@ -120,7 +134,7 @@ public void testSync() throws IOException { FileFormat.PARQUET); try { - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 1, "Alice", "aaa"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 1, "Alice", "aaa"))); sink.sync(); Record record1 = GenericRecord.create(icebergTableSchema); @@ -131,7 +145,7 @@ public void testSync() throws IOException { validateTableWithIceberg(expected); validateTableWithSpark(expected); - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 2, "Bob", "bbb"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 2, "Bob", "bbb"))); validateTableWithIceberg(expected); validateTableWithSpark(expected); @@ -167,8 +181,8 @@ public void testWrite() throws IOException { try { sink.write( Iterators.forArray( - new ArraySinkrow(Op.INSERT, 1, "Alice", "aaa"), - new ArraySinkrow(Op.INSERT, 2, "Bob", "bbb"))); + new ArraySinkRow(Op.INSERT, 1, "Alice", "aaa"), + new ArraySinkRow(Op.INSERT, 2, "Bob", "bbb"))); sink.sync(); Record record1 = GenericRecord.create(icebergTableSchema); diff --git a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/SinkRowMapTest.java b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/SinkRowMapTest.java index 3a349ef16d384..c5048aca40762 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/SinkRowMapTest.java +++ b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/SinkRowMapTest.java @@ -1,12 +1,30 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package com.risingwave.connector; import static org.junit.Assert.assertEquals; -import com.risingwave.connector.api.sink.ArraySinkrow; +import com.risingwave.connector.api.sink.ArraySinkRow; import com.risingwave.connector.api.sink.SinkRow; import com.risingwave.proto.Data; import java.util.ArrayList; import java.util.List; +import org.apache.iceberg.Schema; +import org.apache.iceberg.data.GenericRecord; +import org.apache.iceberg.data.Record; +import org.apache.iceberg.types.Types; import org.junit.Assert; import org.junit.Test; @@ -14,45 +32,61 @@ public class SinkRowMapTest { @Test public void testInsert() { SinkRowMap sinkRowMap = new SinkRowMap(); - SinkRow row = new ArraySinkrow(Data.Op.OP_UNSPECIFIED, 1); + SinkRow row = new ArraySinkRow(Data.Op.OP_UNSPECIFIED, 1); List> key = new ArrayList<>(); key.add((Comparable) row.get(0)); + Schema schema = new Schema(Types.NestedField.optional(0, "id", Types.IntegerType.get())); + Record r = GenericRecord.create(schema); + r.set(0, row.get(0)); - sinkRowMap.insert(key, row); + sinkRowMap.insert(key, r); assertEquals(1, sinkRowMap.map.size()); assertEquals(null, sinkRowMap.map.get(key).getDelete()); - assertEquals(row, sinkRowMap.map.get(key).getInsert()); + assertEquals(r, sinkRowMap.map.get(key).getInsert()); } @Test public void testInsertAfterDelete() { SinkRowMap sinkRowMap = new SinkRowMap(); + Schema schema = + new Schema( + Types.NestedField.optional(0, "id", Types.IntegerType.get()), + Types.NestedField.optional(1, "name", Types.StringType.get())); - SinkRow row1 = new ArraySinkrow(Data.Op.OP_UNSPECIFIED, 1, "Alice"); + SinkRow row1 = new ArraySinkRow(Data.Op.OP_UNSPECIFIED, 1, "Alice"); List> key1 = new ArrayList<>(); key1.add((Comparable) row1.get(0)); - SinkRow row2 = new ArraySinkrow(Data.Op.OP_UNSPECIFIED, 1, "Bob"); + Record r1 = GenericRecord.create(schema); + r1.set(0, row1.get(0)); + r1.set(1, row1.get(1)); + SinkRow row2 = new ArraySinkRow(Data.Op.OP_UNSPECIFIED, 1, "Bob"); List> key2 = new ArrayList<>(); key2.add((Comparable) row2.get(0)); + Record r2 = GenericRecord.create(schema); + r2.set(0, row2.get(0)); + r2.set(1, row2.get(1)); - sinkRowMap.delete(key1, row1); - sinkRowMap.insert(key1, row2); + sinkRowMap.delete(key1, r1); + sinkRowMap.insert(key1, r2); assertEquals(1, sinkRowMap.map.size()); - assertEquals(row1, sinkRowMap.map.get(key1).getDelete()); - assertEquals(row2, sinkRowMap.map.get(key1).getInsert()); + assertEquals(r1, sinkRowMap.map.get(key1).getDelete()); + assertEquals(r2, sinkRowMap.map.get(key1).getInsert()); } @Test public void testInsertAfterInsert() { SinkRowMap sinkRowMap = new SinkRowMap(); - SinkRow row = new ArraySinkrow(Data.Op.OP_UNSPECIFIED, 1); + SinkRow row = new ArraySinkRow(Data.Op.OP_UNSPECIFIED, 1); List> key = new ArrayList<>(); key.add((Comparable) row.get(0)); + Schema schema = new Schema(Types.NestedField.optional(0, "id", Types.IntegerType.get())); + Record r = GenericRecord.create(schema); + r.set(0, row.get(0)); - sinkRowMap.insert(key, row); + sinkRowMap.insert(key, r); boolean exceptionThrown = false; try { - sinkRowMap.insert(key, row); + sinkRowMap.insert(key, r); } catch (RuntimeException e) { exceptionThrown = true; Assert.assertTrue( @@ -69,27 +103,35 @@ public void testInsertAfterInsert() { public void testDelete() { SinkRowMap sinkRowMap = new SinkRowMap(); - SinkRow row = new ArraySinkrow(Data.Op.OP_UNSPECIFIED, 1); + SinkRow row = new ArraySinkRow(Data.Op.OP_UNSPECIFIED, 1); List> key = new ArrayList<>(); key.add((Comparable) row.get(0)); - sinkRowMap.delete(key, row); 
+ Schema schema = new Schema(Types.NestedField.optional(0, "id", Types.IntegerType.get())); + Record r = GenericRecord.create(schema); + r.set(0, row.get(0)); + + sinkRowMap.delete(key, r); assertEquals(1, sinkRowMap.map.size()); assertEquals(null, sinkRowMap.map.get(key).getInsert()); - assertEquals(row, sinkRowMap.map.get(key).getDelete()); + assertEquals(r, sinkRowMap.map.get(key).getDelete()); } @Test public void testDeleteAfterDelete() { SinkRowMap sinkRowMap = new SinkRowMap(); - SinkRow row = new ArraySinkrow(Data.Op.OP_UNSPECIFIED, 1); + SinkRow row = new ArraySinkRow(Data.Op.OP_UNSPECIFIED, 1); List> key = new ArrayList<>(); key.add((Comparable) row.get(0)); - sinkRowMap.delete(key, row); + Schema schema = new Schema(Types.NestedField.optional(0, "id", Types.IntegerType.get())); + Record r = GenericRecord.create(schema); + r.set(0, row.get(0)); + + sinkRowMap.delete(key, r); boolean exceptionThrown = false; try { - sinkRowMap.delete(key, row); + sinkRowMap.delete(key, r); } catch (RuntimeException e) { exceptionThrown = true; Assert.assertTrue( @@ -104,12 +146,16 @@ public void testDeleteAfterDelete() { public void testDeleteAfterInsert() { SinkRowMap sinkRowMap = new SinkRowMap(); - SinkRow row = new ArraySinkrow(Data.Op.OP_UNSPECIFIED, 1); + SinkRow row = new ArraySinkRow(Data.Op.OP_UNSPECIFIED, 1); List> key = new ArrayList<>(); key.add((Comparable) row.get(0)); - sinkRowMap.insert(key, row); - sinkRowMap.delete(key, row); + Schema schema = new Schema(Types.NestedField.optional(0, "id", Types.IntegerType.get())); + Record r = GenericRecord.create(schema); + r.set(0, row.get(0)); + + sinkRowMap.insert(key, r); + sinkRowMap.delete(key, r); assertEquals(0, sinkRowMap.map.size()); } @@ -117,29 +163,44 @@ public void testDeleteAfterInsert() { public void testDeleteAfterUpdate() { SinkRowMap sinkRowMap = new SinkRowMap(); - SinkRow row1 = new ArraySinkrow(Data.Op.OP_UNSPECIFIED, 1, "Alice"); + Schema schema = + new Schema( + Types.NestedField.optional(0, "id", Types.IntegerType.get()), + Types.NestedField.optional(1, "name", Types.StringType.get())); + + SinkRow row1 = new ArraySinkRow(Data.Op.OP_UNSPECIFIED, 1, "Alice"); List> key1 = new ArrayList<>(); key1.add((Comparable) row1.get(0)); - SinkRow row2 = new ArraySinkrow(Data.Op.OP_UNSPECIFIED, 1, "Clare"); + Record r1 = GenericRecord.create(schema); + r1.set(0, row1.get(0)); + r1.set(1, row1.get(1)); + + SinkRow row2 = new ArraySinkRow(Data.Op.OP_UNSPECIFIED, 1, "Clare"); List> key2 = new ArrayList<>(); key2.add((Comparable) row2.get(0)); + Record r2 = GenericRecord.create(schema); + r2.set(0, row2.get(0)); + r2.set(1, row2.get(1)); - sinkRowMap.delete(key1, row1); - sinkRowMap.insert(key2, row2); - sinkRowMap.delete(key2, row2); + sinkRowMap.delete(key1, r1); + sinkRowMap.insert(key2, r2); + sinkRowMap.delete(key2, r2); assertEquals(1, sinkRowMap.map.size()); assertEquals(null, sinkRowMap.map.get(key1).getInsert()); - assertEquals(row1, sinkRowMap.map.get(key1).getDelete()); + assertEquals(r1, sinkRowMap.map.get(key1).getDelete()); } @Test public void testClear() { SinkRowMap sinkRowMap = new SinkRowMap(); - SinkRow row = new ArraySinkrow(Data.Op.OP_UNSPECIFIED, 1); + SinkRow row = new ArraySinkRow(Data.Op.OP_UNSPECIFIED, 1); List> key = new ArrayList<>(); key.add((Comparable) row.get(0)); - sinkRowMap.insert(key, row); + Schema schema = new Schema(Types.NestedField.optional(0, "id", Types.IntegerType.get())); + Record r = GenericRecord.create(schema); + r.set(0, row.get(0)); + sinkRowMap.insert(key, r); sinkRowMap.clear(); 
assertEquals(0, sinkRowMap.map.size()); diff --git a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/UpsertIcebergSinkLocalTest.java b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/UpsertIcebergSinkLocalTest.java index 87494186bfac1..3ee18f546c4fa 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/UpsertIcebergSinkLocalTest.java +++ b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/UpsertIcebergSinkLocalTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static com.risingwave.proto.Data.*; @@ -7,7 +21,7 @@ import com.google.common.collect.Iterators; import com.google.common.collect.Sets; import com.risingwave.connector.api.TableSchema; -import com.risingwave.connector.api.sink.ArraySinkrow; +import com.risingwave.connector.api.sink.ArraySinkRow; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; @@ -101,7 +115,7 @@ public void testSync() throws IOException { FileFormat.PARQUET); try { - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 1, "Alice"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 1, "Alice"))); sink.sync(); Record record1 = GenericRecord.create(icebergTableSchema); @@ -111,7 +125,7 @@ public void testSync() throws IOException { validateTableWithIceberg(expected); validateTableWithSpark(expected); - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 2, "Bob"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 2, "Bob"))); validateTableWithIceberg(expected); validateTableWithSpark(expected); @@ -146,11 +160,11 @@ public void testWrite() throws IOException { try { sink.write( Iterators.forArray( - new ArraySinkrow(Op.INSERT, 1, "Alice"), - new ArraySinkrow(Op.INSERT, 2, "Bob"), - new ArraySinkrow(Op.UPDATE_DELETE, 1, "Alice"), - new ArraySinkrow(Op.UPDATE_INSERT, 1, "Clare"), - new ArraySinkrow(Op.DELETE, 2, "Bob"))); + new ArraySinkRow(Op.INSERT, 1, "Alice"), + new ArraySinkRow(Op.INSERT, 2, "Bob"), + new ArraySinkRow(Op.UPDATE_DELETE, 1, "Alice"), + new ArraySinkRow(Op.UPDATE_INSERT, 1, "Clare"), + new ArraySinkRow(Op.DELETE, 2, "Bob"))); sink.sync(); Record record1 = GenericRecord.create(icebergTableSchema); @@ -162,9 +176,9 @@ public void testWrite() throws IOException { sink.write( Iterators.forArray( - new ArraySinkrow(Op.UPDATE_DELETE, 1, "Clare"), - new ArraySinkrow(Op.UPDATE_INSERT, 1, "Alice"), - new ArraySinkrow(Op.DELETE, 1, "Alice"))); + new ArraySinkRow(Op.UPDATE_DELETE, 1, "Clare"), + new ArraySinkRow(Op.UPDATE_INSERT, 1, "Alice"), + new ArraySinkRow(Op.DELETE, 1, "Alice"))); sink.sync(); validateTableWithIceberg(Sets.newHashSet()); diff --git a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/UpsertIcebergSinkPartitionTest.java 
b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/UpsertIcebergSinkPartitionTest.java index c9fcd52858720..70207be05403c 100644 --- a/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/UpsertIcebergSinkPartitionTest.java +++ b/java/connector-node/risingwave-sink-iceberg/src/test/java/com/risingwave/connector/UpsertIcebergSinkPartitionTest.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static com.risingwave.proto.Data.*; @@ -8,7 +22,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.risingwave.connector.api.TableSchema; -import com.risingwave.connector.api.sink.ArraySinkrow; +import com.risingwave.connector.api.sink.ArraySinkRow; import com.risingwave.proto.Data; import java.io.IOException; import java.nio.file.Files; @@ -115,7 +129,7 @@ public void testSync() throws IOException { FileFormat.PARQUET); try { - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 1, "Alice", "aaa"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 1, "Alice", "aaa"))); sink.sync(); Record record1 = GenericRecord.create(icebergTableSchema); @@ -126,7 +140,7 @@ public void testSync() throws IOException { validateTableWithIceberg(expected); validateTableWithSpark(expected); - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 2, "Bob", "bbb"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 2, "Bob", "bbb"))); validateTableWithIceberg(expected); validateTableWithSpark(expected); @@ -162,11 +176,11 @@ public void testWrite() throws IOException { try { sink.write( Iterators.forArray( - new ArraySinkrow(Op.INSERT, 1, "Alice", "aaa"), - new ArraySinkrow(Op.INSERT, 2, "Bob", "bbb"), - new ArraySinkrow(Op.UPDATE_DELETE, 1, "Alice", "aaa"), - new ArraySinkrow(Op.UPDATE_INSERT, 1, "Clare", "ccc"), - new ArraySinkrow(Op.DELETE, 2, "Bob", "bbb"))); + new ArraySinkRow(Op.INSERT, 1, "Alice", "aaa"), + new ArraySinkRow(Op.INSERT, 2, "Bob", "bbb"), + new ArraySinkRow(Op.UPDATE_DELETE, 1, "Alice", "aaa"), + new ArraySinkRow(Op.UPDATE_INSERT, 1, "Clare", "ccc"), + new ArraySinkRow(Op.DELETE, 2, "Bob", "bbb"))); sink.sync(); Record record1 = GenericRecord.create(icebergTableSchema); @@ -179,9 +193,9 @@ public void testWrite() throws IOException { sink.write( Iterators.forArray( - new ArraySinkrow(Op.UPDATE_DELETE, 1, "Clare", "ccc"), - new ArraySinkrow(Op.UPDATE_INSERT, 1, "Alice", "aaa"), - new ArraySinkrow(Op.DELETE, 1, "Alice", "aaa"))); + new ArraySinkRow(Op.UPDATE_DELETE, 1, "Clare", "ccc"), + new ArraySinkRow(Op.UPDATE_INSERT, 1, "Alice", "aaa"), + new ArraySinkRow(Op.DELETE, 1, "Alice", "aaa"))); sink.sync(); validateTableWithIceberg(Sets.newHashSet()); diff --git a/java/connector-node/risingwave-sink-jdbc/pom.xml b/java/connector-node/risingwave-sink-jdbc/pom.xml index 1b6ca9de1e962..65c59aa7c3b5e 100644 --- a/java/connector-node/risingwave-sink-jdbc/pom.xml +++ 
b/java/connector-node/risingwave-sink-jdbc/pom.xml @@ -23,8 +23,8 @@ <artifactId>connector-api</artifactId> </dependency> <dependency> - <groupId>org.slf4j</groupId> - <artifactId>slf4j-api</artifactId> + <groupId>org.apache.logging.log4j</groupId> + <artifactId>log4j-api</artifactId> </dependency> <dependency> <groupId>org.apache.logging.log4j</groupId> diff --git a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSink.java b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSink.java index 415a880630a4a..4119aa7256a94 100644 --- a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSink.java +++ b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSink.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import com.risingwave.connector.api.TableSchema; @@ -6,7 +20,9 @@ import com.risingwave.proto.Data; import io.grpc.Status; import java.sql.*; +import java.util.ArrayList; import java.util.Iterator; +import java.util.List; import java.util.stream.Collectors; import java.util.stream.IntStream; import org.slf4j.Logger; @@ -16,10 +32,13 @@ public class JDBCSink extends SinkBase { public static final String INSERT_TEMPLATE = "INSERT INTO %s (%s) VALUES (%s)"; private static final String DELETE_TEMPLATE = "DELETE FROM %s WHERE %s"; private static final String UPDATE_TEMPLATE = "UPDATE %s SET %s WHERE %s"; + private static final String ERROR_REPORT_TEMPLATE = "Error when exec %s, message %s"; private final String tableName; private final Connection conn; private final String jdbcUrl; + private final List<String> pkColumnNames; + public static final String JDBC_COLUMN_NAME_KEY = "COLUMN_NAME"; private String updateDeleteConditionBuffer; private Object[] updateDeleteValueBuffer; @@ -34,9 +53,30 @@ public JDBCSink(String tableName, String jdbcUrl, TableSchema tableSchema) { try { this.conn = DriverManager.getConnection(jdbcUrl); this.conn.setAutoCommit(false); + this.pkColumnNames = getPkColumnNames(conn, tableName); + } catch (SQLException e) { + throw Status.INTERNAL + .withDescription( + String.format(ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage())) + .asRuntimeException(); + } + } + + private static List<String> getPkColumnNames(Connection conn, String tableName) { + List<String> pkColumnNames = new ArrayList<>(); + try { + var pks = conn.getMetaData().getPrimaryKeys(null, null, tableName); + while (pks.next()) { + pkColumnNames.add(pks.getString(JDBC_COLUMN_NAME_KEY)); + } } catch (SQLException e) { - throw Status.INTERNAL.withCause(e).asRuntimeException(); + throw Status.INTERNAL + .withDescription( + String.format(ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage())) + .asRuntimeException(); } + LOG.info("detected pk {}", pkColumnNames); + return pkColumnNames; } public JDBCSink(Connection conn, TableSchema tableSchema, String tableName) { @@ -44,6 +84,7 @@ public JDBCSink(Connection conn, TableSchema tableSchema, String tableName) { this.tableName = tableName; this.jdbcUrl = null; this.conn = conn; + this.pkColumnNames = getPkColumnNames(conn, tableName); }
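// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the condition-building strategy
// used by prepareStatement() below, pulled out into a self-contained helper.
// java.sql.DatabaseMetaData.getPrimaryKeys and its "COLUMN_NAME" result
// column are the standard JDBC API; the helper class itself is hypothetical.
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

final class DeleteConditionSketch {
    // Ask the target database which columns form the primary key of `table`.
    static List<String> primaryKeyColumns(Connection conn, String table) throws SQLException {
        List<String> pks = new ArrayList<>();
        try (ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, table)) {
            while (rs.next()) {
                pks.add(rs.getString("COLUMN_NAME"));
            }
        }
        return pks;
    }

    // Fall back to matching on every column when the table reports no primary key.
    static String deleteSql(String table, List<String> pkColumns, List<String> allColumns) {
        List<String> condColumns = pkColumns.isEmpty() ? allColumns : pkColumns;
        String condition =
                condColumns.stream().map(c -> c + " = ?").collect(Collectors.joining(" AND "));
        return String.format("DELETE FROM %s WHERE %s", table, condition);
    }
}
// e.g. deleteSql("t", List.of(), List.of("id", "name"))
//   -> "DELETE FROM t WHERE id = ? AND name = ?"
// ---------------------------------------------------------------------------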
private PreparedStatement prepareStatement(SinkRow row) { @@ -65,35 +106,75 @@ private PreparedStatement prepareStatement(SinkRow row) { } return stmt; } catch (SQLException e) { - throw io.grpc.Status.INTERNAL.withCause(e).asRuntimeException(); + throw io.grpc.Status.INTERNAL + .withDescription( + String.format( + ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage())) + .asRuntimeException(); } case DELETE: - String deleteCondition = - getTableSchema().getPrimaryKeys().stream() - .map(key -> key + " = ?") - .collect(Collectors.joining(" AND ")); + String deleteCondition; + if (this.pkColumnNames.isEmpty()) { + deleteCondition = + IntStream.range(0, getTableSchema().getNumColumns()) + .mapToObj( + index -> + getTableSchema().getColumnNames()[index] + + " = ?") + .collect(Collectors.joining(" AND ")); + } else { + deleteCondition = + this.pkColumnNames.stream() + .map(key -> key + " = ?") + .collect(Collectors.joining(" AND ")); + } String deleteStmt = String.format(DELETE_TEMPLATE, tableName, deleteCondition); try { int placeholderIdx = 1; PreparedStatement stmt = conn.prepareStatement(deleteStmt, Statement.RETURN_GENERATED_KEYS); - for (String primaryKey : getTableSchema().getPrimaryKeys()) { + for (String primaryKey : this.pkColumnNames) { Object fromRow = getTableSchema().getFromRow(primaryKey, row); stmt.setObject(placeholderIdx++, fromRow); } return stmt; } catch (SQLException e) { - throw Status.INTERNAL.withCause(e).asRuntimeException(); + throw Status.INTERNAL + .withDescription( + String.format( + ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage())) + .asRuntimeException(); } case UPDATE_DELETE: - updateDeleteConditionBuffer = - getTableSchema().getPrimaryKeys().stream() - .map(key -> key + " = ?") - .collect(Collectors.joining(" AND ")); - updateDeleteValueBuffer = - getTableSchema().getPrimaryKeys().stream() - .map(key -> getTableSchema().getFromRow(key, row)) - .toArray(); + if (this.pkColumnNames.isEmpty()) { + updateDeleteConditionBuffer = + IntStream.range(0, getTableSchema().getNumColumns()) + .mapToObj( + index -> + getTableSchema().getColumnNames()[index] + + " = ?") + .collect(Collectors.joining(" AND ")); + updateDeleteValueBuffer = + IntStream.range(0, getTableSchema().getNumColumns()) + .mapToObj( + index -> + getTableSchema() + .getFromRow( + getTableSchema() + .getColumnNames()[ + index], + row)) + .toArray(); + } else { + updateDeleteConditionBuffer = + this.pkColumnNames.stream() + .map(key -> key + " = ?") + .collect(Collectors.joining(" AND ")); + updateDeleteValueBuffer = + this.pkColumnNames.stream() + .map(key -> getTableSchema().getFromRow(key, row)) + .toArray(); + } LOG.debug( "update delete condition: {} on values {}", updateDeleteConditionBuffer, @@ -130,7 +211,11 @@ private PreparedStatement prepareStatement(SinkRow row) { updateDeleteValueBuffer = null; return stmt; } catch (SQLException e) { - throw Status.INTERNAL.withCause(e).asRuntimeException(); + throw Status.INTERNAL + .withDescription( + String.format( + ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage())) + .asRuntimeException(); } default: throw Status.INVALID_ARGUMENT @@ -142,22 +227,31 @@ private PreparedStatement prepareStatement(SinkRow row) { @Override public void write(Iterator rows) { while (rows.hasNext()) { - SinkRow row = rows.next(); - PreparedStatement stmt = prepareStatement(row); - if (row.getOp() == Data.Op.UPDATE_DELETE) { - continue; - } - if (stmt != null) { - try { - LOG.debug("Executing statement: " + stmt); - stmt.executeUpdate(); - } catch (SQLException e) { - 
throw Status.INTERNAL.withCause(e).asRuntimeException(); + try (SinkRow row = rows.next()) { + PreparedStatement stmt = prepareStatement(row); + if (row.getOp() == Data.Op.UPDATE_DELETE) { + continue; } - } else { - throw Status.INTERNAL - .withDescription("empty statement encoded") - .asRuntimeException(); + if (stmt != null) { + try { + LOG.debug("Executing statement: {}", stmt); + stmt.executeUpdate(); + } catch (SQLException e) { + throw Status.INTERNAL + .withDescription( + String.format( + ERROR_REPORT_TEMPLATE, + e.getSQLState(), + e.getMessage())) + .asRuntimeException(); + } + } else { + throw Status.INTERNAL + .withDescription("empty statement encoded") + .asRuntimeException(); + } + } catch (Exception e) { + throw new RuntimeException(e); } } } @@ -173,7 +267,10 @@ public void sync() { try { conn.commit(); } catch (SQLException e) { - throw io.grpc.Status.INTERNAL.withCause(e).asRuntimeException(); + throw io.grpc.Status.INTERNAL + .withDescription( + String.format(ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage())) + .asRuntimeException(); } } @@ -182,7 +279,10 @@ public void drop() { try { conn.close(); } catch (SQLException e) { - throw io.grpc.Status.INTERNAL.withCause(e).asRuntimeException(); + throw io.grpc.Status.INTERNAL + .withDescription( + String.format(ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage())) + .asRuntimeException(); } } diff --git a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSinkFactory.java b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSinkFactory.java index ac343f4dcafdb..8e03db0032432 100644 --- a/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSinkFactory.java +++ b/java/connector-node/risingwave-sink-jdbc/src/main/java/com/risingwave/connector/JDBCSinkFactory.java @@ -1,28 +1,48 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import com.risingwave.connector.api.TableSchema; import com.risingwave.connector.api.sink.SinkBase; import com.risingwave.connector.api.sink.SinkFactory; +import com.risingwave.proto.Catalog.SinkType; import io.grpc.Status; import java.sql.*; +import java.util.HashSet; import java.util.Map; +import java.util.Set; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class JDBCSinkFactory implements SinkFactory { + + private static final Logger LOG = LoggerFactory.getLogger(JDBCSinkFactory.class); + public static final String JDBC_URL_PROP = "jdbc.url"; public static final String TABLE_NAME_PROP = "table.name"; @Override public SinkBase create(TableSchema tableSchema, Map tableProperties) { - // TODO: Remove this call to `validate` after supporting sink validation in risingwave. 
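// ---------------------------------------------------------------------------
// Editor's note: the SQLException-to-gRPC-Status translation above is
// repeated in prepareStatement(), write(), sync() and drop(). A sketch of how
// it could be factored into a single helper; io.grpc.Status is the real API,
// the helper class itself is hypothetical.
import io.grpc.Status;
import java.sql.SQLException;

final class JdbcErrorSketch {
    static final String ERROR_REPORT_TEMPLATE = "Error when exec %s, message %s";

    // Wrap a SQLException into the INTERNAL status raised by the sink.
    static RuntimeException internal(SQLException e) {
        return Status.INTERNAL
                .withDescription(
                        String.format(ERROR_REPORT_TEMPLATE, e.getSQLState(), e.getMessage()))
                .asRuntimeException();
    }
}
// usage: catch (SQLException e) { throw JdbcErrorSketch.internal(e); }
// ---------------------------------------------------------------------------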
- validate(tableSchema, tableProperties); - String tableName = tableProperties.get(TABLE_NAME_PROP); String jdbcUrl = tableProperties.get(JDBC_URL_PROP); return new JDBCSink(tableName, jdbcUrl, tableSchema); } @Override - public void validate(TableSchema tableSchema, Map tableProperties) { + public void validate( + TableSchema tableSchema, Map tableProperties, SinkType sinkType) { if (!tableProperties.containsKey(JDBC_URL_PROP) || !tableProperties.containsKey(TABLE_NAME_PROP)) { throw Status.INVALID_ARGUMENT @@ -33,12 +53,67 @@ public void validate(TableSchema tableSchema, Map tablePropertie } String jdbcUrl = tableProperties.get(JDBC_URL_PROP); + String tableName = tableProperties.get(TABLE_NAME_PROP); + Set jdbcColumns = new HashSet<>(); + Set jdbcPk = new HashSet<>(); + Set jdbcTableNames = new HashSet<>(); - try { - Connection conn = DriverManager.getConnection(jdbcUrl); - conn.close(); + try (Connection conn = DriverManager.getConnection(jdbcUrl); + ResultSet tableNamesResultSet = + conn.getMetaData().getTables(null, null, "%", null); + ResultSet columnResultSet = + conn.getMetaData().getColumns(null, null, tableName, null); + ResultSet pkResultSet = + conn.getMetaData().getPrimaryKeys(null, null, tableName); ) { + while (tableNamesResultSet.next()) { + jdbcTableNames.add(tableNamesResultSet.getString("TABLE_NAME")); + } + while (columnResultSet.next()) { + jdbcColumns.add(columnResultSet.getString("COLUMN_NAME")); + } + while (pkResultSet.next()) { + jdbcPk.add(pkResultSet.getString("COLUMN_NAME")); + } } catch (SQLException e) { - throw Status.INTERNAL.withCause(e).asRuntimeException(); + throw Status.INVALID_ARGUMENT + .withDescription("failed to connect to target database: " + e.getSQLState()) + .asRuntimeException(); + } + + if (!jdbcTableNames.contains(tableName)) { + throw Status.INVALID_ARGUMENT + .withDescription("table not found: " + tableName) + .asRuntimeException(); + } + + // Check that all columns in tableSchema exist in the JDBC table. + for (String sinkColumn : tableSchema.getColumnNames()) { + if (!jdbcColumns.contains(sinkColumn)) { + LOG.error("column not found: {}", sinkColumn); + throw Status.FAILED_PRECONDITION + .withDescription( + "table schema does not match, column not found: " + sinkColumn) + .asRuntimeException(); + } + } + + if (sinkType == SinkType.UPSERT) { + // For JDBC sink, we enforce the primary key as that of the JDBC table's. The JDBC table + // must have primary key. + if (jdbcPk.isEmpty()) { + throw Status.INVALID_ARGUMENT + .withDescription( + "JDBC table has no primary key, consider making the sink append-only or defining primary key on the JDBC table") + .asRuntimeException(); + } + // The user is not allowed to define the primary key for upsert JDBC sink. 
+ if (!tableSchema.getPrimaryKeys().isEmpty()) { + throw Status.INVALID_ARGUMENT + .withDescription( + "should not define primary key on upsert JDBC sink, find downstream primary key: " + + jdbcPk.toString()) + .asRuntimeException(); + } } } } diff --git a/java/connector-node/risingwave-sink-jdbc/src/test/java/com/risingwave/connector/JDBCSinkTest.java b/java/connector-node/risingwave-sink-jdbc/src/test/java/com/risingwave/connector/JDBCSinkTest.java index 18418c647ba15..62a8f63c207ab 100644 --- a/java/connector-node/risingwave-sink-jdbc/src/test/java/com/risingwave/connector/JDBCSinkTest.java +++ b/java/connector-node/risingwave-sink-jdbc/src/test/java/com/risingwave/connector/JDBCSinkTest.java @@ -1,10 +1,24 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector; import static org.junit.Assert.*; import com.google.common.collect.Iterators; import com.risingwave.connector.api.TableSchema; -import com.risingwave.connector.api.sink.ArraySinkrow; +import com.risingwave.connector.api.sink.ArraySinkRow; import com.risingwave.proto.Data.Op; import java.sql.*; import org.junit.Test; @@ -19,7 +33,7 @@ public void testJDBCSync() throws SQLException { JDBCSink sink = new JDBCSink(conn, TableSchema.getMockTableSchema(), "test"); createMockTable(conn, sink.getTableName()); - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 1, "Alice"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 1, "Alice"))); sink.sync(); Statement stmt = conn.createStatement(); @@ -30,7 +44,7 @@ public void testJDBCSync() throws SQLException { } assertEquals(1, count); - sink.write(Iterators.forArray(new ArraySinkrow(Op.INSERT, 2, "Bob"))); + sink.write(Iterators.forArray(new ArraySinkRow(Op.INSERT, 2, "Bob"))); sink.sync(); stmt = conn.createStatement(); rs = stmt.executeQuery("SELECT * FROM test"); @@ -69,11 +83,11 @@ public void testJDBCWrite() throws SQLException { sink.write( Iterators.forArray( - new ArraySinkrow(Op.INSERT, 1, "Alice"), - new ArraySinkrow(Op.INSERT, 2, "Bob"), - new ArraySinkrow(Op.UPDATE_DELETE, 1, "Alice"), - new ArraySinkrow(Op.UPDATE_INSERT, 1, "Clare"), - new ArraySinkrow(Op.DELETE, 2, "Bob"))); + new ArraySinkRow(Op.INSERT, 1, "Alice"), + new ArraySinkRow(Op.INSERT, 2, "Bob"), + new ArraySinkRow(Op.UPDATE_DELETE, 1, "Alice"), + new ArraySinkRow(Op.UPDATE_INSERT, 1, "Clare"), + new ArraySinkRow(Op.DELETE, 2, "Bob"))); sink.sync(); Statement stmt = conn.createStatement(); diff --git a/java/connector-node/risingwave-source-cdc/src/main/java/com/risingwave/connector/cdc/debezium/converters/DatetimeTypeConverter.java b/java/connector-node/risingwave-source-cdc/src/main/java/com/risingwave/connector/cdc/debezium/converters/DatetimeTypeConverter.java index 58fad86c6c8b7..ca63649d27360 100644 --- a/java/connector-node/risingwave-source-cdc/src/main/java/com/risingwave/connector/cdc/debezium/converters/DatetimeTypeConverter.java +++ 
b/java/connector-node/risingwave-source-cdc/src/main/java/com/risingwave/connector/cdc/debezium/converters/DatetimeTypeConverter.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.connector.cdc.debezium.converters; import io.debezium.spi.converter.CustomConverter; @@ -12,7 +26,6 @@ public class DatetimeTypeConverter implements CustomConverter<SchemaBuilder, RelationalColumn> { private DateTimeFormatter dateFormatter = DateTimeFormatter.ISO_DATE; - private static final String EPOCH_DAY = "1970-01-01"; @Override public void configure(Properties props) { @@ -26,7 +39,7 @@ public void converterFor( SchemaBuilder schemaBuilder = null; Converter converter = null; if ("DATE".equals(sqlType)) { - schemaBuilder = SchemaBuilder.string().name("risingwave.cdc.date.string"); + schemaBuilder = SchemaBuilder.string().name("rw.cdc.date.string"); converter = this::convertDate; } if (schemaBuilder != null) { @@ -36,7 +49,7 @@ private String convertDate(Object input) { if (input == null) { - return EPOCH_DAY; + return null; } var epochDay = Date.toEpochDay(input, null); LocalDate date = LocalDate.ofEpochDay(epochDay); diff --git a/java/connector-node/risingwave-source-test/pom.xml b/java/connector-node/risingwave-source-test/pom.xml new file mode 100644 index 0000000000000..8959ebb39467a --- /dev/null +++ b/java/connector-node/risingwave-source-test/pom.xml @@ -0,0 +1,91 @@ +<?xml version="1.0" encoding="UTF-8"?> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <parent> + <artifactId>java-parent</artifactId> + <groupId>com.risingwave.java</groupId> + <version>1.0-SNAPSHOT</version> + <relativePath>../../pom.xml</relativePath> + </parent> + <modelVersion>4.0.0</modelVersion> + <artifactId>risingwave-source-test</artifactId> + <packaging>jar</packaging> + <name>risingwave-source-test</name> + + <properties> + <testcontainers.version>1.17.6</testcontainers.version> + </properties> + + <dependencies> + <dependency> + <groupId>org.apache.logging.log4j</groupId> + <artifactId>log4j-api</artifactId> + </dependency> + <dependency> + <groupId>org.apache.logging.log4j</groupId> + <artifactId>log4j-slf4j-impl</artifactId> + </dependency> + <dependency> + <groupId>org.apache.logging.log4j</groupId> + <artifactId>log4j-core</artifactId> + </dependency> + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.assertj</groupId> + <artifactId>assertj-core</artifactId> + <version>3.24.2</version> + <scope>test</scope> + </dependency> + <dependency> + <groupId>com.zaxxer</groupId> + <artifactId>HikariCP</artifactId> + <version>5.0.1</version> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.testcontainers</groupId> + <artifactId>testcontainers</artifactId> + <version>${testcontainers.version}</version> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.testcontainers</groupId> + <artifactId>mysql</artifactId> + <version>${testcontainers.version}</version> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.testcontainers</groupId> + <artifactId>postgresql</artifactId> + <version>${testcontainers.version}</version> + <scope>test</scope> + </dependency> + <dependency> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-databind</artifactId> + <version>${jackson.version}</version> + <scope>test</scope> + </dependency> + <dependency> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-core</artifactId> + <version>${jackson.version}</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>com.risingwave.java</groupId> + <artifactId>risingwave-source-cdc</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>com.risingwave.java</groupId> + <artifactId>risingwave-connector-service</artifactId> + <scope>test</scope> + </dependency> + </dependencies> +</project> diff --git a/java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/MySQLSourceTest.java b/java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/MySQLSourceTest.java new file mode 100644 index 0000000000000..34f2bc24cd6ef --- /dev/null +++ b/java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/MySQLSourceTest.java @@ -0,0 +1,205 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
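// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the conversion performed by
// convertDate() above, reduced to plain java.time. Debezium hands the
// converter a value convertible to an epoch day; it is rendered as an
// ISO-8601 date string, and after this change a SQL NULL stays NULL instead
// of being rewritten to 1970-01-01.
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

final class DateRenderSketch {
    static String render(Long epochDay) {
        if (epochDay == null) {
            return null; // was "1970-01-01" before this patch
        }
        return LocalDate.ofEpochDay(epochDay).format(DateTimeFormatter.ISO_DATE);
    }
}
// render(0L) -> "1970-01-01", render(18262L) -> "2020-01-01", render(null) -> null
// ---------------------------------------------------------------------------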
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector; + +import static org.assertj.core.api.Assertions.*; +import static org.junit.Assert.assertEquals; + +import com.risingwave.proto.ConnectorServiceProto.*; +import io.grpc.*; +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.*; +import javax.sql.DataSource; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.MySQLContainer; +import org.testcontainers.utility.MountableFile; + +public class MySQLSourceTest { + + static final Logger LOG = LoggerFactory.getLogger(MySQLSourceTest.class.getName()); + + private static final MySQLContainer mysql = + new MySQLContainer<>("mysql:8.0") + .withDatabaseName("test") + .withUsername("root") + .withCopyFileToContainer( + MountableFile.forClasspathResource("my.cnf"), "/etc/my.cnf"); + + public static Server connectorServer = + ServerBuilder.forPort(ConnectorService.DEFAULT_PORT) + .addService(new ConnectorServiceImpl()) + .build(); + + public static SourceTestClient testClient = + new SourceTestClient( + Grpc.newChannelBuilder( + "localhost:" + ConnectorService.DEFAULT_PORT, + InsecureChannelCredentials.create()) + .build()); + + private static DataSource mysqlDataSource; + + @BeforeClass + public static void init() { + // generate orders.tbl test data + SourceTestClient.genOrdersTable(10000); + // start connector server and mysql... + try { + connectorServer.start(); + LOG.info("connector service started"); + mysql.withCopyFileToContainer( + MountableFile.forClasspathResource("orders.tbl"), "/home/orders.tbl"); + mysql.start(); + mysqlDataSource = SourceTestClient.getDataSource(mysql); + LOG.info("mysql started"); + } catch (IOException e) { + fail("IO exception: ", e); + } + // check mysql configuration... 
+ try { + Connection connection = SourceTestClient.connect(mysqlDataSource); + ResultSet resultSet = + SourceTestClient.performQuery( + connection, testClient.sqlStmts.getProperty("mysql.bin_log")); + assertThat(resultSet.getString("Value")).isEqualTo("ON").as("MySQL: bin_log ON"); + connection.close(); + } catch (SQLException e) { + fail("SQL exception: ", e); + } + } + + @AfterClass + public static void cleanup() { + connectorServer.shutdown(); + mysql.stop(); + } + + // create a TPC-H orders table in mysql + // insert 10,000 rows into orders + // check if the number of changes debezium captures is 10,000 + @Test + public void testLines() throws InterruptedException, SQLException { + ExecutorService executorService = Executors.newFixedThreadPool(1); + Connection connection = SourceTestClient.connect(mysqlDataSource); + String query = testClient.sqlStmts.getProperty("tpch.create.orders"); + SourceTestClient.performQuery(connection, query); + query = + "LOAD DATA INFILE '/home/orders.tbl' " + + "INTO TABLE orders " + + "CHARACTER SET UTF8 " + + "FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n';"; + SourceTestClient.performQuery(connection, query); + Iterator eventStream = + testClient.getEventStreamStart(mysql, SourceType.MYSQL, "test", "orders"); + Callable countTask = + () -> { + int count = 0; + while (eventStream.hasNext()) { + List messages = eventStream.next().getEventsList(); + for (CdcMessage ignored : messages) { + count++; + } + if (count == 10000) { + return count; + } + } + return count; + }; + Future countResult = executorService.submit(countTask); + try { + int count = countResult.get(); + LOG.info("number of cdc messages received: {}", count); + assertEquals(count, 10000); + } catch (ExecutionException e) { + fail("Execution exception: ", e); + } + connection.close(); + } + + // generates test cases for the risingwave debezium parser + @Ignore + @Test + public void getTestJson() throws InterruptedException, SQLException { + Connection connection = SourceTestClient.connect(mysqlDataSource); + String query = + "CREATE TABLE IF NOT EXISTS orders (" + + "O_KEY BIGINT NOT NULL, " + + "O_BOOL BOOLEAN, " + + "O_TINY TINYINT, " + + "O_INT INT, " + + "O_REAL REAL, " + + "O_DOUBLE DOUBLE, " + + "O_DECIMAL DECIMAL(15, 2), " + + "O_CHAR CHAR(15), " + + "O_DATE DATE, " + + "O_TIME TIME, " + + "O_DATETIME DATETIME, " + + "O_TIMESTAMP TIMESTAMP, " + + "O_JSON JSON, " + + "PRIMARY KEY (O_KEY))"; + SourceTestClient.performQuery(connection, query); + Iterator eventStream = + testClient.getEventStreamStart(mysql, SourceType.MYSQL, "test", "orders"); + Thread t1 = + new Thread( + () -> { + while (eventStream.hasNext()) { + List messages = eventStream.next().getEventsList(); + for (CdcMessage msg : messages) { + LOG.info("{}", msg.getPayload()); + } + } + }); + Thread.sleep(3000); + t1.start(); + Thread.sleep(3000); + // Q1: ordinary insert + query = + "INSERT INTO orders (O_KEY, O_BOOL, O_TINY, O_INT, O_REAL, O_DOUBLE, O_DECIMAL, O_CHAR, O_DATE, O_TIME, O_DATETIME, O_TIMESTAMP, O_JSON)" + + "VALUES(111, TRUE, -1, -1111, -11.11, -111.11111, -111.11, 'yes please', '1000-01-01', '00:00:00', '1970-01-01 00:00:00', '1970-01-01 00:00:01.000000', '{\"k1\": \"v1\", \"k2\": 11}')"; + SourceTestClient.performQuery(connection, query); + // Q2: update value of Q1 (value -> new value) + query = + "UPDATE orders SET O_BOOL = FALSE, " + + "O_TINY = 3, " + + "O_INT = 3333, " + + "O_REAL = 33.33, " + + "O_DOUBLE = 333.33333, " + + "O_DECIMAL = 333.33, " + + "O_CHAR = 'no thanks', " + + "O_DATE = '9999-12-31', 
" + + "O_TIME = '23:59:59', " + + "O_DATETIME = '5138-11-16 09:46:39', " + + "O_TIMESTAMP = '2038-01-09 03:14:07', " + + "O_JSON = '{\"k1\": \"v1_updated\", \"k2\": 33}' " + + "WHERE orders.O_KEY = 111"; + SourceTestClient.performQuery(connection, query); + // Q3: delete value from Q1 + query = "DELETE FROM orders WHERE orders.O_KEY = 111"; + SourceTestClient.performQuery(connection, query); + Thread.sleep(5000); + connection.close(); + } +} diff --git a/java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/PostgresSourceTest.java b/java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/PostgresSourceTest.java new file mode 100644 index 0000000000000..e1ed443c67fc6 --- /dev/null +++ b/java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/PostgresSourceTest.java @@ -0,0 +1,257 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.risingwave.proto.ConnectorServiceProto; +import com.risingwave.proto.Data; +import io.grpc.*; +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.*; +import javax.sql.DataSource; +import org.junit.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.PostgreSQLContainer; +import org.testcontainers.utility.MountableFile; + +public class PostgresSourceTest { + private static final Logger LOG = LoggerFactory.getLogger(PostgresSourceTest.class.getName()); + + private static final PostgreSQLContainer pg = + new PostgreSQLContainer<>("postgres:15-alpine") + .withDatabaseName("test") + .withUsername("postgres") + .withCommand("postgres -c wal_level=logical -c max_wal_senders=10"); + + public static Server connectorServer = + ServerBuilder.forPort(ConnectorService.DEFAULT_PORT) + .addService(new ConnectorServiceImpl()) + .build(); + + public static SourceTestClient testClient = + new SourceTestClient( + Grpc.newChannelBuilder( + "localhost:" + ConnectorService.DEFAULT_PORT, + InsecureChannelCredentials.create()) + .build()); + + private static DataSource pgDataSource; + + @BeforeClass + public static void init() { + // generate orders.tbl test data + SourceTestClient.genOrdersTable(10000); + // start connector server and postgres... 
+ try { + connectorServer.start(); + LOG.info("connector service started"); + pg.withCopyFileToContainer( + MountableFile.forClasspathResource("orders.tbl"), "/home/orders.tbl"); + pg.start(); + pg.withUsername("postgres") + .execInContainer( + "sh", + "-c", + "echo 'host replication postgres 172.17.0.1/32 trust' >> /var/lib/postgresql/data/pg_hba.conf"); + pgDataSource = SourceTestClient.getDataSource(pg); + LOG.info("postgres started"); + } catch (IOException e) { + fail("IO exception: ", e); + } catch (InterruptedException e) { + fail("Interrupted exception", e); + } + // check pg configuration... + try { + Connection connection = SourceTestClient.connect(pgDataSource); + SourceTestClient.performQuery(connection, "SELECT pg_reload_conf()"); + ResultSet resultSet = + SourceTestClient.performQuery( + connection, testClient.sqlStmts.getProperty("postgres.wal")); + assertThat(resultSet.getString("wal_level")) + .isEqualTo("logical") + .as("pg: wal_level logical"); + connection.close(); + } catch (SQLException e) { + fail("SQL exception: ", e); + } + } + + @AfterClass + public static void cleanup() { + connectorServer.shutdown(); + pg.stop(); + } + + // create a TPC-H orders table in postgres + // insert 10,000 rows into orders + // check if the number of changes debezium captures is 10,000 + @Test + public void testLines() throws InterruptedException, SQLException { + ExecutorService executorService = Executors.newFixedThreadPool(1); + Connection connection = SourceTestClient.connect(pgDataSource); + String query = testClient.sqlStmts.getProperty("tpch.create.orders"); + SourceTestClient.performQuery(connection, query); + query = "COPY orders FROM '/home/orders.tbl' WITH DELIMITER '|'"; + SourceTestClient.performQuery(connection, query); + Iterator eventStream = + testClient.getEventStreamStart( + pg, ConnectorServiceProto.SourceType.POSTGRES, "test", "orders"); + Callable countTask = + () -> { + int count = 0; + while (eventStream.hasNext()) { + List messages = + eventStream.next().getEventsList(); + for (ConnectorServiceProto.CdcMessage ignored : messages) { + count++; + } + if (count == 10000) { + return count; + } + } + return count; + }; + Future countResult = executorService.submit(countTask); + try { + int count = countResult.get(); + LOG.info("number of cdc messages received: {}", count); + assertEquals(count, 10000); + } catch (ExecutionException e) { + fail("Execution exception: ", e); + } + connection.close(); + } + + // test whether validation catches permission errors + @Test + public void testPermissionCheck() { + Connection connection = SourceTestClient.connect(pgDataSource); + String query = + "CREATE TABLE IF NOT EXISTS orders (o_key BIGINT NOT NULL, o_val INT, PRIMARY KEY (o_key))"; + SourceTestClient.performQuery(connection, query); + // create a partial publication, check whether error is reported + query = "CREATE PUBLICATION dbz_publication FOR TABLE orders (o_key)"; + SourceTestClient.performQuery(connection, query); + ConnectorServiceProto.TableSchema tableSchema = + ConnectorServiceProto.TableSchema.newBuilder() + .addColumns( + ConnectorServiceProto.TableSchema.Column.newBuilder() + .setName("o_key") + .setDataType(Data.DataType.TypeName.INT64) + .build()) + .addColumns( + ConnectorServiceProto.TableSchema.Column.newBuilder() + .setName("o_val") + .setDataType(Data.DataType.TypeName.INT32) + .build()) + .addPkIndices(0) + .build(); + Iterator eventStream1 = + testClient.getEventStreamValidate( + pg, + ConnectorServiceProto.SourceType.POSTGRES, + tableSchema, + 
"test", + "orders"); + StatusRuntimeException exception1 = + assertThrows( + StatusRuntimeException.class, + () -> { + eventStream1.hasNext(); + }); + assertEquals( + exception1.getMessage(), + "INVALID_ARGUMENT: INTERNAL: The publication 'dbz_publication' does not cover all necessary columns in table orders"); + query = "DROP PUBLICATION dbz_publication"; + SourceTestClient.performQuery(connection, query); + // revoke superuser and replication, check if reports error + query = "ALTER USER " + pg.getUsername() + " nosuperuser noreplication"; + SourceTestClient.performQuery(connection, query); + Iterator eventStream2 = + testClient.getEventStreamValidate( + pg, + ConnectorServiceProto.SourceType.POSTGRES, + tableSchema, + "test", + "orders"); + StatusRuntimeException exception2 = + assertThrows( + StatusRuntimeException.class, + () -> { + eventStream2.hasNext(); + }); + assertEquals( + exception2.getMessage(), + "INVALID_ARGUMENT: INTERNAL: Postgres user must be superuser or replication role to start walsender."); + } + + // generates test cases for the risingwave debezium parser + @Ignore + @Test + public void getTestJson() throws InterruptedException, SQLException { + Connection connection = SourceTestClient.connect(pgDataSource); + String query = + "CREATE TABLE IF NOT EXISTS orders (" + + "O_KEY BIGINT NOT NULL, " + + "O_BOOL BOOLEAN, " + + "O_BITS BIT(3), " + + "O_TINY SMALLINT, " + + "O_INT INT, " + + "O_REAL REAL, " + + "O_DOUBLE DOUBLE PRECISION, " + + "O_DECIMAL DECIMAL(15, 2), " + + "O_CHAR CHAR(15), " + + "O_DATE DATE, " + + "O_TIME TIME, " + + "O_TIMESTAMP TIMESTAMP, " + + "O_JSON JSON, " + + "O_TEXT_ARR TEXT[][], " + + "PRIMARY KEY (O_KEY))"; + SourceTestClient.performQuery(connection, query); + Iterator eventStream = + testClient.getEventStreamStart( + pg, ConnectorServiceProto.SourceType.POSTGRES, "test", "orders"); + Thread t1 = + new Thread( + () -> { + while (eventStream.hasNext()) { + List messages = + eventStream.next().getEventsList(); + for (ConnectorServiceProto.CdcMessage msg : messages) { + LOG.info("{}", msg.getPayload()); + } + } + }); + // Q1: ordinary insert (read) + Thread.sleep(1000); + t1.start(); + query = + "INSERT INTO orders (O_KEY, O_BOOL, O_BITS, O_TINY, O_INT, O_REAL, O_DOUBLE, O_DECIMAL, O_CHAR, O_DATE, O_TIME, O_TIMESTAMP, O_JSON, O_TEXT_ARR)" + + "VALUES(111, TRUE, b'111', -1, -1111, -11.11, -111.11111, -111.11, 'yes please', '2011-11-11', '11:11:11', '2011-11-11 11:11:11.123456', '{\"k1\": \"v1\", \"k2\": 11}', ARRAY[['meeting', 'lunch'], ['training', 'presentation']])"; + SourceTestClient.performQuery(connection, query); + Thread.sleep(1000); + connection.close(); + } +} diff --git a/java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/SourceTestClient.java b/java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/SourceTestClient.java new file mode 100644 index 0000000000000..950e847f6d65e --- /dev/null +++ b/java/connector-node/risingwave-source-test/src/test/java/com/risingwave/connector/SourceTestClient.java @@ -0,0 +1,210 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.connector; + +import static org.assertj.core.api.Assertions.fail; + +import com.risingwave.proto.ConnectorServiceGrpc; +import com.risingwave.proto.ConnectorServiceProto; +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; +import io.grpc.Channel; +import io.grpc.StatusRuntimeException; +import java.io.*; +import java.net.URI; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Iterator; +import java.util.Properties; +import java.util.Random; +import java.util.UUID; +import javax.sql.DataSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.JdbcDatabaseContainer; + +public class SourceTestClient { + static final Logger LOG = LoggerFactory.getLogger(SourceTestClient.class.getName()); + private final ConnectorServiceGrpc.ConnectorServiceBlockingStub blockingStub; + + public Properties sqlStmts = new Properties(); + + public SourceTestClient(Channel channel) { + blockingStub = ConnectorServiceGrpc.newBlockingStub(channel); + try (InputStream input = + getClass().getClassLoader().getResourceAsStream("stored_queries.properties")) { + sqlStmts.load(input); + } catch (IOException e) { + fail("failed to load sql statements", e); + } + } + + protected static Connection connect(DataSource dataSource) { + Connection connection = null; + try { + connection = dataSource.getConnection(); + } catch (SQLException e) { + fail("SQL Exception: {}", e); + } + return connection; + } + + protected static ResultSet performQuery(Connection connection, String sql) { + ResultSet resultSet = null; + try { + Statement statement = connection.createStatement(); + if (statement.execute(sql)) { + resultSet = statement.getResultSet(); + resultSet.next(); + } else { + LOG.info("updated: " + statement.getUpdateCount()); + } + } catch (SQLException e) { + LOG.warn("SQL Exception: {}", e.getMessage()); + } + return resultSet; + } + + protected static DataSource getDataSource(JdbcDatabaseContainer container) { + HikariConfig hikariConfig = new HikariConfig(); + hikariConfig.setJdbcUrl(container.getJdbcUrl()); + hikariConfig.setUsername(container.getUsername()); + hikariConfig.setPassword(container.getPassword()); + hikariConfig.setDriverClassName(container.getDriverClassName()); + return new HikariDataSource(hikariConfig); + } + + protected Iterator getEventStreamValidate( + JdbcDatabaseContainer container, + ConnectorServiceProto.SourceType sourceType, + ConnectorServiceProto.TableSchema tableSchema, + String databaseName, + String tableName) { + String port = String.valueOf(URI.create(container.getJdbcUrl().substring(5)).getPort()); + ConnectorServiceProto.GetEventStreamRequest req = + ConnectorServiceProto.GetEventStreamRequest.newBuilder() + .setValidate( + ConnectorServiceProto.GetEventStreamRequest.ValidateProperties + .newBuilder() + .setSourceId(0) + .setSourceType(sourceType) + .setTableSchema(tableSchema) + .putProperties("hostname", container.getHost()) + .putProperties("port", port) + .putProperties("username", 
container.getUsername()) + .putProperties("password", container.getPassword()) + .putProperties("database.name", databaseName) + .putProperties("table.name", tableName) + .putProperties("schema.name", "public") // pg only + .putProperties("slot.name", "orders") // pg only + .putProperties("server.id", "1")) // mysql only + .build(); + Iterator responses = null; + try { + responses = blockingStub.getEventStream(req); + } catch (StatusRuntimeException e) { + fail("RPC failed: {}", e.getStatus()); + } + return responses; + } + + protected Iterator getEventStreamStart( + JdbcDatabaseContainer container, + ConnectorServiceProto.SourceType sourceType, + String databaseName, + String tableName) { + String port = String.valueOf(URI.create(container.getJdbcUrl().substring(5)).getPort()); + ConnectorServiceProto.GetEventStreamRequest req = + ConnectorServiceProto.GetEventStreamRequest.newBuilder() + .setStart( + ConnectorServiceProto.GetEventStreamRequest.StartSource.newBuilder() + .setSourceId(0) + .setSourceType(sourceType) + .setStartOffset("") + .putProperties("hostname", container.getHost()) + .putProperties("port", port) + .putProperties("username", container.getUsername()) + .putProperties("password", container.getPassword()) + .putProperties("database.name", databaseName) + .putProperties("table.name", tableName) + .putProperties("schema.name", "public") // pg only + .putProperties("slot.name", "orders") // pg only + .putProperties("server.id", "1")) // mysql only + .build(); + Iterator responses = null; + try { + responses = blockingStub.getEventStream(req); + } catch (StatusRuntimeException e) { + fail("RPC failed: {}", e.getStatus()); + } + return responses; + } + + // generates an orders.tbl in class path using random data + // if file does not contain 10000 lines + static void genOrdersTable(int numRows) { + String[] orderStatusArr = {"O", "F"}; + String[] orderPriorityArr = {"1-URGENT", "2-HIGH", "3-MEDIUM", "4-NOT SPECIFIED", "5-LOW"}; + String path = + SourceTestClient.class.getProtectionDomain().getCodeSource().getLocation().getFile() + + "orders.tbl"; + try (BufferedReader reader = new BufferedReader(new FileReader(path))) { + int lines = 0; + while (reader.readLine() != null) { + lines++; + } + if (lines == 10000) { + LOG.info("orders.tbl contains 10000 lines, skipping data generation"); + return; + } + } catch (Exception e) { + fail("Runtime Exception: {}", e); + } + Random rand = new Random(); + try (PrintWriter writer = new PrintWriter(path, "UTF-8")) { + for (int i = 1; i <= numRows; i++) { + String custKey = String.valueOf(Math.abs(rand.nextLong())); + String orderStatus = orderStatusArr[rand.nextInt(orderStatusArr.length)]; + String totalPrice = rand.nextInt(1000000) + "." 
+ rand.nextInt(9) + rand.nextInt(9); + String orderDate = + (rand.nextInt(60) + 1970) + + "-" + + String.format("%02d", rand.nextInt(12) + 1) + + "-" + + String.format("%02d", rand.nextInt(28) + 1); + String orderPriority = orderPriorityArr[rand.nextInt(orderPriorityArr.length)]; + String clerk = "Clerk#" + String.format("%09d", rand.nextInt(1024)); + String shipPriority = "0"; + String comment = UUID.randomUUID() + " " + UUID.randomUUID(); + writer.printf( + "%s|%s|%s|%s|%s|%s|%s|%s|%s\n", + i, + custKey, + orderStatus, + totalPrice, + orderDate, + orderPriority, + clerk, + shipPriority, + comment); + } + } catch (Exception e) { + fail("Runtime Exception: {}", e); + } + LOG.info("10000 lines written to orders.tbl"); + } +} diff --git a/java/connector-node/risingwave-source-test/src/test/resources/my.cnf b/java/connector-node/risingwave-source-test/src/test/resources/my.cnf new file mode 100644 index 0000000000000..a6a3175e7ef3d --- /dev/null +++ b/java/connector-node/risingwave-source-test/src/test/resources/my.cnf @@ -0,0 +1,7 @@ +[mysqld] +server-id = 223344 +log_bin = mysql-bin +binlog_format = ROW +binlog_row_image = FULL +expire_logs_days = 10 +secure-file-priv = '/home' diff --git a/java/connector-node/risingwave-source-test/src/test/resources/orders.tbl b/java/connector-node/risingwave-source-test/src/test/resources/orders.tbl new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/java/connector-node/risingwave-source-test/src/test/resources/stored_queries.properties b/java/connector-node/risingwave-source-test/src/test/resources/stored_queries.properties new file mode 100644 index 0000000000000..20acafedebae5 --- /dev/null +++ b/java/connector-node/risingwave-source-test/src/test/resources/stored_queries.properties @@ -0,0 +1,3 @@ +mysql.bin_log=show variables like 'log_bin' +postgres.wal=show wal_level +tpch.create.orders=CREATE TABLE IF NOT EXISTS orders (O_ORDERKEY BIGINT NOT NULL, O_CUSTKEY BIGINT NOT NULL, O_ORDERSTATUS CHAR(1) NOT NULL, O_TOTALPRICE DECIMAL(15, 2) NOT NULL, O_ORDERDATE DATE NOT NULL, O_ORDERPRIORITY CHAR(15) NOT NULL, O_CLERK CHAR(15) NOT NULL, O_SHIPPRIORITY BIGINT NOT NULL, O_COMMENT VARCHAR(79) NOT NULL, PRIMARY KEY (O_ORDERKEY)) \ No newline at end of file diff --git a/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/Demo.java b/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/HummockReadDemo.java similarity index 61% rename from java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/Demo.java rename to java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/HummockReadDemo.java index 0e45e1d15155c..9f4038cf3f9a3 100644 --- a/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/Demo.java +++ b/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/HummockReadDemo.java @@ -1,5 +1,21 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package com.risingwave.java.binding; +import static com.risingwave.java.binding.Utils.validateRow; + import com.risingwave.java.utils.MetaClient; import com.risingwave.proto.Catalog.Table; import com.risingwave.proto.Hummock.HummockVersion; @@ -13,7 +29,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; /** Hello world! */ -public class Demo { +public class HummockReadDemo { public static void main(String[] args) { String objectStore = System.getenv("OBJECT_STORE"); String dbName = System.getenv("DB_NAME"); @@ -53,7 +69,7 @@ public static void main(String[] args) { .addAllVnodeIds(vnodeList) .build(); - try (Iterator iter = new Iterator(readPlan)) { + try (HummockIterator iter = new HummockIterator(readPlan)) { int count = 0; while (true) { try (KeyedRow row = iter.next()) { @@ -78,43 +94,4 @@ public static void main(String[] args) { scheduledThreadPool.shutdown(); } - - static void validateRow(KeyedRow row) { - // The validation of row data are according to the data generation rule - // defined in ${REPO_ROOT}/src/java_binding/gen-demo-insert-data.py - short rowIndex = row.getShort(0); - if (row.getInt(1) != rowIndex) { - throw new RuntimeException( - String.format("invalid int value: %s %s", row.getInt(1), rowIndex)); - } - if (row.getLong(2) != rowIndex) { - throw new RuntimeException( - String.format("invalid long value: %s %s", row.getLong(2), rowIndex)); - } - if (row.getFloat(3) != (float) rowIndex) { - throw new RuntimeException( - String.format("invalid float value: %s %s", row.getFloat(3), rowIndex)); - } - if (row.getDouble(4) != (double) rowIndex) { - throw new RuntimeException( - String.format("invalid double value: %s %s", row.getDouble(4), rowIndex)); - } - if (row.getBoolean(5) != (rowIndex % 3 == 0)) { - throw new RuntimeException( - String.format( - "invalid bool value: %s %s", row.getBoolean(5), (rowIndex % 3 == 0))); - } - if (!row.getString(6).equals(((Short) rowIndex).toString().repeat((rowIndex % 10) + 1))) { - throw new RuntimeException( - String.format( - "invalid string value: %s %s", - row.getString(6), - ((Short) rowIndex).toString().repeat((rowIndex % 10) + 1))); - } - if (row.isNull(7) != (rowIndex % 5 == 0)) { - throw new RuntimeException( - String.format( - "invalid isNull value: %s %s", row.isNull(7), (rowIndex % 5 == 0))); - } - } } diff --git a/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/StreamChunkDemo.java b/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/StreamChunkDemo.java new file mode 100644 index 0000000000000..0cc6977de2f0c --- /dev/null +++ b/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/StreamChunkDemo.java @@ -0,0 +1,43 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.risingwave.java.binding; + +import static com.risingwave.java.binding.Utils.validateRow; + +import java.io.IOException; + +public class StreamChunkDemo { + + public static void main(String[] args) throws IOException { + byte[] payload = System.in.readAllBytes(); + try (StreamChunkIterator iter = new StreamChunkIterator(payload)) { + int count = 0; + while (true) { + try (StreamChunkRow row = iter.next()) { + if (row == null) { + break; + } + count += 1; + validateRow(row); + } + } + int expectedCount = 30000; + if (count != expectedCount) { + throw new RuntimeException( + String.format("row count is %s, should be %s", count, expectedCount)); + } + } + } +} diff --git a/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/Utils.java b/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/Utils.java new file mode 100644 index 0000000000000..193ba4811bdc1 --- /dev/null +++ b/java/java-binding-integration-test/src/main/java/com/risingwave/java/binding/Utils.java @@ -0,0 +1,56 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.java.binding; + +public class Utils { + public static void validateRow(BaseRow row) { + // The validation of row data are according to the data generation rule + // defined in ${REPO_ROOT}/src/java_binding/gen-demo-insert-data.py + short rowIndex = row.getShort(0); + if (row.getInt(1) != rowIndex) { + throw new RuntimeException( + String.format("invalid int value: %s %s", row.getInt(1), rowIndex)); + } + if (row.getLong(2) != rowIndex) { + throw new RuntimeException( + String.format("invalid long value: %s %s", row.getLong(2), rowIndex)); + } + if (row.getFloat(3) != (float) rowIndex) { + throw new RuntimeException( + String.format("invalid float value: %s %s", row.getFloat(3), rowIndex)); + } + if (row.getDouble(4) != (double) rowIndex) { + throw new RuntimeException( + String.format("invalid double value: %s %s", row.getDouble(4), rowIndex)); + } + if (row.getBoolean(5) != (rowIndex % 3 == 0)) { + throw new RuntimeException( + String.format( + "invalid bool value: %s %s", row.getBoolean(5), (rowIndex % 3 == 0))); + } + if (!row.getString(6).equals(((Short) rowIndex).toString().repeat((rowIndex % 10) + 1))) { + throw new RuntimeException( + String.format( + "invalid string value: %s %s", + row.getString(6), + ((Short) rowIndex).toString().repeat((rowIndex % 10) + 1))); + } + if (row.isNull(7) != (rowIndex % 5 == 0)) { + throw new RuntimeException( + String.format( + "invalid isNull value: %s %s", row.isNull(7), (rowIndex % 5 == 0))); + } + } +} diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/BaseRow.java b/java/java-binding/src/main/java/com/risingwave/java/binding/BaseRow.java new file mode 100644 index 0000000000000..22d55a145deaa --- /dev/null +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/BaseRow.java @@ -0,0 +1,65 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.java.binding; + +public class BaseRow implements AutoCloseable { + protected final long pointer; + private boolean isClosed; + + protected BaseRow(long pointer) { + this.pointer = pointer; + this.isClosed = false; + } + + public boolean isNull(int index) { + return Binding.rowIsNull(pointer, index); + } + + public short getShort(int index) { + return Binding.rowGetInt16Value(pointer, index); + } + + public int getInt(int index) { + return Binding.rowGetInt32Value(pointer, index); + } + + public long getLong(int index) { + return Binding.rowGetInt64Value(pointer, index); + } + + public float getFloat(int index) { + return Binding.rowGetFloatValue(pointer, index); + } + + public double getDouble(int index) { + return Binding.rowGetDoubleValue(pointer, index); + } + + public boolean getBoolean(int index) { + return Binding.rowGetBooleanValue(pointer, index); + } + + public String getString(int index) { + return Binding.rowGetStringValue(pointer, index); + } + + @Override + public void close() { + if (!isClosed) { + isClosed = true; + Binding.rowClose(pointer); + } + } +} diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/Binding.java b/java/java-binding/src/main/java/com/risingwave/java/binding/Binding.java index e596603514df0..f4dec3eecb426 100644 --- a/java/java-binding/src/main/java/com/risingwave/java/binding/Binding.java +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/Binding.java @@ -1,3 +1,17 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package com.risingwave.java.binding; public class Binding { @@ -7,20 +21,22 @@ public class Binding { public static native int vnodeCount(); - // iterator method + // hummock iterator method // Return a pointer to the iterator - static native long iteratorNew(byte[] readPlan); + static native long hummockIteratorNew(byte[] readPlan); // return a pointer to the next row - static native long iteratorNext(long pointer); + static native long hummockIteratorNext(long pointer); // Since the underlying rust does not have garbage collection, we will have to manually call // close on the iterator to release the iterator instance pointed by the pointer. 
- static native void iteratorClose(long pointer); + static native void hummockIteratorClose(long pointer); // row method static native byte[] rowGetKey(long pointer); + static native int rowGetOp(long pointer); + static native boolean rowIsNull(long pointer, int index); static native short rowGetInt16Value(long pointer, int index); @@ -40,4 +56,11 @@ public class Binding { // Since the underlying rust does not have garbage collection, we will have to manually call // close on the row to release the row instance pointed by the pointer. static native void rowClose(long pointer); + + // stream chunk iterator method + static native long streamChunkIteratorNew(byte[] streamChunkPayload); + + static native long streamChunkIteratorNext(long pointer); + + static native void streamChunkIteratorClose(long pointer); } diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/HummockIterator.java b/java/java-binding/src/main/java/com/risingwave/java/binding/HummockIterator.java new file mode 100644 index 0000000000000..ced034fd649d9 --- /dev/null +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/HummockIterator.java @@ -0,0 +1,43 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.java.binding; + +import com.risingwave.proto.JavaBinding.ReadPlan; + +public class HummockIterator implements AutoCloseable { + private final long pointer; + private boolean isClosed; + + public HummockIterator(ReadPlan readPlan) { + this.pointer = Binding.hummockIteratorNew(readPlan.toByteArray()); + this.isClosed = false; + } + + public KeyedRow next() { + long pointer = Binding.hummockIteratorNext(this.pointer); + if (pointer == 0) { + return null; + } + return new KeyedRow(pointer); + } + + @Override + public void close() { + if (!isClosed) { + isClosed = true; + Binding.hummockIteratorClose(pointer); + } + } +} diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/Iterator.java b/java/java-binding/src/main/java/com/risingwave/java/binding/Iterator.java deleted file mode 100644 index 5c8d4ebf74efa..0000000000000 --- a/java/java-binding/src/main/java/com/risingwave/java/binding/Iterator.java +++ /dev/null @@ -1,29 +0,0 @@ -package com.risingwave.java.binding; - -import com.risingwave.proto.JavaBinding.ReadPlan; - -public class Iterator implements AutoCloseable { - private final long pointer; - private boolean isClosed; - - public Iterator(ReadPlan readPlan) { - this.pointer = Binding.iteratorNew(readPlan.toByteArray()); - this.isClosed = false; - } - - public KeyedRow next() { - long pointer = Binding.iteratorNext(this.pointer); - if (pointer == 0) { - return null; - } - return new KeyedRow(pointer); - } - - @Override - public void close() { - if (!isClosed) { - isClosed = true; - Binding.iteratorClose(pointer); - } - } -} diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/KeyedRow.java b/java/java-binding/src/main/java/com/risingwave/java/binding/KeyedRow.java index 
21fbb94da68e5..6bbfdaafebabc 100644 --- a/java/java-binding/src/main/java/com/risingwave/java/binding/KeyedRow.java +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/KeyedRow.java @@ -1,55 +1,25 @@ -package com.risingwave.java.binding; +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -public class KeyedRow implements AutoCloseable { - private final long pointer; - private boolean isClosed; +package com.risingwave.java.binding; - KeyedRow(long pointer) { - this.pointer = pointer; - this.isClosed = false; +public class KeyedRow extends BaseRow { + public KeyedRow(long pointer) { + super(pointer); } public byte[] getKey() { return Binding.rowGetKey(pointer); } - - public boolean isNull(int index) { - return Binding.rowIsNull(pointer, index); - } - - public short getShort(int index) { - return Binding.rowGetInt16Value(pointer, index); - } - - public int getInt(int index) { - return Binding.rowGetInt32Value(pointer, index); - } - - public long getLong(int index) { - return Binding.rowGetInt64Value(pointer, index); - } - - public float getFloat(int index) { - return Binding.rowGetFloatValue(pointer, index); - } - - public double getDouble(int index) { - return Binding.rowGetDoubleValue(pointer, index); - } - - public boolean getBoolean(int index) { - return Binding.rowGetBooleanValue(pointer, index); - } - - public String getString(int index) { - return Binding.rowGetStringValue(pointer, index); - } - - @Override - public void close() { - if (!isClosed) { - isClosed = true; - Binding.rowClose(pointer); - } - } } diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkIterator.java b/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkIterator.java new file mode 100644 index 0000000000000..9d4d71650a82a --- /dev/null +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkIterator.java @@ -0,0 +1,41 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
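The StreamChunkRow type added below pairs each datum row with a change operation. A toy Rust model of that shape, with made-up types and data, just to illustrate what a consumer reads from the iterator:

#[derive(Clone, Copy, Debug)]
enum Op { Insert, Delete, UpdateDelete, UpdateInsert }

struct ChunkRow { op: Op, values: Vec<i64> }

fn main() {
    // An update appears as a delete of the old row followed by an insert of the new one.
    let chunk = vec![
        ChunkRow { op: Op::Insert, values: vec![1, 10] },
        ChunkRow { op: Op::UpdateDelete, values: vec![1, 10] },
        ChunkRow { op: Op::UpdateInsert, values: vec![1, 11] },
        ChunkRow { op: Op::Delete, values: vec![1, 11] },
    ];
    for row in &chunk {
        // Mirrors StreamChunkRow.getOp() followed by the typed getters on BaseRow.
        println!("{:?} -> {:?}", row.op, row.values);
    }
}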
+ +package com.risingwave.java.binding; + +public class StreamChunkIterator implements AutoCloseable { + private final long pointer; + private boolean isClosed; + + public StreamChunkIterator(byte[] streamChunkPayload) { + this.pointer = Binding.streamChunkIteratorNew(streamChunkPayload); + this.isClosed = false; + } + + public StreamChunkRow next() { + long pointer = Binding.streamChunkIteratorNext(this.pointer); + if (pointer == 0) { + return null; + } + return new StreamChunkRow(pointer); + } + + @Override + public void close() { + if (!isClosed) { + isClosed = true; + Binding.streamChunkIteratorClose(pointer); + } + } +} diff --git a/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkRow.java b/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkRow.java new file mode 100644 index 0000000000000..401d3d98f766d --- /dev/null +++ b/java/java-binding/src/main/java/com/risingwave/java/binding/StreamChunkRow.java @@ -0,0 +1,27 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.risingwave.java.binding; + +import com.risingwave.proto.Data; + +public class StreamChunkRow extends BaseRow { + public StreamChunkRow(long pointer) { + super(pointer); + } + + public Data.Op getOp() { + return Data.Op.forNumber(Binding.rowGetOp(pointer)); + } +} diff --git a/java/pom.xml b/java/pom.xml index f78c968c8d61d..88b87e7ae9efb 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -16,6 +16,7 @@ <module>connector-node/risingwave-sink-deltalake</module> <module>connector-node/risingwave-sink-jdbc</module> <module>connector-node/risingwave-source-cdc</module> + <module>connector-node/risingwave-source-test</module> <module>connector-node/risingwave-connector-service</module> <module>connector-node/assembly</module> @@ -30,9 +31,10 @@ 2.10 1.0-SNAPSHOT 2.27.1 - <slf4j.version>1.7.36</slf4j.version> <log4j.version>2.20.0</log4j.version> <commons.cli.version>1.5.0</commons.cli.version> + <commons.io.version>2.11.0</commons.io.version> + <commons.text.version>1.10.0</commons.text.version> 1.9.7.Final 2.13.5 3.3.1 @@ -41,9 +43,9 @@ - <groupId>org.slf4j</groupId> - <artifactId>slf4j-api</artifactId> - <version>${slf4j.version}</version> + <groupId>org.apache.logging.log4j</groupId> + <artifactId>log4j-api</artifactId> + <version>${log4j.version}</version> <groupId>org.apache.logging.log4j</groupId> @@ -60,6 +62,16 @@ <artifactId>commons-cli</artifactId> <version>${commons.cli.version}</version> + <dependency> + <groupId>org.apache.commons</groupId> + <artifactId>commons-text</artifactId> + <version>${commons.text.version}</version> + </dependency> + <dependency> + <groupId>commons-io</groupId> + <artifactId>commons-io</artifactId> + <version>${commons.io.version}</version> + </dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-databind</artifactId> diff --git a/java/tools/maven/checkstyle.xml b/java/tools/maven/checkstyle.xml index 33649434a3266..4bd0d510e0fad 100644 --- a/java/tools/maven/checkstyle.xml +++ b/java/tools/maven/checkstyle.xml @@ -165,11 +165,6 @@ This file is based on the checkstyle file of Apache Beam. - - - - - diff --git a/src/meta/src/stream/scale.rs b/src/meta/src/stream/scale.rs --- a/src/meta/src/stream/scale.rs +++ b/src/meta/src/stream/scale.rs - index_chain --NoShuffle--> lookup) which will break current - // `NoShuffle` scaling assumption. Currently we detect this case and forbid it to scale. if no_shuffle_source_fragment_ids.contains(fragment_id) { let mut queue: VecDeque<_> = fragment_dispatcher_map .get(fragment_id) @@ -451,21 +447,12 @@ where if let Some(downstream_fragments) = fragment_dispatcher_map.get(&downstream_id) { - // If `NoShuffle` used by other fragment type rather than `ChainNode`, bail.
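The scale.rs hunks around here maintain a queue-driven walk so that every fragment reachable over NoShuffle edges is rescheduled together. A condensed sketch of that traversal, with simplified standalone types (not the meta-service ones):

use std::collections::{HashMap, HashSet, VecDeque};

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum DispatcherType { Hash, NoShuffle }

type FragmentId = u32;

// Collect the NoShuffle-reachable downstream closure of `root`, mirroring the
// queue-based propagation in the surrounding hunks (types simplified).
fn no_shuffle_closure(
    root: FragmentId,
    dispatchers: &HashMap<FragmentId, HashMap<FragmentId, DispatcherType>>,
) -> HashSet<FragmentId> {
    let mut visited = HashSet::new();
    let mut queue: VecDeque<FragmentId> = VecDeque::from([root]);
    while let Some(id) = queue.pop_front() {
        if !visited.insert(id) {
            continue;
        }
        if let Some(downstreams) = dispatchers.get(&id) {
            // Only NoShuffle edges force the downstream to move together.
            queue.extend(
                downstreams
                    .iter()
                    .filter(|(_, ty)| **ty == DispatcherType::NoShuffle)
                    .map(|(fragment_id, _)| *fragment_id),
            );
        }
    }
    visited
}

fn main() {
    let mut dispatchers = HashMap::new();
    dispatchers.insert(1, HashMap::from([(2, DispatcherType::NoShuffle), (3, DispatcherType::Hash)]));
    dispatchers.insert(2, HashMap::from([(4, DispatcherType::NoShuffle)]));
    assert_eq!(no_shuffle_closure(1, &dispatchers), HashSet::from([1, 2, 4]));
}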
- for downstream_fragment_id in downstream_fragments.keys() { - let downstream_fragment = fragment_map - .get(downstream_fragment_id) - .ok_or_else(|| anyhow!("fragment {fragment_id} does not exist"))?; - if (downstream_fragment.get_fragment_type_mask() - & (FragmentTypeFlag::ChainNode as u32 - | FragmentTypeFlag::Mview as u32)) - == 0 - { - bail!("Rescheduling NoShuffle edge only supports ChainNode and Mview. Other usage for e.g. delta join is forbidden currently."); - } - } + let no_shuffle_downstreams = downstream_fragments + .iter() + .filter(|(_, ty)| **ty == DispatcherType::NoShuffle) + .map(|(fragment_id, _)| fragment_id); - queue.extend(downstream_fragments.keys().cloned()); + queue.extend(no_shuffle_downstreams.copied()); } no_shuffle_reschedule.insert( @@ -743,7 +730,12 @@ where .unwrap(); if let Some(downstream_fragments) = ctx.fragment_dispatcher_map.get(fragment_id) { - for downstream_fragment_id in downstream_fragments.keys() { + let no_shuffle_downstreams = downstream_fragments + .iter() + .filter(|(_, ty)| **ty == DispatcherType::NoShuffle) + .map(|(fragment_id, _)| fragment_id); + + for downstream_fragment_id in no_shuffle_downstreams { arrange_no_shuffle_relation( ctx, downstream_fragment_id, @@ -1014,20 +1006,19 @@ where } } - let downstream_fragment_ids = - if let Some(downstream_fragments) = ctx.fragment_dispatcher_map.get(&fragment_id) { - // Skip NoShuffle fragments' downstream - if ctx - .no_shuffle_source_fragment_ids - .contains(&fragment.fragment_id) - { - vec![] - } else { - downstream_fragments.keys().copied().collect_vec() - } - } else { - vec![] - }; + let downstream_fragment_ids = if let Some(downstream_fragments) = + ctx.fragment_dispatcher_map.get(&fragment_id) + { + // Skip fragments' no-shuffle downstream, as there's no need to update the merger + // (receiver) of a no-shuffle downstream + downstream_fragments + .iter() + .filter(|(_, dispatcher_type)| *dispatcher_type != &DispatcherType::NoShuffle) + .map(|(fragment_id, _)| *fragment_id) + .collect_vec() + } else { + vec![] + }; let vnode_bitmap_updates = match fragment.distribution_type() { FragmentDistributionType::Hash => { @@ -1123,7 +1114,7 @@ where let _source_pause_guard = self.source_manager.paused.lock().await; - tracing::trace!("reschedule plan: {:#?}", reschedule_fragment); + tracing::debug!("reschedule plan: {:#?}", reschedule_fragment); self.barrier_scheduler .run_command_with_paused(Command::RescheduleFragment(reschedule_fragment)) diff --git a/src/meta/src/stream/sink.rs b/src/meta/src/stream/sink.rs new file mode 100644 index 0000000000000..5fd707941e277 --- /dev/null +++ b/src/meta/src/stream/sink.rs @@ -0,0 +1,36 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use anyhow::anyhow; +use risingwave_connector::sink::catalog::SinkCatalog; +use risingwave_connector::sink::{SinkConfig, SinkImpl}; +use risingwave_pb::catalog::PbSink; + +use crate::{MetaError, MetaResult}; + +pub async fn validate_sink( + prost_sink_catalog: &PbSink, + connector_rpc_endpoint: Option<String>, +) -> MetaResult<()> { + let sink_catalog = SinkCatalog::from(prost_sink_catalog); + let mut properties = sink_catalog.properties.clone(); + // Insert a value as the `identifier` field to get parsed by serde. + properties.insert("identifier".to_string(), u64::MAX.to_string()); + let sink_config = SinkConfig::from_hashmap(properties) + .map_err(|err| MetaError::from(anyhow!(err.to_string())))?; + + SinkImpl::validate(sink_config, sink_catalog, connector_rpc_endpoint) + .await + .map_err(|err| MetaError::from(anyhow!(err.to_string()))) +} diff --git a/src/meta/src/stream/source_manager.rs b/src/meta/src/stream/source_manager.rs index 0469768b5fd16..fc5b5bdacd026 100644 --- a/src/meta/src/stream/source_manager.rs +++ b/src/meta/src/stream/source_manager.rs @@ -694,7 +694,7 @@ mod tests { use std::collections::{BTreeMap, HashMap, HashSet}; use anyhow::anyhow; - use bytes::Bytes; + use risingwave_common::array::JsonbVal; use risingwave_connector::source::{SplitId, SplitMetaData}; use serde::{Deserialize, Serialize}; @@ -711,12 +711,12 @@ mod tests { format!("{}", self.id).into() } - fn encode_to_bytes(&self) -> Bytes { - Bytes::from(serde_json::to_string(self).unwrap()) + fn encode_to_json(&self) -> JsonbVal { + serde_json::to_value(*self).unwrap().into() } - fn restore_from_bytes(bytes: &[u8]) -> anyhow::Result<Self> { - serde_json::from_slice(bytes).map_err(|e| anyhow!(e)) + fn restore_from_json(value: JsonbVal) -> anyhow::Result<Self> { + serde_json::from_value(value.take()).map_err(|e| anyhow!(e)) } } diff --git a/src/meta/src/stream/stream_graph/actor.rs b/src/meta/src/stream/stream_graph/actor.rs index 41315fb78060f..5443c3ec0a678 100644 --- a/src/meta/src/stream/stream_graph/actor.rs +++ b/src/meta/src/stream/stream_graph/actor.rs @@ -170,7 +170,7 @@ impl ActorBuilder { // Index the upstreams by an external edge ID. let upstreams = &self.upstreams[&EdgeId::UpstreamExternal { - upstream_table_id: chain_node.table_id, + upstream_table_id: chain_node.table_id.into(), downstream_fragment_id: self.fragment_id, }]; @@ -180,28 +180,21 @@ impl ActorBuilder { assert_eq!(upstream_actor_id.len(), 1); let chain_input = vec![ - // Fill the merge node with correct upstream info. + // Fill the merge node body with correct upstream info.
StreamNode { - input: vec![], - stream_key: merge_node.stream_key.clone(), node_body: Some(NodeBody::Merge(MergeNode { upstream_actor_id, upstream_fragment_id: upstreams.fragment_id.as_global_id(), upstream_dispatcher_type: DispatcherType::NoShuffle as _, - fields: chain_node.upstream_fields.clone(), + fields: merge_node.fields.clone(), })), - fields: chain_node.upstream_fields.clone(), - operator_id: merge_node.operator_id, - identity: "MergeExecutor".to_string(), - append_only: stream_node.append_only, + ..merge_node.clone() }, batch_plan_node.clone(), ]; Ok(StreamNode { input: chain_input, - identity: "ChainExecutor".to_string(), - fields: chain_node.upstream_fields.clone(), ..stream_node.clone() }) } diff --git a/src/meta/src/stream/stream_graph/fragment.rs b/src/meta/src/stream/stream_graph/fragment.rs index d82b34ed12eb1..6580bdf131172 100644 --- a/src/meta/src/stream/stream_graph/fragment.rs +++ b/src/meta/src/stream/stream_graph/fragment.rs @@ -23,7 +23,6 @@ use itertools::Itertools; use risingwave_common::bail; use risingwave_common::catalog::{generate_internal_table_name_with_type, TableId}; use risingwave_pb::catalog::Table; -use risingwave_pb::meta::table_fragments::fragment::FragmentDistributionType; use risingwave_pb::meta::table_fragments::Fragment; use risingwave_pb::stream_plan::stream_fragment_graph::{ Parallelism, StreamFragment, StreamFragmentEdge as StreamFragmentEdgeProto, @@ -53,6 +52,9 @@ pub(super) struct BuildingFragment { /// The ID of the job if it's materialized in this fragment. table_id: Option<u32>, + + /// The required columns of each upstream table. + upstream_table_columns: HashMap<TableId, Vec<i32>>, } impl BuildingFragment { @@ -70,11 +72,13 @@ impl BuildingFragment { }; let internal_tables = Self::fill_internal_tables(&mut fragment, job, table_id_gen); let table_id = Self::fill_job(&mut fragment, job).then(|| job.id()); + let upstream_table_columns = Self::extract_upstream_table_columns(&mut fragment); Self { inner: fragment, internal_tables, table_id, + upstream_table_columns, } } @@ -139,6 +143,28 @@ impl BuildingFragment { has_table } + + /// Extract the required columns (in IDs) of each upstream table. + fn extract_upstream_table_columns( + // TODO: no need to take `&mut` here + fragment: &mut StreamFragment, + ) -> HashMap<TableId, Vec<i32>> { + let mut table_columns = HashMap::new(); + + visit::visit_fragment(fragment, |node_body| { + if let NodeBody::Chain(chain_node) = node_body { + let table_id = chain_node.table_id.into(); + let column_ids = chain_node.upstream_column_ids.clone(); + table_columns + .try_insert(table_id, column_ids) + .expect("currently there should be no two same upstream tables in a fragment"); + } + }); + + assert_eq!(table_columns.len(), fragment.upstream_table_ids.len()); + + table_columns + } } impl Deref for BuildingFragment { @@ -164,7 +190,7 @@ pub(super) enum EdgeId { /// MV on MV. UpstreamExternal { /// The ID of the upstream table or materialized view. - upstream_table_id: u32, + upstream_table_id: TableId, /// The ID of the downstream fragment. downstream_fragment_id: GlobalFragmentId, }, @@ -412,17 +438,34 @@ impl CompleteStreamFragmentGraph { // Build the extra edges between the upstream `Materialize` and the downstream `Chain` of // the new materialized view.
for (&id, fragment) in &graph.fragments { - for &upstream_table_id in &fragment.upstream_table_ids { + for (&upstream_table_id, output_columns) in &fragment.upstream_table_columns { let mview_fragment = upstream_mview_fragments - .get(&TableId::new(upstream_table_id)) + .get(&upstream_table_id) .context("upstream materialized view fragment not found")?; let mview_id = GlobalFragmentId::new(mview_fragment.fragment_id); - // TODO: only output the fields that are used by the downstream `Chain`. - // https://github.com/risingwavelabs/risingwave/issues/4529 - let mview_output_indices = { - let nodes = mview_fragment.actors[0].nodes.as_ref().unwrap(); - (0..nodes.fields.len() as u32).collect + // Resolve the required output columns from the upstream materialized view. + let output_indices = { + let nodes = mview_fragment.actors[0].get_nodes().unwrap(); + let mview_node = nodes.get_node_body().unwrap().as_materialize().unwrap(); + let all_column_ids = mview_node + .get_table() + .unwrap() + .columns + .iter() + .map(|c| c.column_desc.as_ref().unwrap().column_id) + .collect_vec(); + + output_columns + .iter() + .map(|c| { + all_column_ids + .iter() + .position(|&id| id == *c) + .map(|i| i as u32) + }) + .collect::<Option<Vec<_>>>() + .context("column not found in the upstream materialized view")? }; let edge = StreamFragmentEdge { @@ -434,8 +477,8 @@ // and the downstream `Chain` of the new materialized view. dispatch_strategy: DispatchStrategy { r#type: DispatcherType::NoShuffle as _, - dist_key_indices: vec![], // not used - output_indices: mview_output_indices, + dist_key_indices: vec![], // not used for `NoShuffle` + output_indices, }, }; @@ -602,13 +645,10 @@ let BuildingFragment { inner, internal_tables, table_id, + upstream_table_columns: _, } = self.get_fragment(id).into_building().unwrap(); - let distribution_type = if inner.is_singleton { - FragmentDistributionType::Single - } else { - FragmentDistributionType::Hash - } as i32; + let distribution_type = distribution.to_distribution_type() as i32; let state_table_ids = internal_tables .iter() diff --git a/src/meta/src/stream/stream_graph/schedule.rs b/src/meta/src/stream/stream_graph/schedule.rs index ad5309e446629..8361e6b8ef7c7 100644 --- a/src/meta/src/stream/stream_graph/schedule.rs +++ b/src/meta/src/stream/stream_graph/schedule.rs @@ -29,7 +29,9 @@ use rand::thread_rng; use risingwave_common::bail; use risingwave_common::hash::{ParallelUnitId, ParallelUnitMapping}; use risingwave_pb::common::{ActorInfo, ParallelUnit}; -use risingwave_pb::meta::table_fragments::fragment::FragmentDistributionType; +use risingwave_pb::meta::table_fragments::fragment::{ + FragmentDistributionType, PbFragmentDistributionType, +}; use risingwave_pb::stream_plan::DispatcherType::{self, *}; use crate::manager::{WorkerId, WorkerLocations}; @@ -170,6 +172,14 @@ impl Distribution { FragmentDistributionType::Hash => Distribution::Hash(mapping), } } + + /// Convert the distribution to [`PbFragmentDistributionType`]. + pub fn to_distribution_type(&self) -> PbFragmentDistributionType { + match self { + Distribution::Singleton(_) => PbFragmentDistributionType::Single, + Distribution::Hash(_) => PbFragmentDistributionType::Hash, + } + } } /// [`Scheduler`] schedules the distribution of fragments in a stream graph.
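The output-index resolution in the hunk above is a plain id-to-position lookup that fails if any required column is missing from the upstream mview. The same logic as a standalone sketch (simplified i32 ids, not the real catalog types):

// Map required column ids to their positions in the upstream mview's column list.
// Returns None if any required column is missing, mirroring the hunk above.
fn resolve_output_indices(all_column_ids: &[i32], required: &[i32]) -> Option<Vec<u32>> {
    required
        .iter()
        .map(|c| {
            all_column_ids
                .iter()
                .position(|id| id == c)
                .map(|i| i as u32)
        })
        .collect::<Option<Vec<_>>>()
}

fn main() {
    assert_eq!(resolve_output_indices(&[1, 3, 5], &[5, 1]), Some(vec![2, 0]));
    assert_eq!(resolve_output_indices(&[1, 3], &[7]), None);
}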
@@ -272,7 +282,7 @@ impl Scheduler { // Building fragments and Singletons for (&id, fragment) in graph.building_fragments() { facts.push(Fact::Fragment(id)); - if fragment.is_singleton { + if fragment.requires_singleton { facts.push(Fact::SingletonReq(id)); } } diff --git a/src/meta/src/stream/stream_manager.rs b/src/meta/src/stream/stream_manager.rs index b887baac4995d..7db0d0119359d 100644 --- a/src/meta/src/stream/stream_manager.rs +++ b/src/meta/src/stream/stream_manager.rs @@ -668,7 +668,7 @@ mod tests { } impl MockServices { - async fn start(host: &str, port: u16) -> MetaResult { + async fn start(host: &str, port: u16, enable_recovery: bool) -> MetaResult { let addr = SocketAddr::new(host.parse().unwrap(), port); let state = Arc::new(FakeFragmentState { actor_streams: Mutex::new(HashMap::new()), @@ -692,7 +692,7 @@ mod tests { sleep(Duration::from_secs(1)).await; - let env = MetaSrvEnv::for_test_opts(Arc::new(MetaOpts::test(true))).await; + let env = MetaSrvEnv::for_test_opts(Arc::new(MetaOpts::test(enable_recovery))).await; let system_params = env.system_params_manager().get_params().await; let meta_metrics = Arc::new(MetaMetrics::new()); let cluster_manager = @@ -868,7 +868,7 @@ mod tests { #[tokio::test] async fn test_drop_materialized_view() -> MetaResult<()> { - let services = MockServices::start("127.0.0.1", 12334).await?; + let services = MockServices::start("127.0.0.1", 12334, false).await?; let table_id = TableId::new(0); let actors = make_mview_stream_actors(&table_id, 4); @@ -926,7 +926,7 @@ mod tests { async fn test_failpoints_drop_mv_recovery() { let inject_barrier_err = "inject_barrier_err"; let inject_barrier_err_success = "inject_barrier_err_success"; - let services = MockServices::start("127.0.0.1", 12335).await.unwrap(); + let services = MockServices::start("127.0.0.1", 12335, true).await.unwrap(); let table_id = TableId::new(0); let actors = make_mview_stream_actors(&table_id, 4); diff --git a/src/meta/src/stream/test_fragmenter.rs b/src/meta/src/stream/test_fragmenter.rs index b57c106139509..616f6c38d7a4b 100644 --- a/src/meta/src/stream/test_fragmenter.rs +++ b/src/meta/src/stream/test_fragmenter.rs @@ -18,14 +18,16 @@ use std::vec; use itertools::Itertools; use risingwave_common::catalog::{DatabaseId, SchemaId, TableId}; -use risingwave_pb::catalog::Table as ProstTable; -use risingwave_pb::common::{ParallelUnit, PbColumnOrder, PbDirection, PbOrderType, WorkerNode}; +use risingwave_pb::catalog::PbTable; +use risingwave_pb::common::{ + ParallelUnit, PbColumnOrder, PbDirection, PbNullsAre, PbOrderType, WorkerNode, +}; use risingwave_pb::data::data_type::TypeName; use risingwave_pb::data::DataType; use risingwave_pb::expr::agg_call::Type; use risingwave_pb::expr::expr_node::RexNode; use risingwave_pb::expr::expr_node::Type::{Add, GreaterThan, InputRef}; -use risingwave_pb::expr::{AggCall, ExprNode, FunctionCall, InputRef as ProstInputRef}; +use risingwave_pb::expr::{AggCall, ExprNode, FunctionCall, PbInputRef}; use risingwave_pb::plan_common::{ColumnCatalog, ColumnDesc, Field}; use risingwave_pb::stream_plan::stream_fragment_graph::{StreamFragment, StreamFragmentEdge}; use risingwave_pb::stream_plan::stream_node::NodeBody; @@ -56,7 +58,7 @@ fn make_inputref(idx: u32) -> ExprNode { fn make_sum_aggcall(idx: u32) -> AggCall { AggCall { r#type: Type::Sum as i32, - args: vec![ProstInputRef { + args: vec![PbInputRef { index: idx, r#type: Some(DataType { type_name: TypeName::Int64 as i32, @@ -96,6 +98,7 @@ fn make_column_order(column_index: u32) -> PbColumnOrder { 
column_index, order_type: Some(PbOrderType { direction: PbDirection::Ascending as _, + nulls_are: PbNullsAre::Largest as _, }), } } @@ -114,12 +117,12 @@ fn make_column(column_type: TypeName, column_id: i32) -> ColumnCatalog { } } -fn make_source_internal_table(id: u32) -> ProstTable { +fn make_source_internal_table(id: u32) -> PbTable { let columns = vec![ make_column(TypeName::Varchar, 0), make_column(TypeName::Varchar, 1), ]; - ProstTable { + PbTable { id, schema_id: SchemaId::placeholder().schema_id, database_id: DatabaseId::placeholder().database_id, @@ -129,18 +132,19 @@ fn make_source_internal_table(id: u32) -> ProstTable { column_index: 0, order_type: Some(PbOrderType { direction: PbDirection::Descending as _, + nulls_are: PbNullsAre::Largest as _, }), }], ..Default::default() } } -fn make_internal_table(id: u32, is_agg_value: bool) -> ProstTable { +fn make_internal_table(id: u32, is_agg_value: bool) -> PbTable { let mut columns = vec![make_column(TypeName::Int64, 0)]; if !is_agg_value { columns.push(make_column(TypeName::Int32, 1)); } - ProstTable { + PbTable { id, schema_id: SchemaId::placeholder().schema_id, database_id: DatabaseId::placeholder().database_id, @@ -150,6 +154,7 @@ fn make_internal_table(id: u32, is_agg_value: bool) -> ProstTable { column_index: 0, order_type: Some(PbOrderType { direction: PbDirection::Descending as _, + nulls_are: PbNullsAre::Largest as _, }), }], stream_key: vec![2], @@ -157,8 +162,8 @@ fn make_internal_table(id: u32, is_agg_value: bool) -> ProstTable { } } -fn make_empty_table(id: u32) -> ProstTable { - ProstTable { +fn make_empty_table(id: u32) -> PbTable { + PbTable { id, schema_id: SchemaId::placeholder().schema_id, database_id: DatabaseId::placeholder().database_id, @@ -170,7 +175,7 @@ fn make_empty_table(id: u32) -> ProstTable { } } -fn make_materialize_table(id: u32) -> ProstTable { +fn make_materialize_table(id: u32) -> PbTable { make_internal_table(id, true) } @@ -209,7 +214,7 @@ fn make_stream_fragments() -> Vec { fragment_id: 2, node: Some(source_node), fragment_type_mask: FragmentTypeFlag::Source as u32, - is_singleton: false, + requires_singleton: false, table_ids_cnt: 0, upstream_table_ids: vec![], }); @@ -280,7 +285,7 @@ fn make_stream_fragments() -> Vec { fragment_id: 1, node: Some(simple_agg_node), fragment_type_mask: FragmentTypeFlag::FragmentUnspecified as u32, - is_singleton: false, + requires_singleton: false, table_ids_cnt: 0, upstream_table_ids: vec![], }); @@ -368,7 +373,7 @@ fn make_stream_fragments() -> Vec { fragment_id: 0, node: Some(mview_node), fragment_type_mask: FragmentTypeFlag::Mview as u32, - is_singleton: true, + requires_singleton: true, table_ids_cnt: 0, upstream_table_ids: vec![], }); diff --git a/src/meta/src/stream/test_scale.rs b/src/meta/src/stream/test_scale.rs index d3a42a3c2432c..0ad2f51268db0 100644 --- a/src/meta/src/stream/test_scale.rs +++ b/src/meta/src/stream/test_scale.rs @@ -143,7 +143,7 @@ mod tests { for parallel_unit_num in simulated_parallel_unit_nums(None, None) { let (actor_mapping, _) = generate_actor_mapping(parallel_unit_num); - let actor_to_parallel_unit_map = (0..parallel_unit_num) + let actor_to_parallel_unit_map: HashMap<_, _> = (0..parallel_unit_num) .map(|i| (i as ActorId, i as ParallelUnitId)) .collect(); let parallel_unit_mapping = actor_mapping.to_parallel_unit(&actor_to_parallel_unit_map); diff --git a/src/meta/src/telemetry.rs b/src/meta/src/telemetry.rs new file mode 100644 index 0000000000000..a2b69ad4cf844 --- /dev/null +++ b/src/meta/src/telemetry.rs @@ -0,0 +1,160 @@ 
+// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use anyhow::anyhow; +use risingwave_common::telemetry::report::{TelemetryInfoFetcher, TelemetryReportCreator}; +use risingwave_common::telemetry::{ + current_timestamp, SystemData, TelemetryNodeType, TelemetryReport, TelemetryReportBase, +}; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +use crate::storage::MetaStore; + +/// Column family in meta store +pub const TELEMETRY_CF: &str = "cf/telemetry"; +/// `telemetry` in bytes +pub const TELEMETRY_KEY: &[u8] = &[0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79]; + +#[derive(Debug, Serialize, Deserialize)] +pub(crate) struct MetaTelemetryReport { + #[serde(flatten)] + base: TelemetryReportBase, +} + +impl MetaTelemetryReport { + pub(crate) fn new(tracking_id: String, session_id: String, up_time: u64) -> Self { + Self { + base: TelemetryReportBase { + tracking_id, + session_id, + system_data: SystemData::new(), + up_time, + time_stamp: current_timestamp(), + node_type: TelemetryNodeType::Meta, + }, + } + } +} + +impl TelemetryReport for MetaTelemetryReport { + fn to_json(&self) -> anyhow::Result<String> { + let json = serde_json::to_string(self)?; + Ok(json) + } +} + +pub(crate) struct MetaTelemetryInfoFetcher<S: MetaStore> { + meta_store: Arc<S>, +} + +impl<S: MetaStore> MetaTelemetryInfoFetcher<S> { + pub(crate) fn new(meta_store: Arc<S>) -> Self { + Self { meta_store } + } +} + +#[async_trait::async_trait] +impl<S: MetaStore> TelemetryInfoFetcher for MetaTelemetryInfoFetcher<S> { + async fn fetch_telemetry_info(&self) -> anyhow::Result<String> { + let tracking_id = get_or_create_tracking_id(self.meta_store.clone()).await?; + + Ok(tracking_id) + } +} + +/// Fetch or create a `tracking_id` from the meta store +async fn get_or_create_tracking_id<S: MetaStore>( + meta_store: Arc<S>, +) -> Result<String, anyhow::Error> { + match meta_store.get_cf(TELEMETRY_CF, TELEMETRY_KEY).await { + Ok(bytes) => String::from_utf8(bytes).map_err(|e| anyhow!("failed to parse uuid, {}", e)), + Err(_) => { + let uuid = Uuid::new_v4().to_string(); + // put new uuid in meta store + match meta_store + .put_cf( + TELEMETRY_CF, + TELEMETRY_KEY.to_vec(), + uuid.clone().into_bytes(), + ) + .await + { + Err(e) => Err(anyhow!("failed to create uuid, {}", e)), + Ok(_) => Ok(uuid), + } + } + } +} + +#[derive(Copy, Clone)] +pub(crate) struct MetaReportCreator {} + +impl MetaReportCreator { + pub(crate) fn new() -> Self { + Self {} + } +} + +impl TelemetryReportCreator for MetaReportCreator { + fn create_report( + &self, + tracking_id: String, + session_id: String, + up_time: u64, + ) -> anyhow::Result<MetaTelemetryReport> { + Ok(MetaTelemetryReport::new(tracking_id, session_id, up_time)) + } + + fn report_type(&self) -> &str { + "meta" + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use super::*; + use crate::storage::MemStore; + + #[tokio::test] + async fn test_get_or_create_tracking_id_existing_id() { + let meta_store = Arc::new(MemStore::new()); + let uuid = Uuid::new_v4().to_string(); + meta_store + .put_cf( + TELEMETRY_CF, + TELEMETRY_KEY.to_vec(), + uuid.clone().into_bytes(), + )
.await + .unwrap(); + let result = get_or_create_tracking_id(Arc::clone(&meta_store)) + .await + .unwrap(); + assert_eq!(result, uuid); + } + + #[tokio::test] + async fn test_get_or_create_tracking_id_new_id() { + let meta_store = Arc::new(MemStore::new()); + let result = get_or_create_tracking_id(Arc::clone(&meta_store)) + .await + .unwrap(); + assert!(String::from_utf8(result.into_bytes()).is_ok()); + } +} diff --git a/src/object_store/Cargo.toml b/src/object_store/Cargo.toml index 76f0cb1524027..59544a0077754 100644 --- a/src/object_store/Cargo.toml +++ b/src/object_store/Cargo.toml @@ -21,7 +21,7 @@ fail = "0.5" futures = { version = "0.3", default-features = false, features = ["alloc"] } hyper = "0.14" itertools = "0.10" -opendal = "0.27.2" +opendal = "0.30" prometheus = { version = "0.13", features = ["process"] } random-string = "1.0" risingwave_common = { path = "../common" } diff --git a/src/object_store/src/object/mod.rs b/src/object_store/src/object/mod.rs index 8fd2b810b064e..26efba364f8a6 100644 --- a/src/object_store/src/object/mod.rs +++ b/src/object_store/src/object/mod.rs @@ -857,6 +857,24 @@ pub async fn parse_remote_object_store( .monitored(metrics), ) } + azblob if azblob.starts_with("azblob://") => { + let azblob = azblob.strip_prefix("azblob://").unwrap(); + let (container_name, root) = azblob.split_once('@').unwrap(); + ObjectStoreImpl::Opendal( + OpendalObjectStore::new_azblob_engine(container_name.to_string(), root.to_string()) + .unwrap() + .monitored(metrics), + ) + } + fs if fs.starts_with("fs://") => { + let fs = fs.strip_prefix("fs://").unwrap(); + let (_, root) = fs.split_once('@').unwrap(); + ObjectStoreImpl::Opendal( + OpendalObjectStore::new_fs_engine(root.to_string()) + .unwrap() + .monitored(metrics), + ) + } s3_compatible if s3_compatible.starts_with("s3-compatible://") => { ObjectStoreImpl::S3Compatible( S3ObjectStore::new_s3_compatible( diff --git a/src/object_store/src/object/opendal_engine/azblob.rs b/src/object_store/src/object/opendal_engine/azblob.rs new file mode 100644 index 0000000000000..f13075648d109 --- /dev/null +++ b/src/object_store/src/object/opendal_engine/azblob.rs @@ -0,0 +1,44 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use opendal::services::Azblob; +use opendal::Operator; + +use super::{EngineType, OpendalObjectStore}; +use crate::object::ObjectResult; +impl OpendalObjectStore { + /// create opendal azblob engine. + pub fn new_azblob_engine(container_name: String, root: String) -> ObjectResult { + // Create azblob backend builder. 
+ let mut builder = Azblob::default(); + builder.root(&root); + builder.container(&container_name); + + let endpoint = std::env::var("AZBLOB_ENDPOINT") + .unwrap_or_else(|_| panic!("AZBLOB_ENDPOINT not found from environment variables")); + let account_name = std::env::var("AZBLOB_ACCOUNT_NAME") + .unwrap_or_else(|_| panic!("AZBLOB_ACCOUNT_NAME not found from environment variables")); + let account_key = std::env::var("AZBLOB_ACCOUNT_KEY") + .unwrap_or_else(|_| panic!("AZBLOB_ACCOUNT_KEY not found from environment variables")); + + builder.endpoint(&endpoint); + builder.account_name(&account_name); + builder.account_key(&account_key); + let op: Operator = Operator::new(builder)?.finish(); + Ok(Self { + op, + engine_type: EngineType::Azblob, + }) + } +} diff --git a/src/object_store/src/object/opendal_engine/fs.rs b/src/object_store/src/object/opendal_engine/fs.rs new file mode 100644 index 0000000000000..cf4c97c1cd507 --- /dev/null +++ b/src/object_store/src/object/opendal_engine/fs.rs @@ -0,0 +1,34 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use opendal::services::Fs; +use opendal::Operator; + +use super::{EngineType, OpendalObjectStore}; +use crate::object::ObjectResult; +impl OpendalObjectStore { + /// create opendal fs engine. + pub fn new_fs_engine(root: String) -> ObjectResult { + // Create fs backend builder. 
+ let mut builder = Fs::default(); + + builder.root(&root); + + let op: Operator = Operator::new(builder)?.finish(); + Ok(Self { + op, + engine_type: EngineType::Fs, + }) + } +} diff --git a/src/object_store/src/object/opendal_engine/gcs.rs b/src/object_store/src/object/opendal_engine/gcs.rs index 3165675f85760..75d48d35cbd78 100644 --- a/src/object_store/src/object/opendal_engine/gcs.rs +++ b/src/object_store/src/object/opendal_engine/gcs.rs @@ -27,7 +27,7 @@ impl OpendalObjectStore { builder.root(&root); - let op: Operator = Operator::create(builder)?.finish(); + let op: Operator = Operator::new(builder)?.finish(); Ok(Self { op, engine_type: EngineType::Gcs, diff --git a/src/object_store/src/object/opendal_engine/mod.rs b/src/object_store/src/object/opendal_engine/mod.rs index 0f5002c22bbbe..1e93904eb73b2 100644 --- a/src/object_store/src/object/opendal_engine/mod.rs +++ b/src/object_store/src/object/opendal_engine/mod.rs @@ -26,3 +26,7 @@ pub mod gcs; pub use gcs::*; pub mod oss; pub use oss::*; +pub mod azblob; +pub use azblob::*; +pub mod fs; +pub use fs::*; diff --git a/src/object_store/src/object/opendal_engine/opendal_object_store.rs b/src/object_store/src/object/opendal_engine/opendal_object_store.rs index 21cf8996fea4e..b5f78d9ef9510 100644 --- a/src/object_store/src/object/opendal_engine/opendal_object_store.rs +++ b/src/object_store/src/object/opendal_engine/opendal_object_store.rs @@ -18,7 +18,7 @@ use futures::future::try_join_all; use futures::StreamExt; use itertools::Itertools; use opendal::services::Memory; -use opendal::Operator; +use opendal::{Metakey, Operator}; use tokio::io::AsyncRead; use crate::object::{ @@ -39,6 +39,8 @@ pub enum EngineType { Gcs, Oss, Webhdfs, + Azblob, + Fs, } impl OpendalObjectStore { @@ -47,7 +49,7 @@ impl OpendalObjectStore { // Create memory backend builder. let builder = Memory::default(); - let op: Operator = Operator::create(builder)?.finish(); + let op: Operator = Operator::new(builder)?.finish(); Ok(Self { op, engine_type: EngineType::Memory, @@ -65,7 +67,7 @@ impl ObjectStore for OpendalObjectStore { if obj.is_empty() { Err(ObjectError::internal("upload empty object")) } else { - self.op.object(path).write(obj).await?; + self.op.write(path, obj).await?; Ok(()) } } @@ -81,7 +83,7 @@ impl ObjectStore for OpendalObjectStore { match block { Some(block) => { let range = block.offset as u64..(block.offset + block.size) as u64; - let res = Bytes::from(self.op.object(path).range_read(range).await?); + let res = Bytes::from(self.op.range_read(path, range).await?); if block.size != res.len() { Err(ObjectError::internal("bad block offset and size")) @@ -89,7 +91,7 @@ impl ObjectStore for OpendalObjectStore { Ok(res) } } - None => Ok(Bytes::from(self.op.object(path).read().await?)), + None => Ok(Bytes::from(self.op.read(path).await?)), } } @@ -114,20 +116,15 @@ impl ObjectStore for OpendalObjectStore { )); let reader = match start_pos { - Some(start_position) => { - self.op - .object(path) - .range_reader(start_position as u64..) - .await? 
- } - None => self.op.object(path).reader().await?, + Some(start_position) => self.op.range_reader(path, start_position as u64..).await?, + None => self.op.reader(path).await?, }; Ok(Box::new(reader)) } async fn metadata(&self, path: &str) -> ObjectResult { - let opendal_metadata = self.op.object(path).metadata().await?; + let opendal_metadata = self.op.stat(path).await?; let key = path.to_string(); let last_modified = match opendal_metadata.last_modified() { Some(t) => t.unix_timestamp() as f64, @@ -144,24 +141,28 @@ impl ObjectStore for OpendalObjectStore { } async fn delete(&self, path: &str) -> ObjectResult<()> { - self.op.object(path).delete().await?; + self.op.delete(path).await?; Ok(()) } /// Deletes the objects with the given paths permanently from the storage. If an object /// specified in the request is not found, it will be considered as successfully deleted. async fn delete_objects(&self, paths: &[String]) -> ObjectResult<()> { - self.op.batch().remove(paths.to_vec()).await?; + self.op.remove(paths.to_vec()).await?; Ok(()) } async fn list(&self, prefix: &str) -> ObjectResult> { - let mut object_lister = self.op.object(prefix).list().await?; + let mut object_lister = self.op.list(prefix).await?; let mut metadata_list = vec![]; while let Some(obj) = object_lister.next().await { let object = obj?; let key = prefix.to_string(); - let om = object.metadata().await?; + + let om = self + .op + .metadata(&object, Metakey::LastModified | Metakey::ContentLength) + .await?; let last_modified = match om.last_modified() { Some(t) => t.unix_timestamp() as f64, @@ -186,6 +187,8 @@ impl ObjectStore for OpendalObjectStore { EngineType::Gcs => "Gcs", EngineType::Oss => "Oss", EngineType::Webhdfs => "Webhdfs", + EngineType::Azblob => "Azblob", + EngineType::Fs => "Fs", } } } @@ -213,7 +216,7 @@ impl StreamingUploader for OpenDalStreamingUploader { } async fn finish(mut self: Box) -> ObjectResult<()> { - self.op.object(&self.path).write(self.buffer).await?; + self.op.write(&self.path, self.buffer).await?; Ok(()) } diff --git a/src/object_store/src/object/opendal_engine/oss.rs b/src/object_store/src/object/opendal_engine/oss.rs index 567a1051dd232..5b98ceda28fa4 100644 --- a/src/object_store/src/object/opendal_engine/oss.rs +++ b/src/object_store/src/object/opendal_engine/oss.rs @@ -38,7 +38,7 @@ impl OpendalObjectStore { builder.endpoint(&endpoint); builder.access_key_id(&access_key_id); builder.access_key_secret(&access_key_secret); - let op: Operator = Operator::create(builder)?.finish(); + let op: Operator = Operator::new(builder)?.finish(); Ok(Self { op, engine_type: EngineType::Oss, diff --git a/src/object_store/src/object/opendal_engine/webhdfs.rs b/src/object_store/src/object/opendal_engine/webhdfs.rs index 806a5881639cf..d3af996cbac04 100644 --- a/src/object_store/src/object/opendal_engine/webhdfs.rs +++ b/src/object_store/src/object/opendal_engine/webhdfs.rs @@ -29,7 +29,7 @@ impl OpendalObjectStore { // NOTE: the root must be absolute path. 
builder.root(&root); - let op: Operator = Operator::create(builder)?.finish(); + let op: Operator = Operator::new(builder)?.finish(); Ok(Self { op, engine_type: EngineType::Webhdfs, diff --git a/src/prost/Cargo.toml b/src/prost/Cargo.toml index 1b1c98d5ca282..4547a0decf2de 100644 --- a/src/prost/Cargo.toml +++ b/src/prost/Cargo.toml @@ -8,6 +8,7 @@ license = { workspace = true } repository = { workspace = true } [dependencies] +enum-as-inner = "0.5" pbjson = "0.5" prost = "0.11" prost-helpers = { path = "helpers" } diff --git a/src/prost/build.rs b/src/prost/build.rs index bdf1702f56712..05f1d2c4e70f6 100644 --- a/src/prost/build.rs +++ b/src/prost/build.rs @@ -55,6 +55,7 @@ fn main() -> Result<(), Box> { .compile_well_known_types(true) .protoc_arg("--experimental_allow_proto3_optional") .type_attribute(".", "#[derive(prost_helpers::AnyPB)]") + .type_attribute("node_body", "#[derive(::enum_as_inner::EnumAsInner)]") .type_attribute("catalog.WatermarkDesc", "#[derive(Eq, Hash)]") .type_attribute("expr.ExprNode", "#[derive(Eq, Hash)]") .type_attribute("data.DataType", "#[derive(Eq, Hash)]") @@ -64,6 +65,7 @@ fn main() -> Result<(), Box> { .type_attribute("expr.FunctionCall", "#[derive(Eq, Hash)]") .type_attribute("expr.UserDefinedFunction", "#[derive(Eq, Hash)]") .type_attribute("catalog.StreamSourceInfo", "#[derive(Eq, Hash)]") + .type_attribute("plan_common.GeneratedColumnDesc", "#[derive(Eq, Hash)]") .out_dir(out_dir.as_path()) .compile(&protos, &[proto_dir.to_string()]) .expect("Failed to compile grpc!"); diff --git a/src/prost/helpers/src/generate.rs b/src/prost/helpers/src/generate.rs index 7fe949131ca6f..a689f7e00a0ff 100644 --- a/src/prost/helpers/src/generate.rs +++ b/src/prost/helpers/src/generate.rs @@ -93,11 +93,11 @@ pub fn implement(field: &Field) -> TokenStream2 { if let Some(enum_type) = extract_enum_type_from_field(field) { return quote! { #[inline(always)] - pub fn #getter_fn_name(&self) -> std::result::Result<#enum_type, crate::ProstFieldNotFound> { + pub fn #getter_fn_name(&self) -> std::result::Result<#enum_type, crate::PbFieldNotFound> { if self.#field_name.eq(&0) { - return Err(crate::ProstFieldNotFound(stringify!(#field_name))); + return Err(crate::PbFieldNotFound(stringify!(#field_name))); } - #enum_type::from_i32(self.#field_name).ok_or_else(|| crate::ProstFieldNotFound(stringify!(#field_name))) + #enum_type::from_i32(self.#field_name).ok_or_else(|| crate::PbFieldNotFound(stringify!(#field_name))) } }; }; @@ -110,8 +110,8 @@ pub fn implement(field: &Field) -> TokenStream2 { let ty = extract_type_from_option(data_type); return quote! 
{ #[inline(always)] - pub fn #getter_fn_name(&self) -> std::result::Result<&#ty, crate::ProstFieldNotFound> { - self.#field_name.as_ref().ok_or_else(|| crate::ProstFieldNotFound(stringify!(#field_name))) + pub fn #getter_fn_name(&self) -> std::result::Result<&#ty, crate::PbFieldNotFound> { + self.#field_name.as_ref().ok_or_else(|| crate::PbFieldNotFound(stringify!(#field_name))) } }; } else if ["u32", "u64", "f32", "f64", "i32", "i64", "bool"] diff --git a/src/prost/src/lib.rs b/src/prost/src/lib.rs index e8ad3c7f770d7..a6669186cd3f9 100644 --- a/src/prost/src/lib.rs +++ b/src/prost/src/lib.rs @@ -142,10 +142,10 @@ pub mod backup_service_serde; pub mod java_binding_serde; #[derive(Clone, PartialEq, Eq, Debug)] -pub struct ProstFieldNotFound(pub &'static str); +pub struct PbFieldNotFound(pub &'static str); -impl From for tonic::Status { - fn from(e: ProstFieldNotFound) -> Self { +impl From for tonic::Status { + fn from(e: PbFieldNotFound) -> Self { tonic::Status::new(tonic::Code::Internal, e.0) } } diff --git a/src/risedevtool/connector.toml b/src/risedevtool/connector.toml index 0d826f4edd948..1336e808d2c10 100644 --- a/src/risedevtool/connector.toml +++ b/src/risedevtool/connector.toml @@ -49,12 +49,12 @@ description = "Download Maven" script = ''' #!/usr/bin/env bash -if !(command -v javac &> /dev/null && [[ "$(javac -version 2>&1 | awk '{print $2}')" =~ "11" ]]); then - echo "JDK 11 is not installed. Please install JDK 11 first." +if !(command -v javac &> /dev/null && [[ "$(javac -version 2>&1 | awk '{print $2}')" =~ ^(11|17) ]]); then + echo "JDK 11+ is not installed. Please install JDK 11+ first." exit 1 fi -if command -v mvn &> /dev/null; then +if (command -v mvn &> /dev/null) || [ -d "${PREFIX_BIN}/maven" ]; then exit 0 else echo "Maven is not installed. Downloading now..." 
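Hand-expanding what the renamed derive above generates for one optional field may make the change clearer. This is an illustrative sketch, not actual macro output, and the ExprNode stand-in uses a plain String where the real message has a nested type:

// Hand-expanded sketch of the getter the derive generates for an optional field.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct PbFieldNotFound(pub &'static str);

#[derive(Default)]
pub struct ExprNode {
    pub rex_node: Option<String>, // stand-in for the real message type
}

impl ExprNode {
    #[inline(always)]
    pub fn get_rex_node(&self) -> Result<&String, PbFieldNotFound> {
        // A missing optional field surfaces as PbFieldNotFound, carrying the field name.
        self.rex_node
            .as_ref()
            .ok_or_else(|| PbFieldNotFound(stringify!(rex_node)))
    }
}

fn main() {
    let node = ExprNode::default();
    assert_eq!(node.get_rex_node(), Err(PbFieldNotFound("rex_node")));
}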
diff --git a/src/risedevtool/src/compose.rs b/src/risedevtool/src/compose.rs index 73271c9c34718..afeb67d1b4b76 100644 --- a/src/risedevtool/src/compose.rs +++ b/src/risedevtool/src/compose.rs @@ -154,11 +154,7 @@ fn health_check_port(port: u16) -> HealthCheck { impl Compose for ComputeNodeConfig { fn compose(&self, config: &ComposeConfig) -> Result<ComposeService> { let mut command = Command::new("compute-node"); - ComputeNodeService::apply_command_args( - &mut command, - self, - HummockInMemoryStrategy::Disallowed, - )?; + ComputeNodeService::apply_command_args(&mut command, self)?; if self.enable_tiered_cache { command.arg("--file-cache-dir").arg("/filecache"); } @@ -201,7 +197,11 @@ impl Compose for ComputeNodeConfig { impl Compose for MetaNodeConfig { fn compose(&self, config: &ComposeConfig) -> Result<ComposeService> { let mut command = Command::new("meta-node"); - MetaNodeService::apply_command_args(&mut command, self)?; + MetaNodeService::apply_command_args( + &mut command, + self, + HummockInMemoryStrategy::Disallowed, + )?; if let Some(c) = &config.rw_config_path { let target = Path::new(&config.config_directory).join("risingwave.toml"); @@ -264,11 +264,7 @@ impl Compose for FrontendConfig { impl Compose for CompactorConfig { fn compose(&self, config: &ComposeConfig) -> Result<ComposeService> { let mut command = Command::new("compactor-node"); - CompactorService::apply_command_args( - &mut command, - self, - HummockInMemoryStrategy::Disallowed, - )?; + CompactorService::apply_command_args(&mut command, self)?; if let Some(c) = &config.rw_config_path { let target = Path::new(&config.config_directory).join("risingwave.toml"); diff --git a/src/risedevtool/src/service_config.rs b/src/risedevtool/src/service_config.rs index 4c3c613e487eb..95081086ed2cf 100644 --- a/src/risedevtool/src/service_config.rs +++ b/src/risedevtool/src/service_config.rs @@ -36,12 +36,12 @@ pub struct ComputeNodeConfig { pub provide_opendal: Option<Vec<OpendalConfig>>, pub provide_aws_s3: Option<Vec<AwsS3Config>>, pub provide_jaeger: Option<Vec<JaegerConfig>>, - pub provide_compactor: Option<Vec<CompactorConfig>>, pub user_managed: bool, - pub enable_in_memory_kv_state_backend: bool, pub connector_rpc_endpoint: String, pub total_memory_bytes: usize, + pub memory_control_policy: String, + pub streaming_memory_proportion: f64, pub parallelism: usize, } @@ -65,6 +65,14 @@ pub struct MetaNodeConfig { pub connector_rpc_endpoint: String, pub provide_etcd_backend: Option<Vec<EtcdConfig>>, pub provide_prometheus: Option<Vec<PrometheusConfig>>, + + pub provide_compute_node: Option<Vec<ComputeNodeConfig>>, + pub provide_compactor: Option<Vec<CompactorConfig>>, + + pub provide_aws_s3: Option<Vec<AwsS3Config>>, + pub provide_minio: Option<Vec<MinioConfig>>, + pub provide_opendal: Option<Vec<OpendalConfig>>, + pub enable_in_memory_kv_state_backend: bool, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] @@ -101,8 +109,6 @@ pub struct CompactorConfig { pub exporter_port: u16, pub provide_minio: Option<Vec<MinioConfig>>, - pub provide_opendal: Option<Vec<OpendalConfig>>, - pub provide_aws_s3: Option<Vec<AwsS3Config>>, pub provide_meta_node: Option<Vec<MetaNodeConfig>>, pub user_managed: bool, diff --git a/src/risedevtool/src/task/compactor_service.rs b/src/risedevtool/src/task/compactor_service.rs index 8d11854416f33..76a2b205e6283 100644 --- a/src/risedevtool/src/task/compactor_service.rs +++ b/src/risedevtool/src/task/compactor_service.rs @@ -17,13 +17,10 @@ use std::io::Write; use std::path::Path; use std::process::Command; -use anyhow::{anyhow, Result}; +use anyhow::Result; use crate::util::{get_program_args, get_program_env_cmd, get_program_name}; -use crate::{ - add_meta_node, add_storage_backend, CompactorConfig, ExecuteContext, HummockInMemoryStrategy, - Task, -}; +use crate::{add_meta_node, CompactorConfig, ExecuteContext, Task};
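The `provide_*` fields in the service configs above follow risedev's convention of `Option<Vec<...>>` filled in during config expansion, with `None` meaning not yet expanded. A toy sketch of how a service might consume such a field, illustrative only, with plain address strings standing in for the real config structs:

// Toy model of the `provide_*` convention: None = not yet expanded,
// Some(vec![]) = expanded but nothing provided. Not actual risedev code.
struct ToyCompactorConfig {
    provide_meta_node: Option<Vec<String>>,
}

fn apply_command_args(config: &ToyCompactorConfig) -> Result<(), String> {
    let meta_nodes = config
        .provide_meta_node
        .as_ref()
        .expect("config should be expanded before launching services");
    if meta_nodes.is_empty() {
        return Err("compactor requires at least one meta node".into());
    }
    println!("--meta-address {}", meta_nodes.join(","));
    Ok(())
}

fn main() {
    let config = ToyCompactorConfig {
        provide_meta_node: Some(vec!["127.0.0.1:5690".into()]),
    };
    apply_command_args(&config).unwrap();
}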
pub struct CompactorService { config: CompactorConfig, @@ -45,19 +42,7 @@ impl CompactorService { } /// Apply command args according to config - pub fn apply_command_args( - cmd: &mut Command, - config: &CompactorConfig, - hummock_in_memory_strategy: HummockInMemoryStrategy, - ) -> Result<()> { - if matches!( - hummock_in_memory_strategy, - HummockInMemoryStrategy::Isolated - ) { - return Err(anyhow!( - "compactor cannot use in-memory hummock if remote object store is not provided" - )); - } + pub fn apply_command_args(cmd: &mut Command, config: &CompactorConfig) -> Result<()> { cmd.arg("--listen-addr") .arg(format!("{}:{}", config.listen_address, config.port)) .arg("--prometheus-listener-addr") @@ -78,18 +63,6 @@ impl CompactorService { .arg(format!("{}", compaction_worker_threads_number)); } - let provide_minio = config.provide_minio.as_ref().unwrap(); - let provide_aws_s3 = config.provide_aws_s3.as_ref().unwrap(); - let provide_opendal = config.provide_opendal.as_ref().unwrap(); - add_storage_backend( - &config.id, - provide_opendal, - provide_minio, - provide_aws_s3, - hummock_in_memory_strategy, - cmd, - )?; - let provide_meta_node = config.provide_meta_node.as_ref().unwrap(); add_meta_node(provide_meta_node, cmd)?; @@ -124,7 +97,7 @@ impl Task for CompactorService { cmd.arg("--config-path") .arg(Path::new(&prefix_config).join("risingwave.toml")); - Self::apply_command_args(&mut cmd, &self.config, HummockInMemoryStrategy::Disallowed)?; + Self::apply_command_args(&mut cmd, &self.config)?; if !self.config.user_managed { ctx.run_command(ctx.tmux_run(cmd)?)?; diff --git a/src/risedevtool/src/task/compute_node_service.rs b/src/risedevtool/src/task/compute_node_service.rs index 1c6a58c1eef73..53db91e40e563 100644 --- a/src/risedevtool/src/task/compute_node_service.rs +++ b/src/risedevtool/src/task/compute_node_service.rs @@ -20,7 +20,7 @@ use anyhow::{anyhow, Result}; use super::{ExecuteContext, Task}; use crate::util::{get_program_args, get_program_env_cmd, get_program_name}; -use crate::{add_meta_node, add_storage_backend, ComputeNodeConfig, HummockInMemoryStrategy}; +use crate::{add_meta_node, ComputeNodeConfig}; pub struct ComputeNodeService { config: ComputeNodeConfig, @@ -42,11 +42,7 @@ impl ComputeNodeService { } /// Apply command args according to config - pub fn apply_command_args( - cmd: &mut Command, - config: &ComputeNodeConfig, - hummock_in_memory_strategy: HummockInMemoryStrategy, - ) -> Result<()> { + pub fn apply_command_args(cmd: &mut Command, config: &ComputeNodeConfig) -> Result<()> { cmd.arg("--listen-addr") .arg(format!("{}:{}", config.listen_address, config.port)) .arg("--prometheus-listener-addr") @@ -65,7 +61,11 @@ impl ComputeNodeService { .arg("--parallelism") .arg(&config.parallelism.to_string()) .arg("--total-memory-bytes") - .arg(&config.total_memory_bytes.to_string()); + .arg(&config.total_memory_bytes.to_string()) + .arg("--memory-control-policy") + .arg(&config.memory_control_policy) + .arg("--streaming-memory-proportion") + .arg(&config.streaming_memory_proportion.to_string()); let provide_jaeger = config.provide_jaeger.as_ref().unwrap(); match provide_jaeger.len() { @@ -81,59 +81,9 @@ impl ComputeNodeService { } } - let provide_minio = config.provide_minio.as_ref().unwrap(); - let provide_opendal = config.provide_opendal.as_ref().unwrap(); - let provide_aws_s3 = config.provide_aws_s3.as_ref().unwrap(); - - let provide_compute_node = config.provide_compute_node.as_ref().unwrap(); - - let is_shared_backend = match ( - config.enable_in_memory_kv_state_backend, 
- provide_minio.as_slice(), - provide_aws_s3.as_slice(), - provide_opendal.as_slice(), - ) { - (true, [], [], []) => { - cmd.arg("--state-store").arg("in-memory"); - false - } - (true, _, _, _) => { - return Err(anyhow!( - "When `enable_in_memory_kv_state_backend` is enabled, no minio and aws-s3 should be provided.", - )); - } - (_, provide_minio, provide_aws_s3, provide_opendal) => add_storage_backend( - &config.id, - provide_opendal, - provide_minio, - provide_aws_s3, - hummock_in_memory_strategy, - cmd, - )?, - }; - - if provide_compute_node.len() > 1 && !is_shared_backend { - if config.enable_in_memory_kv_state_backend { - // Using a non-shared backend with multiple compute nodes will be problematic for - // state sharing like scaling. However, for distributed end-to-end tests with - // in-memory state store, this is acceptable. - } else { - return Err(anyhow!( - "Hummock storage may behave incorrectly with in-memory backend for multiple compute-node configuration. Should use a shared backend (e.g. MinIO) instead. Consider adding `use: minio` in risedev config." - )); - } - } - let provide_meta_node = config.provide_meta_node.as_ref().unwrap(); add_meta_node(provide_meta_node, cmd)?; - let provide_compactor = config.provide_compactor.as_ref().unwrap(); - if is_shared_backend && provide_compactor.is_empty() { - return Err(anyhow!( - "When using a shared backend (minio, aws-s3, or shared in-memory with `risedev playground`), at least one compactor is required. Consider adding `use: compactor` in risedev config." - )); - } - Ok(()) } } @@ -168,7 +118,7 @@ impl Task for ComputeNodeService { cmd.arg("--config-path") .arg(Path::new(&prefix_config).join("risingwave.toml")); - Self::apply_command_args(&mut cmd, &self.config, HummockInMemoryStrategy::Isolated)?; + Self::apply_command_args(&mut cmd, &self.config)?; if self.config.enable_tiered_cache { let prefix_data = env::var("PREFIX_DATA")?; cmd.arg("--file-cache-dir").arg( diff --git a/src/risedevtool/src/task/connector_service.rs b/src/risedevtool/src/task/connector_service.rs index 9cc589012a44b..0c1dbca173b6d 100644 --- a/src/risedevtool/src/task/connector_service.rs +++ b/src/risedevtool/src/task/connector_service.rs @@ -46,8 +46,8 @@ impl Task for ConnectorNodeService { if !path.exists() { return Err(anyhow!("RisingWave connector binary not found in {:?}\nDid you enable risingwave connector feature in `./risedev configure`?", path)); } - let mut cmd = Command::new("sh"); - cmd.arg(path).arg("-p").arg(self.config.port.to_string()); + let mut cmd = Command::new(path); + cmd.arg("-p").arg(self.config.port.to_string()); ctx.run_command(ctx.tmux_run(cmd)?)?; ctx.pb.set_message("started"); diff --git a/src/risedevtool/src/task/etcd_service.rs b/src/risedevtool/src/task/etcd_service.rs index d442a45beb542..1912ce58f5b1b 100644 --- a/src/risedevtool/src/task/etcd_service.rs +++ b/src/risedevtool/src/task/etcd_service.rs @@ -57,8 +57,6 @@ impl EtcdService { .arg(&advertise_peer_urls) .arg("--listen-metrics-urls") .arg(&exporter_urls) - .arg("--name") - .arg("risedev-meta") .arg("--max-txn-ops") .arg("999999") .arg("--max-request-bytes") diff --git a/src/risedevtool/src/task/meta_node_service.rs b/src/risedevtool/src/task/meta_node_service.rs index a01b917d109ea..535d42fe5b435 100644 --- a/src/risedevtool/src/task/meta_node_service.rs +++ b/src/risedevtool/src/task/meta_node_service.rs @@ -21,7 +21,7 @@ use itertools::Itertools; use super::{ExecuteContext, Task}; use crate::util::{get_program_args, get_program_env_cmd, get_program_name}; -use 
crate::MetaNodeConfig; +use crate::{add_storage_backend, HummockInMemoryStrategy, MetaNodeConfig}; pub struct MetaNodeService { config: MetaNodeConfig, @@ -43,7 +43,11 @@ impl MetaNodeService { } /// Apply command args according to config - pub fn apply_command_args(cmd: &mut Command, config: &MetaNodeConfig) -> Result<()> { + pub fn apply_command_args( + cmd: &mut Command, + config: &MetaNodeConfig, + hummock_in_memory_strategy: HummockInMemoryStrategy, + ) -> Result<()> { cmd.arg("--listen-addr") .arg(format!("{}:{}", config.listen_address, config.port)) .arg("--advertise-addr") @@ -93,6 +97,57 @@ impl MetaNodeService { } } + let provide_minio = config.provide_minio.as_ref().unwrap(); + let provide_opendal = config.provide_opendal.as_ref().unwrap(); + let provide_aws_s3 = config.provide_aws_s3.as_ref().unwrap(); + + let provide_compute_node = config.provide_compute_node.as_ref().unwrap(); + let provide_compactor = config.provide_compactor.as_ref().unwrap(); + + let is_shared_backend = match ( + config.enable_in_memory_kv_state_backend, + provide_minio.as_slice(), + provide_aws_s3.as_slice(), + provide_opendal.as_slice(), + ) { + (true, [], [], []) => { + cmd.arg("--state-store").arg("in-memory"); + false + } + (true, _, _, _) => { + return Err(anyhow!( + "When `enable_in_memory_kv_state_backend` is enabled, neither minio nor aws-s3 should be provided.", + )); + } + (_, provide_minio, provide_aws_s3, provide_opendal) => add_storage_backend( + &config.id, + provide_opendal, + provide_minio, + provide_aws_s3, + hummock_in_memory_strategy, + cmd, + )?, + }; + + if (provide_compute_node.len() > 1 || !provide_compactor.is_empty()) && !is_shared_backend { + if config.enable_in_memory_kv_state_backend { + // Using a non-shared backend with multiple compute nodes is problematic for + // state sharing, e.g. when scaling. However, for distributed end-to-end tests with + // an in-memory state store, this is acceptable. + } else { + return Err(anyhow!( + "Hummock storage may behave incorrectly when an in-memory backend serves multiple compute nodes or a compactor. Use a shared backend (e.g. MinIO) instead. Consider adding `use: minio` in risedev config." + )); + } + } + + if is_shared_backend && provide_compactor.is_empty() { + return Err(anyhow!( + "When using a shared backend (minio, aws-s3, or shared in-memory with `risedev playground`), at least one compactor is required. Consider adding `use: compactor` in risedev config."
+ )); + } + Ok(()) } } @@ -121,7 +176,7 @@ impl Task for MetaNodeService { ); } - Self::apply_command_args(&mut cmd, &self.config)?; + Self::apply_command_args(&mut cmd, &self.config, HummockInMemoryStrategy::Isolated)?; let prefix_config = env::var("PREFIX_CONFIG")?; cmd.arg("--config-path") diff --git a/src/risedevtool/src/task/utils.rs b/src/risedevtool/src/task/utils.rs index 70767f9c6e393..a758873044522 100644 --- a/src/risedevtool/src/task/utils.rs +++ b/src/risedevtool/src/task/utils.rs @@ -101,25 +101,31 @@ pub fn add_storage_backend( if opendal.engine == "hdfs"{ cmd.arg("--state-store") .arg(format!("hummock+hdfs://{}@{}", opendal.namenode, opendal.root)); - true } else if opendal.engine == "gcs"{ cmd.arg("--state-store") .arg(format!("hummock+gcs://{}@{}", opendal.bucket, opendal.root)); - true} + } else if opendal.engine == "oss"{ cmd.arg("--state-store") .arg(format!("hummock+oss://{}@{}", opendal.bucket, opendal.root)); - true } else if opendal.engine == "webhdfs"{ cmd.arg("--state-store") .arg(format!("hummock+webhdfs://{}@{}", opendal.namenode, opendal.root)); - true + } + else if opendal.engine == "azblob"{ + cmd.arg("--state-store") + .arg(format!("hummock+azblob://{}@{}", opendal.bucket, opendal.root)); + } + else if opendal.engine == "fs"{ + cmd.arg("--state-store") + .arg(format!("hummock+fs://{}@{}", opendal.namenode, opendal.root)); } else{ unimplemented!() } + true } (other_minio, other_s3, _) => { diff --git a/src/rpc_client/src/compute_client.rs b/src/rpc_client/src/compute_client.rs index d7cff5eb9bc93..a0ad224106290 100644 --- a/src/rpc_client/src/compute_client.rs +++ b/src/rpc_client/src/compute_client.rs @@ -30,7 +30,7 @@ use risingwave_pb::monitor_service::{ use risingwave_pb::task_service::exchange_service_client::ExchangeServiceClient; use risingwave_pb::task_service::task_service_client::TaskServiceClient; use risingwave_pb::task_service::{ - AbortTaskRequest, AbortTaskResponse, CreateTaskRequest, ExecuteRequest, GetDataRequest, + CancelTaskRequest, CancelTaskResponse, CreateTaskRequest, ExecuteRequest, GetDataRequest, GetDataResponse, GetStreamRequest, GetStreamResponse, TaskInfoResponse, }; use tokio::sync::mpsc; @@ -157,11 +157,11 @@ impl ComputeClient { Ok(self.task_client.to_owned().execute(req).await?.into_inner()) } - pub async fn abort(&self, req: AbortTaskRequest) -> Result { + pub async fn cancel(&self, req: CancelTaskRequest) -> Result { Ok(self .task_client .to_owned() - .abort_task(req) + .cancel_task(req) .await? 
.into_inner()) } diff --git a/src/rpc_client/src/connector_client.rs b/src/rpc_client/src/connector_client.rs index 44eb525a44ee3..4192ee9d81c2f 100644 --- a/src/rpc_client/src/connector_client.rs +++ b/src/rpc_client/src/connector_client.rs @@ -15,27 +15,40 @@ use std::collections::HashMap; use std::time::Duration; +use anyhow::anyhow; use async_trait::async_trait; -use risingwave_common::config::MAX_CONNECTION_WINDOW_SIZE; +use risingwave_common::config::{MAX_CONNECTION_WINDOW_SIZE, STREAM_WINDOW_SIZE}; use risingwave_common::util::addr::HostAddr; +use risingwave_pb::catalog::SinkType; use risingwave_pb::connector_service::connector_service_client::ConnectorServiceClient; use risingwave_pb::connector_service::get_event_stream_request::{ - Request, StartSource, ValidateProperties, + Request as SourceRequest, StartSource, ValidateProperties, }; +use risingwave_pb::connector_service::sink_stream_request::{Request as SinkRequest, StartSink}; use risingwave_pb::connector_service::*; +use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; +use tokio_stream::wrappers::UnboundedReceiverStream; use tonic::transport::{Channel, Endpoint}; -use tonic::Streaming; +use tonic::{Request, Streaming}; -use crate::error::Result; +use crate::error::{Result, RpcError}; use crate::RpcClient; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct ConnectorClient(ConnectorServiceClient); impl ConnectorClient { pub async fn new(host_addr: HostAddr) -> Result { - let channel = Endpoint::from_shared(format!("http://{}", &host_addr))? + let channel = Endpoint::from_shared(format!("http://{}", &host_addr)) + .map_err(|e| { + RpcError::Internal(anyhow!(format!( + "invalid connector endpoint `{}`: {:?}", + &host_addr, e + ))) + })? .initial_connection_window_size(MAX_CONNECTION_WINDOW_SIZE) + .initial_stream_window_size(STREAM_WINDOW_SIZE) + .tcp_nodelay(true) .connect_timeout(Duration::from_secs(5)) .connect() .await?; @@ -54,7 +67,7 @@ impl ConnectorClient { .0 .to_owned() .get_event_stream(GetEventStreamRequest { - request: Some(Request::Start(StartSource { + request: Some(SourceRequest::Start(StartSource { source_id, source_type: source_type as _, start_offset: start_offset.unwrap_or_default(), @@ -73,7 +86,7 @@ impl ConnectorClient { } /// Validate source properties - pub async fn validate_properties( + pub async fn validate_source_properties( &self, source_id: u64, source_type: SourceType, @@ -84,7 +97,7 @@ impl ConnectorClient { .0 .to_owned() .get_event_stream(GetEventStreamRequest { - request: Some(Request::Validate(ValidateProperties { + request: Some(SourceRequest::Validate(ValidateProperties { source_id, source_type: source_type as _, properties, @@ -101,6 +114,74 @@ impl ConnectorClient { })? 
.into_inner()) } + + pub async fn start_sink_stream( + &self, + connector_type: String, + properties: HashMap, + table_schema: Option, + sink_payload_format: SinkPayloadFormat, + ) -> Result<(UnboundedSender, Streaming)> { + let (request_sender, request_receiver) = unbounded_channel::(); + + // Send initial request in case of the blocking receive call from creating streaming request + request_sender + .send(SinkStreamRequest { + request: Some(SinkRequest::Start(StartSink { + format: sink_payload_format as i32, + sink_config: Some(SinkConfig { + connector_type, + properties, + table_schema, + }), + })), + }) + .map_err(|err| RpcError::Internal(anyhow!(err.to_string())))?; + + let response = self + .0 + .to_owned() + .sink_stream(Request::new(UnboundedReceiverStream::new(request_receiver))) + .await + .map_err(RpcError::GrpcStatus)? + .into_inner(); + + Ok((request_sender, response)) + } + + pub async fn validate_sink_properties( + &self, + connector_type: String, + properties: HashMap, + table_schema: Option, + sink_type: SinkType, + ) -> Result<()> { + let response = self + .0 + .to_owned() + .validate_sink(ValidateSinkRequest { + sink_config: Some(SinkConfig { + connector_type, + properties, + table_schema, + }), + sink_type: sink_type as i32, + }) + .await + .inspect_err(|err| { + tracing::error!("failed to validate sink properties: {}", err.message()) + })? + .into_inner(); + response.error.map_or_else( + || Ok(()), // If there is no error message, return Ok here. + |err| { + Err(RpcError::Internal(anyhow!(format!( + "sink cannot pass validation: {}", + err.error_message + )))) + }, + ) + } } #[async_trait] diff --git a/src/rpc_client/src/hummock_meta_client.rs b/src/rpc_client/src/hummock_meta_client.rs index 4859842af29e8..827349213a559 100644 --- a/src/rpc_client/src/hummock_meta_client.rs +++ b/src/rpc_client/src/hummock_meta_client.rs @@ -16,10 +16,11 @@ use async_trait::async_trait; use futures::stream::BoxStream; use risingwave_hummock_sdk::table_stats::TableStatsMap; use risingwave_hummock_sdk::{ - HummockEpoch, HummockSstableId, HummockVersionId, LocalSstableInfo, SstIdRange, + HummockEpoch, HummockSstableObjectId, HummockVersionId, LocalSstableInfo, SstObjectIdRange, }; use risingwave_pb::hummock::{ - CompactTask, CompactTaskProgress, HummockSnapshot, HummockVersion, VacuumTask, + CompactTask, CompactTaskProgress, CompactorWorkload, HummockSnapshot, HummockVersion, + VacuumTask, }; use crate::error::Result; @@ -35,15 +36,16 @@ pub trait HummockMetaClient: Send + Sync + 'static { async fn unpin_snapshot(&self) -> Result<()>; async fn unpin_snapshot_before(&self, pinned_epochs: HummockEpoch) -> Result<()>; async fn get_epoch(&self) -> Result; - async fn get_new_sst_ids(&self, number: u32) -> Result; + async fn get_new_sst_ids(&self, number: u32) -> Result; async fn report_compaction_task( &self, compact_task: CompactTask, table_stats_change: TableStatsMap, ) -> Result<()>; - async fn report_compaction_task_progress( + async fn compactor_heartbeat( &self, progress: Vec, + workload: CompactorWorkload, ) -> Result<()>; // We keep `commit_epoch` only for test/benchmark. 
async fn commit_epoch( @@ -56,6 +58,7 @@ pub trait HummockMetaClient: Send + Sync + 'static { async fn subscribe_compact_tasks( &self, max_concurrent_task_number: u64, + cpu_core_num: u32, ) -> Result>; async fn report_vacuum_task(&self, vacuum_task: VacuumTask) -> Result<()>; async fn trigger_manual_compaction( @@ -64,6 +67,6 @@ pub trait HummockMetaClient: Send + Sync + 'static { table_id: u32, level: u32, ) -> Result<()>; - async fn report_full_scan_task(&self, sst_ids: Vec) -> Result<()>; + async fn report_full_scan_task(&self, object_ids: Vec) -> Result<()>; async fn trigger_full_gc(&self, sst_retention_time_sec: u64) -> Result<()>; } diff --git a/src/rpc_client/src/meta_client.rs b/src/rpc_client/src/meta_client.rs index 436632eae6631..6638b43073601 100644 --- a/src/rpc_client/src/meta_client.rs +++ b/src/rpc_client/src/meta_client.rs @@ -27,23 +27,23 @@ use lru::LruCache; use risingwave_common::catalog::{CatalogVersion, FunctionId, IndexId, TableId}; use risingwave_common::config::MAX_CONNECTION_WINDOW_SIZE; use risingwave_common::system_param::reader::SystemParamsReader; +use risingwave_common::telemetry::report::TelemetryInfoFetcher; use risingwave_common::util::addr::HostAddr; use risingwave_common::util::column_index_mapping::ColIndexMapping; use risingwave_hummock_sdk::compact::CompactorRuntimeConfig; use risingwave_hummock_sdk::compaction_group::StateTableId; use risingwave_hummock_sdk::table_stats::to_prost_table_stats_map; use risingwave_hummock_sdk::{ - CompactionGroupId, HummockEpoch, HummockSstableId, HummockVersionId, LocalSstableInfo, - SstIdRange, + CompactionGroupId, HummockEpoch, HummockSstableObjectId, HummockVersionId, LocalSstableInfo, + SstObjectIdRange, }; use risingwave_pb::backup_service::backup_service_client::BackupServiceClient; use risingwave_pb::backup_service::*; use risingwave_pb::catalog::{ - Database as ProstDatabase, Function as ProstFunction, Index as ProstIndex, - Schema as ProstSchema, Sink as ProstSink, Source as ProstSource, Table as ProstTable, - View as ProstView, + Connection, PbDatabase, PbFunction, PbIndex, PbSchema, PbSink, PbSource, PbTable, PbView, }; use risingwave_pb::common::{HostAddress, WorkerType}; +use risingwave_pb::ddl_service::alter_relation_name_request::Relation; use risingwave_pb::ddl_service::ddl_service_client::DdlServiceClient; use risingwave_pb::ddl_service::drop_table_request::SourceId; use risingwave_pb::ddl_service::*; @@ -56,10 +56,11 @@ use risingwave_pb::meta::heartbeat_service_client::HeartbeatServiceClient; use risingwave_pb::meta::list_table_fragments_response::TableFragmentInfo; use risingwave_pb::meta::meta_member_service_client::MetaMemberServiceClient; use risingwave_pb::meta::notification_service_client::NotificationServiceClient; -use risingwave_pb::meta::reschedule_request::Reschedule as ProstReschedule; +use risingwave_pb::meta::reschedule_request::PbReschedule; use risingwave_pb::meta::scale_service_client::ScaleServiceClient; use risingwave_pb::meta::stream_manager_service_client::StreamManagerServiceClient; use risingwave_pb::meta::system_params_service_client::SystemParamsServiceClient; +use risingwave_pb::meta::telemetry_info_service_client::TelemetryInfoServiceClient; use risingwave_pb::meta::*; use risingwave_pb::stream_plan::StreamFragmentGraph; use risingwave_pb::user::update_user_request::UpdateField; @@ -123,6 +124,26 @@ impl MetaClient { .await } + pub async fn create_connection(&self, req: create_connection_request::Payload) -> Result { + let request = CreateConnectionRequest { payload: 
Some(req) }; + let resp = self.inner.create_connection(request).await?; + Ok(resp.connection_id) + } + + pub async fn list_connections(&self, _name: Option<&str>) -> Result> { + let request = ListConnectionsRequest {}; + let resp = self.inner.list_connections(request).await?; + Ok(resp.connections) + } + + pub async fn drop_connection(&self, connection_name: &str) -> Result<()> { + let request = DropConnectionRequest { + connection_name: connection_name.to_string(), + }; + let _ = self.inner.drop_connection(request).await?; + Ok(()) + } + pub(crate) fn parse_meta_addr(meta_addr: &str) -> Result { if meta_addr.starts_with(Self::META_ADDRESS_LOAD_BALANCE_MODE_PREFIX) { let addr = meta_addr @@ -241,14 +262,14 @@ impl MetaClient { Ok(()) } - pub async fn create_database(&self, db: ProstDatabase) -> Result<(DatabaseId, CatalogVersion)> { + pub async fn create_database(&self, db: PbDatabase) -> Result<(DatabaseId, CatalogVersion)> { let request = CreateDatabaseRequest { db: Some(db) }; let resp = self.inner.create_database(request).await?; // TODO: handle error in `resp.status` here Ok((resp.database_id, resp.version)) } - pub async fn create_schema(&self, schema: ProstSchema) -> Result<(SchemaId, CatalogVersion)> { + pub async fn create_schema(&self, schema: PbSchema) -> Result<(SchemaId, CatalogVersion)> { let request = CreateSchemaRequest { schema: Some(schema), }; @@ -259,7 +280,7 @@ impl MetaClient { pub async fn create_materialized_view( &self, - table: ProstTable, + table: PbTable, graph: StreamFragmentGraph, ) -> Result<(TableId, CatalogVersion)> { let request = CreateMaterializedViewRequest { @@ -280,7 +301,7 @@ impl MetaClient { Ok(resp.version) } - pub async fn create_source(&self, source: ProstSource) -> Result<(u32, CatalogVersion)> { + pub async fn create_source(&self, source: PbSource) -> Result<(u32, CatalogVersion)> { let request = CreateSourceRequest { source: Some(source), }; @@ -291,7 +312,7 @@ impl MetaClient { pub async fn create_sink( &self, - sink: ProstSink, + sink: PbSink, graph: StreamFragmentGraph, ) -> Result<(u32, CatalogVersion)> { let request = CreateSinkRequest { @@ -305,7 +326,7 @@ impl MetaClient { pub async fn create_function( &self, - function: ProstFunction, + function: PbFunction, ) -> Result<(FunctionId, CatalogVersion)> { let request = CreateFunctionRequest { function: Some(function), @@ -316,8 +337,8 @@ impl MetaClient { pub async fn create_table( &self, - source: Option, - table: ProstTable, + source: Option, + table: PbTable, graph: StreamFragmentGraph, ) -> Result<(TableId, CatalogVersion)> { let request = CreateTableRequest { @@ -330,9 +351,22 @@ impl MetaClient { Ok((resp.table_id.into(), resp.version)) } + pub async fn alter_relation_name( + &self, + relation: Relation, + name: &str, + ) -> Result { + let request = AlterRelationNameRequest { + relation: Some(relation), + new_name: name.to_string(), + }; + let resp = self.inner.alter_relation_name(request).await?; + Ok(resp.version) + } + pub async fn replace_table( &self, - table: ProstTable, + table: PbTable, graph: StreamFragmentGraph, table_col_index_mapping: ColIndexMapping, ) -> Result { @@ -346,7 +380,7 @@ impl MetaClient { Ok(resp.version) } - pub async fn create_view(&self, view: ProstView) -> Result<(u32, CatalogVersion)> { + pub async fn create_view(&self, view: PbView) -> Result<(u32, CatalogVersion)> { let request = CreateViewRequest { view: Some(view) }; let resp = self.inner.create_view(request).await?; // TODO: handle error in `resp.status` here @@ -355,8 +389,8 @@ impl MetaClient { 
pub async fn create_index( &self, - index: ProstIndex, - table: ProstTable, + index: PbIndex, + table: PbTable, graph: StreamFragmentGraph, ) -> Result<(TableId, CatalogVersion)> { let request = CreateIndexRequest { @@ -568,7 +602,7 @@ impl MetaClient { (join_handle, shutdown_tx) } - pub async fn risectl_list_state_tables(&self) -> Result> { + pub async fn risectl_list_state_tables(&self) -> Result> { let request = RisectlListStateTablesRequest {}; let resp = self.inner.risectl_list_state_tables(request).await?; Ok(resp.tables) @@ -615,7 +649,7 @@ impl MetaClient { Ok(resp) } - pub async fn reschedule(&self, reschedules: HashMap) -> Result { + pub async fn reschedule(&self, reschedules: HashMap) -> Result { let request = RescheduleRequest { reschedules }; let resp = self.inner.reschedule(request).await?; Ok(resp.success) @@ -641,7 +675,7 @@ impl MetaClient { pub async fn init_metadata_for_replay( &self, - tables: Vec, + tables: Vec, compaction_groups: Vec, ) -> Result<()> { let req = InitMetadataForReplayRequest { @@ -781,6 +815,12 @@ impl MetaClient { Ok(resp.manifest.expect("should exist")) } + pub async fn get_telemetry_info(&self) -> Result { + let req = GetTelemetryInfoRequest {}; + let resp = self.inner.get_telemetry_info(req).await?; + Ok(resp) + } + pub async fn get_system_params(&self) -> Result { let req = GetSystemParamsRequest {}; let resp = self.inner.get_system_params(req).await?; @@ -869,12 +909,12 @@ impl HummockMetaClient for MetaClient { Ok(()) } - async fn get_new_sst_ids(&self, number: u32) -> Result { + async fn get_new_sst_ids(&self, number: u32) -> Result { let resp = self .inner .get_new_sst_ids(GetNewSstIdsRequest { number }) .await?; - Ok(SstIdRange::new(resp.start_id, resp.end_id)) + Ok(SstObjectIdRange::new(resp.start_id, resp.end_id)) } async fn report_compaction_task( @@ -906,24 +946,28 @@ impl HummockMetaClient for MetaClient { async fn subscribe_compact_tasks( &self, max_concurrent_task_number: u64, + cpu_core_num: u32, ) -> Result> { let req = SubscribeCompactTasksRequest { context_id: self.worker_id(), max_concurrent_task_number, + cpu_core_num, }; let stream = self.inner.subscribe_compact_tasks(req).await?; Ok(Box::pin(stream)) } - async fn report_compaction_task_progress( + async fn compactor_heartbeat( &self, progress: Vec, + workload: CompactorWorkload, ) -> Result<()> { - let req = ReportCompactionTaskProgressRequest { + let req = CompactorHeartbeatRequest { context_id: self.worker_id(), progress, + workload: Some(workload), }; - self.inner.report_compaction_task_progress(req).await?; + self.inner.compactor_heartbeat(req).await?; Ok(()) } @@ -935,8 +979,8 @@ impl HummockMetaClient for MetaClient { Ok(()) } - async fn report_full_scan_task(&self, sst_ids: Vec) -> Result<()> { - let req = ReportFullScanTaskRequest { sst_ids }; + async fn report_full_scan_task(&self, object_ids: Vec) -> Result<()> { + let req = ReportFullScanTaskRequest { object_ids }; self.inner.report_full_scan_task(req).await?; Ok(()) } @@ -971,6 +1015,17 @@ impl HummockMetaClient for MetaClient { } } +#[async_trait] +impl TelemetryInfoFetcher for MetaClient { + async fn fetch_telemetry_info(&self) -> anyhow::Result { + let resp = self.get_telemetry_info().await?; + let tracking_id = resp + .get_tracking_id() + .map_err(|e| anyhow::format_err!("failed to get tracking_id {:?}", e))?; + Ok(tracking_id.to_string()) + } +} + #[derive(Debug, Clone)] struct GrpcMetaClientCore { cluster_client: ClusterServiceClient, @@ -983,6 +1038,7 @@ struct GrpcMetaClientCore { user_client: 
UserServiceClient, scale_client: ScaleServiceClient, backup_client: BackupServiceClient, + telemetry_client: TelemetryInfoServiceClient, system_params_client: SystemParamsServiceClient, } @@ -998,7 +1054,9 @@ impl GrpcMetaClientCore { let user_client = UserServiceClient::new(channel.clone()); let scale_client = ScaleServiceClient::new(channel.clone()); let backup_client = BackupServiceClient::new(channel.clone()); + let telemetry_client = TelemetryInfoServiceClient::new(channel.clone()); let system_params_client = SystemParamsServiceClient::new(channel); + GrpcMetaClientCore { cluster_client, meta_member_client, @@ -1010,6 +1068,7 @@ impl GrpcMetaClientCore { user_client, scale_client, backup_client, + telemetry_client, system_params_client, } } @@ -1303,7 +1362,6 @@ impl GrpcMetaClient { ))) }) .await?; - Ok(channel) } @@ -1338,6 +1396,7 @@ macro_rules! for_all_meta_rpc { ,{ stream_client, cancel_creating_jobs, CancelCreatingJobsRequest, CancelCreatingJobsResponse } ,{ stream_client, list_table_fragments, ListTableFragmentsRequest, ListTableFragmentsResponse } ,{ ddl_client, create_table, CreateTableRequest, CreateTableResponse } + ,{ ddl_client, alter_relation_name, AlterRelationNameRequest, AlterRelationNameResponse } ,{ ddl_client, create_materialized_view, CreateMaterializedViewRequest, CreateMaterializedViewResponse } ,{ ddl_client, create_view, CreateViewRequest, CreateViewResponse } ,{ ddl_client, create_source, CreateSourceRequest, CreateSourceResponse } @@ -1358,6 +1417,9 @@ macro_rules! for_all_meta_rpc { ,{ ddl_client, replace_table_plan, ReplaceTablePlanRequest, ReplaceTablePlanResponse } ,{ ddl_client, risectl_list_state_tables, RisectlListStateTablesRequest, RisectlListStateTablesResponse } ,{ ddl_client, get_ddl_progress, GetDdlProgressRequest, GetDdlProgressResponse } + ,{ ddl_client, create_connection, CreateConnectionRequest, CreateConnectionResponse } + ,{ ddl_client, list_connections, ListConnectionsRequest, ListConnectionsResponse } + ,{ ddl_client, drop_connection, DropConnectionRequest, DropConnectionResponse } ,{ hummock_client, unpin_version_before, UnpinVersionBeforeRequest, UnpinVersionBeforeResponse } ,{ hummock_client, get_current_version, GetCurrentVersionRequest, GetCurrentVersionResponse } ,{ hummock_client, replay_version_delta, ReplayVersionDeltaRequest, ReplayVersionDeltaResponse } @@ -1373,7 +1435,7 @@ macro_rules! for_all_meta_rpc { ,{ hummock_client, report_compaction_tasks, ReportCompactionTasksRequest, ReportCompactionTasksResponse } ,{ hummock_client, get_new_sst_ids, GetNewSstIdsRequest, GetNewSstIdsResponse } ,{ hummock_client, subscribe_compact_tasks, SubscribeCompactTasksRequest, Streaming } - ,{ hummock_client, report_compaction_task_progress, ReportCompactionTaskProgressRequest, ReportCompactionTaskProgressResponse } + ,{ hummock_client, compactor_heartbeat, CompactorHeartbeatRequest, CompactorHeartbeatResponse } ,{ hummock_client, report_vacuum_task, ReportVacuumTaskRequest, ReportVacuumTaskResponse } ,{ hummock_client, trigger_manual_compaction, TriggerManualCompactionRequest, TriggerManualCompactionResponse } ,{ hummock_client, report_full_scan_task, ReportFullScanTaskRequest, ReportFullScanTaskResponse } @@ -1399,6 +1461,7 @@ macro_rules! 
for_all_meta_rpc { ,{ backup_client, get_backup_job_status, GetBackupJobStatusRequest, GetBackupJobStatusResponse } ,{ backup_client, delete_meta_snapshot, DeleteMetaSnapshotRequest, DeleteMetaSnapshotResponse} ,{ backup_client, get_meta_snapshot_manifest, GetMetaSnapshotManifestRequest, GetMetaSnapshotManifestResponse} + ,{ telemetry_client, get_telemetry_info, GetTelemetryInfoRequest, TelemetryInfoResponse} ,{ system_params_client, get_system_params, GetSystemParamsRequest, GetSystemParamsResponse } ,{ system_params_client, set_system_param, SetSystemParamRequest, SetSystemParamResponse } } diff --git a/src/source/benches/json_parser.rs b/src/source/benches/json_parser.rs index 22d4a1cb94e7f..e4f9eceb8d3c2 100644 --- a/src/source/benches/json_parser.rs +++ b/src/source/benches/json_parser.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use rand::distributions::Alphanumeric; use rand::prelude::*; use risingwave_common::catalog::ColumnId; @@ -53,97 +53,39 @@ fn generate_all_json() -> Vec> { fn get_descs() -> Vec { vec![ - SourceColumnDesc { - name: "i32".to_string(), - data_type: DataType::Int32, - column_id: ColumnId::from(0), - is_row_id: false, - is_meta: false, - fields: vec![], - }, - SourceColumnDesc { - name: "bool".to_string(), - data_type: DataType::Boolean, - column_id: ColumnId::from(2), - is_row_id: false, - is_meta: false, - fields: vec![], - }, - SourceColumnDesc { - name: "i16".to_string(), - data_type: DataType::Int16, - column_id: ColumnId::from(3), - is_row_id: false, - is_meta: false, - fields: vec![], - }, - SourceColumnDesc { - name: "i64".to_string(), - data_type: DataType::Int64, - column_id: ColumnId::from(4), - is_row_id: false, - is_meta: false, - fields: vec![], - }, - SourceColumnDesc { - name: "f32".to_string(), - data_type: DataType::Float32, - column_id: ColumnId::from(5), - is_row_id: false, - is_meta: false, - fields: vec![], - }, - SourceColumnDesc { - name: "f64".to_string(), - data_type: DataType::Float64, - column_id: ColumnId::from(6), - is_row_id: false, - is_meta: false, - fields: vec![], - }, - SourceColumnDesc { - name: "varchar".to_string(), - data_type: DataType::Varchar, - column_id: ColumnId::from(7), - is_row_id: false, - is_meta: false, - fields: vec![], - }, - SourceColumnDesc { - name: "date".to_string(), - data_type: DataType::Date, - column_id: ColumnId::from(8), - is_row_id: false, - is_meta: false, - fields: vec![], - }, - SourceColumnDesc { - name: "timestamp".to_string(), - data_type: DataType::Timestamp, - column_id: ColumnId::from(9), - is_row_id: false, - is_meta: false, - fields: vec![], - }, + SourceColumnDesc::simple("i32", DataType::Int32, ColumnId::from(0)), + SourceColumnDesc::simple("bool", DataType::Boolean, ColumnId::from(2)), + SourceColumnDesc::simple("i16", DataType::Int16, ColumnId::from(3)), + SourceColumnDesc::simple("i64", DataType::Int64, ColumnId::from(4)), + SourceColumnDesc::simple("f32", DataType::Float32, ColumnId::from(5)), + SourceColumnDesc::simple("f64", DataType::Float64, ColumnId::from(6)), + SourceColumnDesc::simple("varchar", DataType::Varchar, ColumnId::from(7)), + SourceColumnDesc::simple("date", DataType::Date, ColumnId::from(8)), + SourceColumnDesc::simple("timestamp", DataType::Timestamp, ColumnId::from(9)), ] } fn bench_json_parser(c: &mut Criterion) { let descs = get_descs(); let parser 
= JsonParser::new_for_test(descs.clone()).unwrap(); - let records = generate_all_json(); let rt = tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap(); + let records = generate_all_json(); c.bench_function("json_parser", |b| { - b.to_async(&rt).iter(|| async { - let mut builder = SourceStreamChunkBuilder::with_capacity(descs.clone(), NUM_RECORDS); - for record in &records { - let writer = builder.row_writer(); - parser.parse_inner(record, writer).await.unwrap(); - } - }) + b.to_async(&rt).iter_batched( + || records.clone(), + |records| async { + let mut builder = + SourceStreamChunkBuilder::with_capacity(descs.clone(), NUM_RECORDS); + for record in records { + let writer = builder.row_writer(); + parser.parse_inner(record, writer).await.unwrap(); + } + }, + BatchSize::SmallInput, + ) }); } diff --git a/src/source/src/connector_source.rs b/src/source/src/connector_source.rs index a131e4c9a3928..979e905b0e596 100644 --- a/src/source/src/connector_source.rs +++ b/src/source/src/connector_source.rs @@ -101,10 +101,10 @@ impl ConnectorSource { let data_gen_columns = Some( columns .iter() - .cloned() .map(|col| Column { - name: col.name, - data_type: col.data_type, + name: col.name.clone(), + data_type: col.data_type.clone(), + is_visible: col.is_visible(), }) .collect_vec(), ); diff --git a/src/source/src/lib.rs b/src/source/src/lib.rs index d9b0725e55388..ce31cf283be76 100644 --- a/src/source/src/lib.rs +++ b/src/source/src/lib.rs @@ -32,5 +32,4 @@ pub mod connector_source; pub mod source_desc; pub use source_desc::test_utils as connector_test_utils; pub mod fs_connector_source; -pub mod row_id; mod table; diff --git a/src/source/src/source_desc.rs b/src/source/src/source_desc.rs index d6c0036b6fb28..2bcc5bfec1ab8 100644 --- a/src/source/src/source_desc.rs +++ b/src/source/src/source_desc.rs @@ -22,10 +22,8 @@ use risingwave_connector::parser::SpecificParserConfig; use risingwave_connector::source::monitor::SourceMetrics; use risingwave_connector::source::{SourceColumnDesc, SourceFormat}; use risingwave_connector::ConnectorParams; -use risingwave_pb::catalog::StreamSourceInfo as ProstStreamSourceInfo; -use risingwave_pb::plan_common::{ - ColumnCatalog as ProstColumnCatalog, RowFormatType as ProstRowFormatType, -}; +use risingwave_pb::catalog::PbStreamSourceInfo; +use risingwave_pb::plan_common::{PbColumnCatalog, PbRowFormatType}; use crate::connector_source::ConnectorSource; use crate::fs_connector_source::FsConnectorSource; @@ -54,12 +52,12 @@ pub struct FsSourceDesc { #[derive(Clone)] pub struct SourceDescBuilder { - columns: Vec, + columns: Vec, metrics: Arc, pk_column_ids: Vec, row_id_index: Option, properties: HashMap, - source_info: ProstStreamSourceInfo, + source_info: PbStreamSourceInfo, connector_params: ConnectorParams, connector_message_buffer_size: usize, } @@ -67,12 +65,12 @@ pub struct SourceDescBuilder { impl SourceDescBuilder { #[allow(clippy::too_many_arguments)] pub fn new( - columns: Vec, + columns: Vec, metrics: Arc, pk_column_ids: Vec, row_id_index: Option, properties: HashMap, - source_info: ProstStreamSourceInfo, + source_info: PbStreamSourceInfo, connector_params: ConnectorParams, connector_message_buffer_size: usize, ) -> Self { @@ -90,16 +88,16 @@ impl SourceDescBuilder { pub async fn build(self) -> Result { let format = match self.source_info.get_row_format()? 
{ - ProstRowFormatType::Json => SourceFormat::Json, - ProstRowFormatType::Protobuf => SourceFormat::Protobuf, - ProstRowFormatType::DebeziumJson => SourceFormat::DebeziumJson, - ProstRowFormatType::Avro => SourceFormat::Avro, - ProstRowFormatType::Maxwell => SourceFormat::Maxwell, - ProstRowFormatType::CanalJson => SourceFormat::CanalJson, - ProstRowFormatType::Native => SourceFormat::Native, - ProstRowFormatType::DebeziumAvro => SourceFormat::DebeziumAvro, - ProstRowFormatType::UpsertJson => SourceFormat::UpsertJson, - ProstRowFormatType::UpsertAvro => SourceFormat::UpsertAvro, + PbRowFormatType::Json => SourceFormat::Json, + PbRowFormatType::Protobuf => SourceFormat::Protobuf, + PbRowFormatType::DebeziumJson => SourceFormat::DebeziumJson, + PbRowFormatType::Avro => SourceFormat::Avro, + PbRowFormatType::Maxwell => SourceFormat::Maxwell, + PbRowFormatType::CanalJson => SourceFormat::CanalJson, + PbRowFormatType::Native => SourceFormat::Native, + PbRowFormatType::DebeziumAvro => SourceFormat::DebeziumAvro, + PbRowFormatType::UpsertJson => SourceFormat::UpsertJson, + PbRowFormatType::UpsertAvro => SourceFormat::UpsertAvro, _ => unreachable!(), }; @@ -146,8 +144,8 @@ impl SourceDescBuilder { pub async fn build_fs_source_desc(&self) -> Result { let format = match self.source_info.get_row_format()? { - ProstRowFormatType::Csv => SourceFormat::Csv, - ProstRowFormatType::Json => SourceFormat::Json, + PbRowFormatType::Csv => SourceFormat::Csv, + PbRowFormatType::Json => SourceFormat::Json, _ => unreachable!(), }; @@ -214,6 +212,7 @@ pub mod test_utils { name: f.name.clone(), field_descs: vec![], type_name: "".to_string(), + generated_column: None, } .to_protobuf(), ), diff --git a/src/source/src/table.rs b/src/source/src/table.rs index 1e1b9e16f5d70..778b33846d525 100644 --- a/src/source/src/table.rs +++ b/src/source/src/table.rs @@ -100,7 +100,9 @@ impl TableDmlHandle { #[cfg(debug_assertions)] risingwave_common::util::schema_check::schema_check( - self.column_descs.iter().map(|c| &c.data_type), + self.column_descs + .iter() + .filter_map(|c| (!c.is_generated_column()).then_some(&c.data_type)), chunk.columns(), ) .expect("table source write chunk schema check failed"); diff --git a/src/sqlparser/examples/parse.rs b/src/sqlparser/examples/parse.rs index 73587757de75f..40c9d52a1aeab 100644 --- a/src/sqlparser/examples/parse.rs +++ b/src/sqlparser/examples/parse.rs @@ -27,7 +27,7 @@ fn main() { continue; } - let tokens = Tokenizer::new(&sql).tokenize().unwrap(); + let tokens = Tokenizer::new(&sql).tokenize_with_location().unwrap(); println!("tokens: {:?}", tokens); let ast = Parser::parse_sql(&sql).unwrap(); println!("ast: {:?}", ast); diff --git a/src/sqlparser/src/ast/ddl.rs b/src/sqlparser/src/ast/ddl.rs index 21f24bb67e4cb..b6aa7ecb3629b 100644 --- a/src/sqlparser/src/ast/ddl.rs +++ b/src/sqlparser/src/ast/ddl.rs @@ -77,6 +77,27 @@ pub enum AlterTableOperation { }, } +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))] +pub enum AlterIndexOperation { + RenameIndex { index_name: ObjectName }, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "visitor", derive(Visit, VisitMut))] +pub enum AlterViewOperation { + RenameView { view_name: ObjectName }, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "visitor", 
derive(Visit, VisitMut))] +pub enum AlterSinkOperation { + RenameSink { sink_name: ObjectName }, +} + impl fmt::Display for AlterTableOperation { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -133,6 +154,36 @@ impl fmt::Display for AlterTableOperation { } } +impl fmt::Display for AlterIndexOperation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + AlterIndexOperation::RenameIndex { index_name } => { + write!(f, "RENAME TO {index_name}") + } + } + } +} + +impl fmt::Display for AlterViewOperation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + AlterViewOperation::RenameView { view_name } => { + write!(f, "RENAME TO {view_name}") + } + } + } +} + +impl fmt::Display for AlterSinkOperation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + AlterSinkOperation::RenameSink { sink_name } => { + write!(f, "RENAME TO {sink_name}") + } + } + } +} + /// An `ALTER COLUMN` (`Statement::AlterTable`) operation #[derive(Debug, Clone, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -371,6 +422,8 @@ pub enum ColumnOption { /// - MySQL's `AUTO_INCREMENT` or SQLite's `AUTOINCREMENT` /// - ... DialectSpecific(Vec), + /// `AS ( <expr> )` + GeneratedColumns(Expr), } impl fmt::Display for ColumnOption { @@ -403,6 +456,7 @@ impl fmt::Display for ColumnOption { } Check(expr) => write!(f, "CHECK ({})", expr), DialectSpecific(val) => write!(f, "{}", display_separated(val, " ")), + GeneratedColumns(expr) => write!(f, "AS {}", expr), } } } diff --git a/src/sqlparser/src/ast/mod.rs b/src/sqlparser/src/ast/mod.rs index 39cd6764198ba..0ade589b9418a 100644 --- a/src/sqlparser/src/ast/mod.rs +++ b/src/sqlparser/src/ast/mod.rs @@ -43,6 +43,7 @@ pub use self::query::{ }; pub use self::statement::*; pub use self::value::{DateTimeField, DollarQuotedString, TrimWhereField, Value}; +pub use crate::ast::ddl::{AlterIndexOperation, AlterSinkOperation, AlterViewOperation}; use crate::keywords::Keyword; use crate::parser::{Parser, ParserError}; @@ -981,7 +982,7 @@ pub enum Statement { temporary: bool, name: ObjectName, args: Option>, - return_type: Option<DataType>, + returns: Option<CreateFunctionReturns>, /// Optional parameters. params: CreateFunctionBody, }, @@ -991,6 +992,25 @@ name: ObjectName, operation: AlterTableOperation, }, + /// ALTER INDEX + AlterIndex { + /// Index name + name: ObjectName, + operation: AlterIndexOperation, + }, + /// ALTER VIEW + AlterView { + /// View name + name: ObjectName, + materialized: bool, + operation: AlterViewOperation, + }, + /// ALTER SINK + AlterSink { + /// Sink name + name: ObjectName, + operation: AlterSinkOperation, + }, /// DESCRIBE TABLE OR SOURCE Describe { /// Table or Source name @@ -1041,6 +1061,11 @@ pub enum Statement { snapshot: Option, session: bool, }, + /// `SET [ SESSION | LOCAL ] TIME ZONE { value | 'value' | LOCAL | DEFAULT }` + SetTimeZone { + local: bool, + value: SetTimeZoneValue, + }, /// `COMMENT ON ...` /// /// Note: this is a PostgreSQL-specific statement.
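The three `Alter*Operation` enums and the matching `Statement` variants above give `ALTER INDEX/VIEW/SINK ... RENAME TO ...` a complete parse and round-trip path. A minimal sketch of how they surface through the public API (assuming the `risingwave_sqlparser` crate name and module layout used in this tree):

    use risingwave_sqlparser::ast::Statement;
    use risingwave_sqlparser::parser::Parser;

    fn main() {
        // Parse one of the newly supported ALTER statements.
        let sql = "ALTER MATERIALIZED VIEW mv RENAME TO mv2";
        let stmts = Parser::parse_sql(sql).unwrap();
        match &stmts[0] {
            // `materialized` distinguishes ALTER VIEW from ALTER MATERIALIZED VIEW;
            // `operation` here is AlterViewOperation::RenameView with the new name.
            Statement::AlterView { materialized, name, operation } => {
                assert!(*materialized);
                println!("rename {} -> {}", name, operation);
            }
            _ => unreachable!(),
        }
        // The Display impls shown above round-trip the statement to its SQL text.
        assert_eq!(stmts[0].to_string(), sql);
    }

The same shape applies to `ALTER INDEX ... RENAME TO ...` and `ALTER SINK ... RENAME TO ...`, which produce `Statement::AlterIndex` and `Statement::AlterSink` respectively.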
@@ -1250,7 +1275,7 @@ impl fmt::Display for Statement { temporary, name, args, - return_type, + returns, params, } => { write!( @@ -1262,8 +1287,8 @@ impl fmt::Display for Statement { if let Some(args) = args { write!(f, "({})", display_comma_separated(args))?; } - if let Some(return_type) = return_type { - write!(f, " RETURNS {}", return_type)?; + if let Some(return_type) = returns { + write!(f, " {}", return_type)?; } write!(f, "{params}")?; Ok(()) @@ -1358,7 +1383,7 @@ impl fmt::Display for Statement { if_not_exists = if *if_not_exists { "IF NOT EXISTS " } else { "" }, name = name, table_name = table_name, - columns = display_separated(columns, ","), + columns = display_comma_separated(columns), include = if include.is_empty() { "".to_string() } else { @@ -1381,6 +1406,15 @@ impl fmt::Display for Statement { Statement::AlterTable { name, operation } => { write!(f, "ALTER TABLE {} {}", name, operation) } + Statement::AlterIndex { name, operation } => { + write!(f, "ALTER INDEX {} {}", name, operation) + } + Statement::AlterView { materialized, name, operation } => { + write!(f, "ALTER {}VIEW {} {}", if *materialized { "MATERIALIZED " } else { "" }, name, operation) + } + Statement::AlterSink { name, operation } => { + write!(f, "ALTER SINK {} {}", name, operation) + } Statement::Drop(stmt) => write!(f, "DROP {}", stmt), Statement::DropFunction { if_exists, @@ -1450,6 +1484,14 @@ impl fmt::Display for Statement { } Ok(()) } + Statement::SetTimeZone { local, value } => { + write!(f, "SET")?; + if *local { + write!(f, " LOCAL")?; + } + write!(f, " TIME ZONE {}", value)?; + Ok(()) + } Statement::Commit { chain } => { write!(f, "COMMIT{}", if *chain { " AND CHAIN" } else { "" },) } @@ -1975,6 +2017,26 @@ impl fmt::Display for EmitMode { } } +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum SetTimeZoneValue { + Ident(Ident), + Literal(Value), + Local, + Default, +} + +impl fmt::Display for SetTimeZoneValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SetTimeZoneValue::Ident(ident) => write!(f, "{}", ident), + SetTimeZoneValue::Literal(value) => write!(f, "{}", value), + SetTimeZoneValue::Local => f.write_str("LOCAL"), + SetTimeZoneValue::Default => f.write_str("DEFAULT"), + } + } +} + #[derive(Debug, Clone, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum TransactionMode { @@ -2188,6 +2250,41 @@ impl fmt::Display for FunctionDefinition { } } +/// Return types of a function. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum CreateFunctionReturns { + /// RETURNS rettype + Value(DataType), + /// RETURNS TABLE ( column_name column_type [, ...] ) + Table(Vec), +} + +impl fmt::Display for CreateFunctionReturns { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Value(data_type) => write!(f, "RETURNS {}", data_type), + Self::Table(columns) => { + write!(f, "RETURNS TABLE ({})", display_comma_separated(columns)) + } + } + } +} + +/// Table column definition +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct TableColumnDef { + pub name: Ident, + pub data_type: DataType, +} + +impl fmt::Display for TableColumnDef { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{} {}", self.name, self.data_type) + } +} + /// Postgres specific feature. 
/// /// See [Postgresdocs](https://www.postgresql.org/docs/15/sql-createfunction.html) diff --git a/src/sqlparser/src/ast/query.rs b/src/sqlparser/src/ast/query.rs index 17df0434b1d56..31b7074b6e67d 100644 --- a/src/sqlparser/src/ast/query.rs +++ b/src/sqlparser/src/ast/query.rs @@ -339,6 +339,7 @@ pub enum TableFactor { Table { name: ObjectName, alias: Option, + for_system_time_as_of_now: bool, }, Derived { lateral: bool, @@ -363,8 +364,15 @@ pub enum TableFactor { impl fmt::Display for TableFactor { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - TableFactor::Table { name, alias } => { + TableFactor::Table { + name, + alias, + for_system_time_as_of_now, + } => { write!(f, "{}", name)?; + if *for_system_time_as_of_now { + write!(f, " FOR SYSTEM_TIME AS OF NOW()")?; + } if let Some(alias) = alias { write!(f, " AS {}", alias)?; } diff --git a/src/sqlparser/src/ast/statement.rs b/src/sqlparser/src/ast/statement.rs index 830727495b50c..3494bd9db92e2 100644 --- a/src/sqlparser/src/ast/statement.rs +++ b/src/sqlparser/src/ast/statement.rs @@ -710,7 +710,7 @@ impl ParseTo for UserOptions { break; } - if let Token::Word(ref w) = token { + if let Token::Word(ref w) = token.token { parser.next_token(); let (item_mut_ref, user_option) = match w.keyword { Keyword::SUPERUSER => (&mut builder.super_user, UserOption::SuperUser), diff --git a/src/sqlparser/src/parser.rs b/src/sqlparser/src/parser.rs index 2b6654d0cc4df..aa3a4ab86cb59 100644 --- a/src/sqlparser/src/parser.rs +++ b/src/sqlparser/src/parser.rs @@ -24,7 +24,9 @@ use core::fmt; use tracing::{debug, instrument}; -use crate::ast::ddl::SourceWatermark; +use crate::ast::ddl::{ + AlterIndexOperation, AlterSinkOperation, AlterViewOperation, SourceWatermark, +}; use crate::ast::{ParseTo, *}; use crate::keywords::{self, Keyword}; use crate::tokenizer::*; @@ -141,7 +143,7 @@ pub enum Precedence { } pub struct Parser { - tokens: Vec, + tokens: Vec, /// The index of the first unprocessed token in `self.tokens` index: usize, /// Since we cannot distinguish `>>` and double `>`, so use `angle_brackets_num` to store the @@ -157,7 +159,7 @@ pub struct Parser { impl Parser { /// Parse the specified tokens - pub fn new(tokens: Vec) -> Self { + pub fn new(tokens: Vec) -> Self { Parser { tokens, index: 0, @@ -171,7 +173,7 @@ impl Parser { #[instrument(level = "debug")] pub fn parse_sql(sql: &str) -> Result, ParserError> { let mut tokenizer = Tokenizer::new(sql); - let tokens = tokenizer.tokenize()?; + let tokens = tokenizer.tokenize_with_location()?; let mut parser = Parser::new(tokens); let mut stmts = Vec::new(); let mut expecting_statement_delimiter = false; @@ -199,7 +201,8 @@ impl Parser { /// Parse a single top-level statement (such as SELECT, INSERT, CREATE, etc.), /// stopping before the statement separator, if any. 
pub fn parse_statement(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => match w.keyword { Keyword::EXPLAIN => Ok(self.parse_explain()?), Keyword::ANALYZE => Ok(self.parse_analyze()?), @@ -243,13 +246,18 @@ impl Parser { Keyword::PREPARE => Ok(self.parse_prepare()?), Keyword::COMMENT => Ok(self.parse_comment()?), Keyword::FLUSH => Ok(Statement::Flush), - _ => self.expected("an SQL statement", Token::Word(w)), + _ => self.expected( + "an SQL statement", + Token::Word(w).with_location(token.location), + ), }, Token::LParen => { self.prev_token(); Ok(Statement::Query(Box::new(self.parse_query()?))) } - unexpected => self.expected("an SQL statement", unexpected), + unexpected => { + self.expected("an SQL statement", unexpected.with_location(token.location)) + } } } @@ -295,7 +303,7 @@ impl Parser { pub fn parse_wildcard_or_expr(&mut self) -> Result { let index = self.index; - match self.next_token() { + match self.next_token().token { Token::Word(w) if self.peek_token() == Token::Period => { // Since there's no parenthesis, `w` must be a column or a table // So what follows must be dot-delimited identifiers, e.g. `a.b.c.*` @@ -391,7 +399,8 @@ impl Parser { ) -> Result { let mut id_parts = vec![]; while self.consume_token(&Token::Period) { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => id_parts.push(w.to_ident()?), Token::Mul => { return if id_parts.is_empty() { @@ -401,7 +410,10 @@ impl Parser { } } unexpected => { - return self.expected("an identifier or a '*' after '.'", unexpected); + return self.expected( + "an identifier or a '*' after '.'", + unexpected.with_location(token.location), + ); } } } @@ -468,7 +480,8 @@ impl Parser { } })); - let expr = match self.next_token() { + let token = self.next_token(); + let expr = match token.token { Token::Word(w) => match w.keyword { Keyword::TRUE | Keyword::FALSE | Keyword::NULL => { self.prev_token(); @@ -497,15 +510,18 @@ impl Parser { } // Here `w` is a word, check if it's a part of a multi-part // identifier, a function call, or a simple identifier: - _ => match self.peek_token() { + _ => match self.peek_token().token { Token::LParen | Token::Period => { let mut id_parts: Vec = vec![w.to_ident()?]; while self.consume_token(&Token::Period) { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => id_parts.push(w.to_ident()?), unexpected => { - return self - .expected("an identifier or a '*' after '.'", unexpected); + return self.expected( + "an identifier or a '*' after '.'", + unexpected.with_location(token.location), + ); } } } @@ -591,7 +607,7 @@ impl Parser { Ok(expr) } } - unexpected => self.expected("an expression:", unexpected), + unexpected => self.expected("an expression:", unexpected.with_location(token.location)), }?; if self.parse_keyword(Keyword::COLLATE) { @@ -627,12 +643,16 @@ impl Parser { pub fn parse_fields(&mut self) -> Result, ParserError> { let mut idents = vec![]; while self.consume_token(&Token::Period) { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => { idents.push(w.to_ident()?); } unexpected => { - return self.expected("an identifier after '.'", unexpected); + return self.expected( + "an identifier after '.'", + unexpected.with_location(token.location), + ); } } } @@ -695,14 +715,21 @@ impl Parser { } pub fn parse_window_frame_units(&mut self) -> Result { - match self.next_token() { + let token = 
self.next_token(); + match token.token { Token::Word(w) => match w.keyword { Keyword::ROWS => Ok(WindowFrameUnits::Rows), Keyword::RANGE => Ok(WindowFrameUnits::Range), Keyword::GROUPS => Ok(WindowFrameUnits::Groups), - _ => self.expected("ROWS, RANGE, GROUPS", Token::Word(w))?, + _ => self.expected( + "ROWS, RANGE, GROUPS", + Token::Word(w).with_location(token.location), + )?, }, - unexpected => self.expected("ROWS, RANGE, GROUPS", unexpected), + unexpected => self.expected( + "ROWS, RANGE, GROUPS", + unexpected.with_location(token.location), + ), } } @@ -930,7 +957,7 @@ impl Parser { pub fn parse_trim_expr(&mut self) -> Result { self.expect_token(&Token::LParen)?; let mut where_expr = None; - if let Token::Word(word) = self.peek_token() { + if let Token::Word(word) = self.peek_token().token { if [Keyword::BOTH, Keyword::LEADING, Keyword::TRAILING] .iter() .any(|d| word.keyword == *d) @@ -951,14 +978,20 @@ impl Parser { } pub fn parse_trim_where(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => match w.keyword { Keyword::BOTH => Ok(TrimWhereField::Both), Keyword::LEADING => Ok(TrimWhereField::Leading), Keyword::TRAILING => Ok(TrimWhereField::Trailing), - _ => self.expected("trim_where field", Token::Word(w))?, + _ => self.expected( + "trim_where field", + Token::Word(w).with_location(token.location), + )?, }, - unexpected => self.expected("trim_where field", unexpected), + unexpected => { + self.expected("trim_where field", unexpected.with_location(token.location)) + } } } @@ -1007,7 +1040,8 @@ impl Parser { // This function parses date/time fields for interval qualifiers. pub fn parse_date_time_field(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => match w.keyword { Keyword::YEAR => Ok(DateTimeField::Year), Keyword::MONTH => Ok(DateTimeField::Month), @@ -1015,9 +1049,14 @@ impl Parser { Keyword::HOUR => Ok(DateTimeField::Hour), Keyword::MINUTE => Ok(DateTimeField::Minute), Keyword::SECOND => Ok(DateTimeField::Second), - _ => self.expected("date/time field", Token::Word(w))?, + _ => self.expected( + "date/time field", + Token::Word(w).with_location(token.location), + )?, }, - unexpected => self.expected("date/time field", unexpected), + unexpected => { + self.expected("date/time field", unexpected.with_location(token.location)) + } } } @@ -1031,10 +1070,13 @@ impl Parser { // select extract('invaLId' from null::date); // ``` pub fn parse_date_time_field_in_extract(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => Ok(w.value.to_uppercase()), Token::SingleQuotedString(s) => Ok(s.to_uppercase()), - unexpected => self.expected("date/time field", unexpected), + unexpected => { + self.expected("date/time field", unexpected.with_location(token.location)) + } } } @@ -1065,7 +1107,7 @@ impl Parser { // // Note that PostgreSQL allows omitting the qualifier, so we provide // this more general implementation. 
- let leading_field = match self.peek_token() { + let leading_field = match self.peek_token().token { Token::Word(kw) if [ Keyword::YEAR, @@ -1120,7 +1162,7 @@ impl Parser { /// Parse an operator following an expression pub fn parse_infix(&mut self, expr: Expr, precedence: Precedence) -> Result { let tok = self.next_token(); - let regular_binary_operator = match &tok { + let regular_binary_operator = match &tok.token { Token::Spaceship => Some(BinaryOperator::Spaceship), Token::DoubleEq => Some(BinaryOperator::Eq), Token::Eq => Some(BinaryOperator::Eq), @@ -1204,7 +1246,7 @@ impl Parser { right: Box::new(self.parse_subexpr(precedence)?), }) } - } else if let Token::Word(w) = &tok { + } else if let Token::Word(w) = &tok.token { match w.keyword { Keyword::IS => { if self.parse_keyword(Keyword::TRUE) { @@ -1235,15 +1277,15 @@ impl Parser { } Keyword::AT => { if self.parse_keywords(&[Keyword::TIME, Keyword::ZONE]) { - let time_zone = self.next_token(); - match time_zone { + let token = self.next_token(); + match token.token { Token::SingleQuotedString(time_zone) => Ok(Expr::AtTimeZone { timestamp: Box::new(expr), time_zone, }), - tok => self.expected( + unexpected => self.expected( "Expected Token::SingleQuotedString after AT TIME ZONE", - tok, + unexpected.with_location(token.location), ), } } else { @@ -1348,7 +1390,7 @@ impl Parser { let token = self.peek_token(); debug!("get_next_precedence() {:?}", token); - match token { + match token.token { Token::Word(w) if w.keyword == Keyword::OR => Ok(P::LogicalOr), Token::Word(w) if w.keyword == Keyword::XOR => Ok(P::LogicalXor), Token::Word(w) if w.keyword == Keyword::AND => Ok(P::LogicalAnd), @@ -1363,7 +1405,7 @@ impl Parser { | Token::GtEq | Token::DoubleEq | Token::Spaceship => Ok(P::Cmp), - Token::Word(w) if w.keyword == Keyword::NOT => match self.peek_nth_token(1) { + Token::Word(w) if w.keyword == Keyword::NOT => match self.peek_nth_token(1).token { // The precedence of NOT varies depending on keyword that // follows it. If it is followed by IN, BETWEEN, or LIKE, // it takes on the precedence of those tokens. Otherwise it @@ -1391,7 +1433,7 @@ impl Parser { | Token::HashArrow | Token::HashLongArrow => Ok(P::Other), Token::Word(w) if w.keyword == Keyword::AT => { - match (self.peek_nth_token(1), self.peek_nth_token(2)) { + match (self.peek_nth_token(1).token, self.peek_nth_token(2).token) { (Token::Word(w), Token::Word(w2)) if w.keyword == Keyword::TIME && w2.keyword == Keyword::ZONE => { @@ -1419,20 +1461,23 @@ impl Parser { /// Return the first non-whitespace token that has not yet been processed /// (or None if reached end-of-file) - pub fn peek_token(&self) -> Token { + pub fn peek_token(&self) -> TokenWithLocation { self.peek_nth_token(0) } /// Return nth non-whitespace token that has not yet been processed - pub fn peek_nth_token(&self, mut n: usize) -> Token { + pub fn peek_nth_token(&self, mut n: usize) -> TokenWithLocation { let mut index = self.index; loop { index += 1; - match self.tokens.get(index - 1) { + let token = self.tokens.get(index - 1); + match token.map(|x| &x.token) { Some(Token::Whitespace(_)) => continue, - non_whitespace => { + _ => { if n == 0 { - return non_whitespace.cloned().unwrap_or(Token::EOF); + return token + .cloned() + .unwrap_or(TokenWithLocation::wrap(Token::EOF)); } n -= 1; } @@ -1443,18 +1488,23 @@ impl Parser { /// Return the first non-whitespace token that has not yet been processed /// (or None if reached end-of-file) and mark it as processed. OK to call /// repeatedly after reaching EOF. 
-    pub fn next_token(&mut self) -> Token {
+    pub fn next_token(&mut self) -> TokenWithLocation {
         loop {
             self.index += 1;
-            match self.tokens.get(self.index - 1) {
+            let token = self.tokens.get(self.index - 1);
+            match token.map(|x| &x.token) {
                 Some(Token::Whitespace(_)) => continue,
-                token => return token.cloned().unwrap_or(Token::EOF),
+                _ => {
+                    return token
+                        .cloned()
+                        .unwrap_or(TokenWithLocation::wrap(Token::EOF))
+                }
             }
         }
     }
 
     /// Return the first unprocessed token, possibly whitespace.
-    pub fn next_token_no_skip(&mut self) -> Option<&Token> {
+    pub fn next_token_no_skip(&mut self) -> Option<&TokenWithLocation> {
         self.index += 1;
         self.tokens.get(self.index - 1)
     }
@@ -1466,7 +1516,7 @@ impl Parser {
         loop {
             assert!(self.index > 0);
             self.index -= 1;
-            if let Some(Token::Whitespace(_)) = self.tokens.get(self.index) {
+            if let Some(token) = self.tokens.get(self.index) && let Token::Whitespace(_) = token.token {
                 continue;
             }
             return;
@@ -1474,14 +1524,31 @@
     }
 
     /// Report unexpected token
-    pub fn expected<T>(&self, expected: &str, found: Token) -> Result<T, ParserError> {
-        parser_err!(format!("Expected {}, found: {}", expected, found))
+    pub fn expected<T>(&self, expected: &str, found: TokenWithLocation) -> Result<T, ParserError> {
+        let start_off = self.index.saturating_sub(10);
+        let end_off = self.index.min(self.tokens.len());
+        let near_tokens = &self.tokens[start_off..end_off];
+        struct TokensDisplay<'a>(&'a [TokenWithLocation]);
+        impl<'a> fmt::Display for TokensDisplay<'a> {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                for token in self.0 {
+                    write!(f, "{}", token.token)?;
+                }
+                Ok(())
+            }
+        }
+        parser_err!(format!(
+            "Expected {}, found: {}\nNear \"{}\"",
+            expected,
+            found,
+            TokensDisplay(near_tokens),
+        ))
     }
 
     /// Look for an expected keyword and consume it if it exists
     #[must_use]
     pub fn parse_keyword(&mut self, expected: Keyword) -> bool {
-        match self.peek_token() {
+        match self.peek_token().token {
             Token::Word(w) if expected == w.keyword => {
                 self.next_token();
                 true
@@ -1508,7 +1575,7 @@ impl Parser {
     /// Look for one of the given keywords and return the one that matches.
     #[must_use]
     pub fn parse_one_of_keywords(&mut self, keywords: &[Keyword]) -> Option<Keyword> {
-        match self.peek_token() {
+        match self.peek_token().token {
             Token::Word(w) => {
                 keywords
                     .iter()
@@ -1523,7 +1590,7 @@
     }
 
     pub fn peek_nth_any_of_keywords(&mut self, n: usize, keywords: &[Keyword]) -> bool {
-        match self.peek_nth_token(n) {
+        match self.peek_nth_token(n).token {
             Token::Word(w) => keywords.iter().any(|keyword| *keyword == w.keyword),
             _ => false,
         }
     }
@@ -1768,7 +1835,23 @@ impl Parser {
         self.expect_token(&Token::RParen)?;
 
         let return_type = if self.parse_keyword(Keyword::RETURNS) {
-            Some(self.parse_data_type()?)
+            if self.parse_keyword(Keyword::TABLE) {
+                self.expect_token(&Token::LParen)?;
+                let mut values = vec![];
+                loop {
+                    values.push(self.parse_table_column_def()?);
+                    let comma = self.consume_token(&Token::Comma);
+                    if self.consume_token(&Token::RParen) {
+                        // allow a trailing comma, even though it's not in standard
+                        break;
+                    } else if !comma {
+                        return self.expected("',' or ')'", self.peek_token());
+                    }
+                }
+                Some(CreateFunctionReturns::Table(values))
+            } else {
+                Some(CreateFunctionReturns::Value(self.parse_data_type()?))
+            }
         } else {
             None
         };
@@ -1780,11 +1863,18 @@
             temporary,
             name,
             args,
-            return_type,
+            returns: return_type,
             params,
         })
     }
 
+    fn parse_table_column_def(&mut self) -> Result<TableColumnDef, ParserError> {
+        Ok(TableColumnDef {
+            name: self.parse_identifier_non_reserved()?,
+            data_type: self.parse_data_type()?,
+        })
+    }
+
     fn parse_function_arg(&mut self) -> Result<OperateFunctionArg, ParserError> {
         let mode = if self.parse_keyword(Keyword::IN) {
             Some(ArgMode::In)
@@ -1866,7 +1956,7 @@ impl Parser {
             Keyword::LINK => Ok(CreateFunctionUsing::Link(uri)),
             _ => self.expected(
                 "LINK, got {:?}",
-                Token::make_keyword(format!("{keyword:?}").as_str()),
+                TokenWithLocation::wrap(Token::make_keyword(format!("{keyword:?}").as_str())),
             ),
         }
     }
@@ -2074,7 +2164,7 @@ impl Parser {
                         "Only 1 watermark is allowed to be defined on source.".to_string(),
                     ));
                 }
-            } else if let Token::Word(_) = self.peek_token() {
+            } else if let Token::Word(_) = self.peek_token().token {
                 columns.push(self.parse_column_def()?);
             } else {
                 return self.expected("column name or constraint definition", self.peek_token());
@@ -2093,7 +2183,7 @@
 
     fn parse_column_def(&mut self) -> Result<ColumnDef, ParserError> {
         let name = self.parse_identifier_non_reserved()?;
-        let data_type = if let Token::Word(_) = self.peek_token() {
+        let data_type = if let Token::Word(_) = self.peek_token().token {
             Some(self.parse_data_type()?)
} else { None @@ -2170,6 +2260,8 @@ impl Parser { let expr = self.parse_expr()?; self.expect_token(&Token::RParen)?; Ok(Some(ColumnOption::Check(expr))) + } else if self.parse_keyword(Keyword::AS) { + Ok(Some(ColumnOption::GeneratedColumns(self.parse_expr()?))) } else { Ok(None) } @@ -2214,7 +2306,8 @@ impl Parser { } else { None }; - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) if w.keyword == Keyword::PRIMARY || w.keyword == Keyword::UNIQUE => { let is_primary = w.keyword == Keyword::PRIMARY; if is_primary { @@ -2263,7 +2356,10 @@ impl Parser { } unexpected => { if name.is_some() { - self.expected("PRIMARY, UNIQUE, FOREIGN, or CHECK", unexpected) + self.expected( + "PRIMARY, UNIQUE, FOREIGN, or CHECK", + unexpected.with_location(token.location), + ) } else { self.prev_token(); Ok(None) @@ -2321,6 +2417,14 @@ impl Parser { pub fn parse_alter(&mut self) -> Result { if self.parse_keyword(Keyword::TABLE) { self.parse_alter_table() + } else if self.parse_keyword(Keyword::INDEX) { + self.parse_alter_index() + } else if self.parse_keyword(Keyword::VIEW) { + self.parse_alter_view(false) + } else if self.parse_keywords(&[Keyword::MATERIALIZED, Keyword::VIEW]) { + self.parse_alter_view(true) + } else if self.parse_keyword(Keyword::SINK) { + self.parse_alter_sink() } else if self.parse_keyword(Keyword::USER) { self.parse_alter_user() } else if self.parse_keyword(Keyword::SYSTEM) { @@ -2421,6 +2525,70 @@ impl Parser { }) } + pub fn parse_alter_index(&mut self) -> Result { + let index_name = self.parse_object_name()?; + let operation = if self.parse_keyword(Keyword::RENAME) { + if self.parse_keyword(Keyword::TO) { + let index_name = self.parse_object_name()?; + AlterIndexOperation::RenameIndex { index_name } + } else { + return self.expected("TO after RENAME", self.peek_token()); + } + } else { + return self.expected("RENAME after ALTER INDEX", self.peek_token()); + }; + + Ok(Statement::AlterIndex { + name: index_name, + operation, + }) + } + + pub fn parse_alter_view(&mut self, materialized: bool) -> Result { + let view_name = self.parse_object_name()?; + let operation = if self.parse_keyword(Keyword::RENAME) { + if self.parse_keyword(Keyword::TO) { + let view_name = self.parse_object_name()?; + AlterViewOperation::RenameView { view_name } + } else { + return self.expected("TO after RENAME", self.peek_token()); + } + } else { + return self.expected( + &format!( + "RENAME after ALTER {}VIEW", + if materialized { "MATERIALIZED " } else { "" } + ), + self.peek_token(), + ); + }; + + Ok(Statement::AlterView { + materialized, + name: view_name, + operation, + }) + } + + pub fn parse_alter_sink(&mut self) -> Result { + let sink_name = self.parse_object_name()?; + let operation = if self.parse_keyword(Keyword::RENAME) { + if self.parse_keyword(Keyword::TO) { + let sink_name = self.parse_object_name()?; + AlterSinkOperation::RenameSink { sink_name } + } else { + return self.expected("TO after RENAME", self.peek_token()); + } + } else { + return self.expected("RENAME after ALTER SINK", self.peek_token()); + }; + + Ok(Statement::AlterSink { + name: sink_name, + operation, + }) + } + pub fn parse_alter_system(&mut self) -> Result { self.expect_keyword(Keyword::SET)?; let param = self.parse_identifier()?; @@ -2455,7 +2623,7 @@ impl Parser { let mut values = vec![]; let mut content = String::from(""); while let Some(t) = self.next_token_no_skip() { - match t { + match t.token { Token::Whitespace(Whitespace::Tab) => { values.push(Some(content.to_string())); 
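Three user-visible syntaxes are added in this stretch: the `RETURNS TABLE (...)` branch above, an `AS <expr>` column option that becomes `ColumnOption::GeneratedColumns`, and `RENAME TO` for indexes, views, materialized views, and sinks via the new `parse_alter_*` helpers. Statements these branches accept, the first taken from the new test in `sqlparser_postgres.rs` further down, the rest hedged illustrations with placeholder object names:

    CREATE FUNCTION unnest(a INT[]) RETURNS TABLE (x INT) LANGUAGE SQL RETURN a;
    CREATE TABLE t (v1 INT, v2 INT AS v1 + 1);
    ALTER INDEX idx RENAME TO idx2;
    ALTER VIEW v RENAME TO v2;
    ALTER MATERIALIZED VIEW mv RENAME TO mv2;
    ALTER SINK snk RENAME TO snk2;

Each `parse_alter_*` helper accepts only the `RENAME` then `TO` form and otherwise reports the failure through `expected`, so the message carries a location.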
content.clear(); @@ -2468,7 +2636,7 @@ impl Parser { if self.consume_token(&Token::Period) { return values; } - if let Token::Word(w) = self.next_token() { + if let Token::Word(w) = self.next_token().token { if w.value == "N" { values.push(None); } @@ -2484,7 +2652,8 @@ impl Parser { /// Parse a literal value (numbers, strings, date/time, booleans) fn parse_value(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => match w.keyword { Keyword::TRUE => Ok(Value::Boolean(true)), Keyword::FALSE => Ok(Value::Boolean(false)), @@ -2492,9 +2661,12 @@ impl Parser { Keyword::NoKeyword if w.quote_style.is_some() => match w.quote_style { Some('"') => Ok(Value::DoubleQuotedString(w.value)), Some('\'') => Ok(Value::SingleQuotedString(w.value)), - _ => self.expected("A value?", Token::Word(w))?, + _ => self.expected("A value?", Token::Word(w).with_location(token.location))?, }, - _ => self.expected("a concrete value", Token::Word(w)), + _ => self.expected( + "a concrete value", + Token::Word(w).with_location(token.location), + ), }, Token::Number(ref n) => Ok(Value::Number(n.clone())), Token::SingleQuotedString(ref s) => Ok(Value::SingleQuotedString(s.to_string())), @@ -2502,13 +2674,13 @@ impl Parser { Token::CstyleEscapesString(ref s) => Ok(Value::CstyleEscapesString(s.to_string())), Token::NationalStringLiteral(ref s) => Ok(Value::NationalStringLiteral(s.to_string())), Token::HexStringLiteral(ref s) => Ok(Value::HexStringLiteral(s.to_string())), - unexpected => self.expected("a value", unexpected), + unexpected => self.expected("a value", unexpected.with_location(token.location)), } } fn parse_set_variable(&mut self) -> Result { let token = self.peek_token(); - match (self.parse_value(), token) { + match (self.parse_value(), token.token) { (Ok(value), _) => Ok(SetVariableValue::Literal(value)), (Err(_), Token::Word(w)) => { if w.keyword == Keyword::DEFAULT { @@ -2517,7 +2689,9 @@ impl Parser { Ok(SetVariableValue::Ident(w.to_ident()?)) } } - (Err(_), unexpected) => self.expected("variable value", unexpected), + (Err(_), unexpected) => { + self.expected("variable value", unexpected.with_location(token.location)) + } } } @@ -2533,17 +2707,18 @@ impl Parser { /// Parse an unsigned literal integer/long pub fn parse_literal_uint(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Number(s) => s.parse::().map_err(|e| { ParserError::ParserError(format!("Could not parse '{}' as u64: {}", s, e)) }), - unexpected => self.expected("literal int", unexpected), + unexpected => self.expected("literal int", unexpected.with_location(token.location)), } } pub fn parse_function_definition(&mut self) -> Result { let peek_token = self.peek_token(); - match peek_token { + match peek_token.token { Token::DollarQuotedString(value) => { self.next_token(); Ok(FunctionDefinition::DoubleDollarDef(value.value)) @@ -2556,16 +2731,18 @@ impl Parser { /// Parse a literal string pub fn parse_literal_string(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(Word { value, keyword, .. 
}) if keyword == Keyword::NoKeyword => Ok(value), Token::SingleQuotedString(s) => Ok(s), - unexpected => self.expected("literal string", unexpected), + unexpected => self.expected("literal string", unexpected.with_location(token.location)), } } /// Parse a map key string pub fn parse_map_key(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(Word { value, keyword, .. }) if keyword == Keyword::NoKeyword => { if self.peek_token() == Token::LParen { return self.parse_function(ObjectName(vec![Ident::new_unchecked(value)])); @@ -2574,7 +2751,10 @@ impl Parser { } Token::SingleQuotedString(s) => Ok(Expr::Value(Value::SingleQuotedString(s))), Token::Number(s) => Ok(Expr::Value(Value::Number(s))), - unexpected => self.expected("literal string, number or function", unexpected), + unexpected => self.expected( + "literal string, number or function", + unexpected.with_location(token.location), + ), } } @@ -2598,7 +2778,7 @@ impl Parser { self.angle_brackets_num += 1; loop { - if let Token::Word(_) = self.peek_token() { + if let Token::Word(_) = self.peek_token().token { let name = self.parse_identifier_non_reserved()?; let data_type = self.parse_data_type()?; columns.push(StructField { name, data_type }) @@ -2627,7 +2807,8 @@ impl Parser { /// Parse a SQL datatype pub fn parse_data_type_inner(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => match w.keyword { Keyword::BOOLEAN | Keyword::BOOL => Ok(DataType::Boolean), Keyword::FLOAT => { @@ -2700,7 +2881,9 @@ impl Parser { Ok(DataType::Custom(type_name)) } }, - unexpected => self.expected("a data type name", unexpected), + unexpected => { + self.expected("a data type name", unexpected.with_location(token.location)) + } } } @@ -2712,7 +2895,8 @@ impl Parser { reserved_kwds: &[Keyword], ) -> Result, ParserError> { let after_as = self.parse_keyword(Keyword::AS); - match self.next_token() { + let token = self.next_token(); + match token.token { // Accept any identifier after `AS` (though many dialects have restrictions on // keywords that may appear here). If there's no `AS`: don't parse keywords, // which may start a construct allowed in this position, to be parsed as aliases. @@ -2723,7 +2907,10 @@ impl Parser { } not_an_ident => { if after_as { - return self.expected("an identifier after AS", not_an_ident); + return self.expected( + "an identifier after AS", + not_an_ident.with_location(token.location), + ); } self.prev_token(); Ok(None) // no alias found @@ -2748,6 +2935,22 @@ impl Parser { } } + pub fn parse_for_system_time_as_of_now(&mut self) -> Result { + let after_for = self.parse_keyword(Keyword::FOR); + if after_for { + self.expect_keywords(&[Keyword::SYSTEM_TIME, Keyword::AS, Keyword::OF])?; + let ident = self.parse_identifier()?; + if ident.real_value() != "now" { + return parser_err!(format!("Expected now, found: {}", ident.real_value())); + } + self.expect_token(&Token::LParen)?; + self.expect_token(&Token::RParen)?; + Ok(true) + } else { + Ok(false) + } + } + /// Parse a possibly qualified, possibly quoted identifier, e.g. 
/// `foo` or `myschema."table" pub fn parse_object_name(&mut self) -> Result { @@ -2765,7 +2968,7 @@ impl Parser { pub fn parse_identifiers_non_keywords(&mut self) -> Result, ParserError> { let mut idents = vec![]; loop { - match self.peek_token() { + match self.peek_token().token { Token::Word(w) => { if w.keyword != Keyword::NoKeyword { break; @@ -2787,7 +2990,8 @@ impl Parser { pub fn parse_identifiers(&mut self) -> Result, ParserError> { let mut idents = vec![]; loop { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => { idents.push(w.to_ident()?); } @@ -2801,22 +3005,24 @@ impl Parser { /// Parse a simple one-word identifier (possibly quoted, possibly a keyword) pub fn parse_identifier(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => Ok(w.to_ident()?), - unexpected => self.expected("identifier", unexpected), + unexpected => self.expected("identifier", unexpected.with_location(token.location)), } } /// Parse a simple one-word identifier (possibly quoted, possibly a non-reserved keyword) pub fn parse_identifier_non_reserved(&mut self) -> Result { - match self.next_token() { + let token = self.next_token(); + match token.token { Token::Word(w) => { match keywords::RESERVED_FOR_COLUMN_OR_TABLE_NAME.contains(&w.keyword) { true => parser_err!(format!("syntax error at or near \"{w}\"")), false => Ok(w.to_ident()?), } } - unexpected => self.expected("identifier", unexpected), + unexpected => self.expected("identifier", unexpected.with_location(token.location)), } } @@ -3114,7 +3320,7 @@ impl Parser { loop { // The query can be optionally followed by a set operator: - let op = self.parse_set_operator(&self.peek_token()); + let op = self.parse_set_operator(&self.peek_token().token); let next_precedence = match op { // UNION and EXCEPT have the same binding power and evaluate left-to-right Some(SetOperator::Union) | Some(SetOperator::Except) => 10, @@ -3226,6 +3432,23 @@ impl Parser { pub fn parse_set(&mut self) -> Result { let modifier = self.parse_one_of_keywords(&[Keyword::SESSION, Keyword::LOCAL]); + if self.parse_keywords(&[Keyword::TIME, Keyword::ZONE]) { + let value = if self.parse_keyword(Keyword::DEFAULT) { + SetTimeZoneValue::Default + } else if self.parse_keyword(Keyword::LOCAL) { + SetTimeZoneValue::Local + } else if let Ok(ident) = self.parse_identifier() { + SetTimeZoneValue::Ident(ident) + } else { + let value = self.parse_value()?; + SetTimeZoneValue::Literal(value) + }; + + return Ok(Statement::SetTimeZone { + local: modifier == Some(Keyword::LOCAL), + value, + }); + } let variable = self.parse_identifier()?; if self.consume_token(&Token::Eq) || self.parse_keyword(Keyword::TO) { let mut values = vec![]; @@ -3272,7 +3495,7 @@ impl Parser { /// otherwise, return `Statement::ShowVariable`. pub fn parse_show(&mut self) -> Result { let index = self.index; - if let Token::Word(w) = self.next_token() { + if let Token::Word(w) = self.next_token().token { match w.keyword { Keyword::TABLES => { return Ok(Statement::ShowObjects(ShowObject::Table { @@ -3345,7 +3568,7 @@ impl Parser { /// Parse object type and name after `show create`. 
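Two further additions sit nearby: `parse_for_system_time_as_of_now` accepts exactly `FOR SYSTEM_TIME AS OF NOW()` (any identifier other than `now` is a parse error), and `parse_set` now intercepts `SET [LOCAL] TIME ZONE` before the generic variable path, producing `Statement::SetTimeZone`. Accepted forms, the first taken verbatim from the `parse_temporal_join` test below, the last a hedged illustration with a placeholder zone name:

    SELECT * FROM t1 JOIN t2 FOR SYSTEM_TIME AS OF NOW() ON c1 = c2;
    SET TIME ZONE DEFAULT;
    SET TIME ZONE LOCAL;
    SET LOCAL TIME ZONE my_zone;  -- an identifier becomes SetTimeZoneValue::Ident, a literal goes through parse_value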
pub fn parse_show_create(&mut self) -> Result { - if let Token::Word(w) = self.next_token() { + if let Token::Word(w) = self.next_token().token { let show_type = match w.keyword { Keyword::TABLE => ShowCreateType::Table, Keyword::MATERIALIZED => { @@ -3398,7 +3621,7 @@ impl Parser { } } else { let natural = self.parse_keyword(Keyword::NATURAL); - let peek_keyword = if let Token::Word(w) = self.peek_token() { + let peek_keyword = if let Token::Word(w) = self.peek_token().token { w.keyword } else { Keyword::NoKeyword @@ -3473,7 +3696,7 @@ impl Parser { // It can only be a subquery. We don't use `maybe_parse` so that a meaningful error can // be returned. - match self.peek_token() { + match self.peek_token().token { Token::Word(w) if [Keyword::SELECT, Keyword::WITH, Keyword::VALUES].contains(&w.keyword) => { @@ -3527,8 +3750,13 @@ impl Parser { let alias = self.parse_optional_table_alias(keywords::RESERVED_FOR_TABLE_ALIAS)?; Ok(TableFactor::TableFunction { name, alias, args }) } else { + let for_system_time_as_of_now = self.parse_for_system_time_as_of_now()?; let alias = self.parse_optional_table_alias(keywords::RESERVED_FOR_TABLE_ALIAS)?; - Ok(TableFactor::Table { name, alias }) + Ok(TableFactor::Table { + name, + alias, + for_system_time_as_of_now, + }) } } } @@ -4023,7 +4251,7 @@ impl Parser { self.expect_keyword(Keyword::ON)?; let token = self.next_token(); - let (object_type, object_name) = match token { + let (object_type, object_name) = match token.token { Token::Word(w) if w.keyword == Keyword::COLUMN => { let object_name = self.parse_object_name()?; (CommentObject::Column, object_name) diff --git a/src/sqlparser/src/test_utils.rs b/src/sqlparser/src/test_utils.rs index 2a6ea6438a324..00d454a2fc992 100644 --- a/src/sqlparser/src/test_utils.rs +++ b/src/sqlparser/src/test_utils.rs @@ -34,7 +34,7 @@ where F: Fn(&mut Parser) -> T, { let mut tokenizer = Tokenizer::new(sql); - let tokens = tokenizer.tokenize().unwrap(); + let tokens = tokenizer.tokenize_with_location().unwrap(); f(&mut Parser::new(tokens)) } @@ -138,6 +138,7 @@ pub fn table_alias(name: impl Into) -> Option { pub fn table(name: impl Into) -> TableFactor { TableFactor::Table { name: ObjectName(vec![Ident::new_unchecked(name.into())]), + for_system_time_as_of_now: false, alias: None, } } diff --git a/src/sqlparser/src/tokenizer.rs b/src/sqlparser/src/tokenizer.rs index ed1e1ef4831d2..dbef78d14e98b 100644 --- a/src/sqlparser/src/tokenizer.rs +++ b/src/sqlparser/src/tokenizer.rs @@ -25,6 +25,7 @@ use alloc::{ vec::Vec, }; use core::fmt; +use core::fmt::Debug; use core::iter::Peekable; use core::str::Chars; @@ -237,6 +238,10 @@ impl Token { }, }) } + + pub fn with_location(self, location: Location) -> TokenWithLocation { + TokenWithLocation::new(self, location.line, location.column) + } } /// A keyword (like SELECT) or an optionally quoted SQL identifier @@ -300,6 +305,61 @@ impl fmt::Display for Whitespace { } } +/// Location in input string +#[derive(Debug, Eq, PartialEq, Clone)] +pub struct Location { + /// Line number, starting from 1 + pub line: u64, + /// Line column, starting from 1 + pub column: u64, +} + +/// A [Token] with [Location] attached to it +#[derive(Debug, Eq, PartialEq, Clone)] +pub struct TokenWithLocation { + pub token: Token, + pub location: Location, +} + +impl TokenWithLocation { + pub fn new(token: Token, line: u64, column: u64) -> TokenWithLocation { + TokenWithLocation { + token, + location: Location { line, column }, + } + } + + pub fn wrap(token: Token) -> TokenWithLocation { + 
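+        // `Location` is documented as 1-based, so the (0, 0) used below marks
+        // a token synthesized by the parser itself rather than read from the
+        // input, e.g. the EOF token returned when peeking past the end.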
TokenWithLocation::new(token, 0, 0) + } +} + +impl PartialEq for TokenWithLocation { + fn eq(&self, other: &Token) -> bool { + &self.token == other + } +} + +impl PartialEq for Token { + fn eq(&self, other: &TokenWithLocation) -> bool { + self == &other.token + } +} + +impl fmt::Display for TokenWithLocation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.token == Token::EOF { + write!(f, "EOF at the end") + } else { + write!( + f, + "{} at line:{}, column:{}", + self.token, self.location.line, self.location.column + ) + } + } +} + /// Tokenizer error #[derive(Debug, PartialEq)] pub struct TokenizerError { @@ -338,11 +398,11 @@ impl<'a> Tokenizer<'a> { } } - /// Tokenize the statement and produce a vector of tokens - pub fn tokenize(&mut self) -> Result, TokenizerError> { + /// Tokenize the statement and produce a vector of tokens with locations. + pub fn tokenize_with_location(&mut self) -> Result, TokenizerError> { let mut peekable = self.query.chars().peekable(); - let mut tokens: Vec = vec![]; + let mut tokens: Vec = vec![]; while let Some(token) = self.next_token(&mut peekable)? { match &token { @@ -359,11 +419,20 @@ impl<'a> Tokenizer<'a> { _ => self.col += 1, } - tokens.push(token); + let token_with_location = TokenWithLocation::new(token, self.line, self.col); + + tokens.push(token_with_location); } Ok(tokens) } + /// Tokenize the statement and produce a vector of tokens without locations. + #[allow(dead_code)] + fn tokenize(&mut self) -> Result, TokenizerError> { + self.tokenize_with_location() + .map(|v| v.into_iter().map(|t| t.token).collect()) + } + /// Get the next token or return None fn next_token(&self, chars: &mut Peekable>) -> Result, TokenizerError> { match chars.peek() { diff --git a/src/sqlparser/test_runner/Cargo.toml b/src/sqlparser/test_runner/Cargo.toml index f33c8cf1b218f..d1cd39edf1a93 100644 --- a/src/sqlparser/test_runner/Cargo.toml +++ b/src/sqlparser/test_runner/Cargo.toml @@ -11,11 +11,27 @@ normal = ["workspace-hack"] [dependencies] anyhow = "1" +console = "0.15" +futures = { version = "0.3", default-features = false, features = ["alloc"] } risingwave_sqlparser = { path = "../" } serde = { version = "1", features = ["derive"] } +serde_with = "2" serde_yaml = "0.9" +tokio = { version = "0.2", package = "madsim-tokio", features = [ + "rt", + "rt-multi-thread", + "sync", + "macros", + "time", + "signal", + "fs", +] } walkdir = "2" +[[bin]] +name = "parser-test-apply" +path = "src/bin/apply.rs" + [target.'cfg(not(madsim))'.dependencies] workspace-hack = { path = "../../workspace-hack" } diff --git a/src/sqlparser/test_runner/sqlparser_test.toml b/src/sqlparser/test_runner/sqlparser_test.toml new file mode 100644 index 0000000000000..32990be7df6df --- /dev/null +++ b/src/sqlparser/test_runner/sqlparser_test.toml @@ -0,0 +1,55 @@ +[tasks.update-parser-test] +description = "Update parser test data" +private = true +script = ''' +#!/usr/bin/env bash +set -e +cargo run --bin parser-test-apply +''' + +[tasks.apply-parser-test] +description = "Generate parser test data" +dependencies = [ + "update-parser-test" +] +script = ''' +#!/usr/bin/env bash +set -e +cd src/sqlparser/tests/testdata/ + +for f in *.apply.yaml +do + diff "$f" "$(basename "$f" .apply.yaml).yaml" || true +done + +echo "If you want to apply the parser test data, run: $(tput setaf 2)./risedev do-apply-parser-test$(tput sgr 0)" +''' +category = "RiseDev - Test" + +[tasks.do-apply-parser-test] +description = "Apply parser test data" +dependencies = [ + "update-parser-test" +] 
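End to end, the parser is now fed located tokens. A minimal usage sketch mirroring the `test_utils.rs` hunk above (an illustration, not part of the patch):

    use risingwave_sqlparser::parser::Parser;
    use risingwave_sqlparser::tokenizer::Tokenizer;

    let sql = "CREATE TABLE t(a int[)";
    // Each token carries a line/column Location; Parser::new consumes them as-is.
    let tokens = Tokenizer::new(sql).tokenize_with_location().unwrap();
    let _parser = Parser::new(tokens);

A failed parse now reports both pieces of context, e.g. `Expected ], found: ) at line:1, column:23` followed by `Near "CREATE TABLE t(a int["`, which is why the test suites further down switch from exact `assert_eq!` on error strings to substring `contains` checks.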
+script = ''' +#!/usr/bin/env bash +set -e +cd src/sqlparser/tests/testdata/ + +for f in *.apply.yaml +do + SOURCE="$(basename $f .apply.yaml).yaml" + if [ -f "$SOURCE" ]; then + cat < temp.apply.yaml +# This file is automatically generated. See \`src/sqlparser/test_runner/src/bin/apply.rs\` for more information. +EOF + cat "$f" >> temp.apply.yaml + mv temp.apply.yaml "$SOURCE" + fi +done + +rm *.apply.yaml + +echo "$(tput setaf 2)Diff applied!$(tput sgr 0)" +''' +category = "RiseDev - Test" diff --git a/src/sqlparser/test_runner/src/bin/apply.rs b/src/sqlparser/test_runner/src/bin/apply.rs new file mode 100644 index 0000000000000..1c1318b8c809a --- /dev/null +++ b/src/sqlparser/test_runner/src/bin/apply.rs @@ -0,0 +1,121 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::path::Path; +use std::sync::{Arc, Mutex}; + +use anyhow::Result; +use console::style; +use futures::future::try_join_all; +use risingwave_sqlparser::ast::Statement; +use risingwave_sqlparser::parser::Parser; +use risingwave_sqlparser_test_runner::TestCase; +use walkdir::WalkDir; + +#[tokio::main] +async fn main() -> Result<()> { + let manifest_dir = env!("CARGO_MANIFEST_DIR"); + let dir = Path::new(manifest_dir) + .parent() + .unwrap() + .join("tests") + .join("testdata"); + println!("Using test cases from {:?}", dir); + + let mut futs = vec![]; + + let log_lock = Arc::new(Mutex::new(())); + + for entry in WalkDir::new(dir) { + let entry = entry.unwrap(); + let path = entry.path(); + + if !path.is_file() { + continue; + } + + if path.is_file() + && path.extension().map_or(false, |p| { + p.eq_ignore_ascii_case("yml") || p.eq_ignore_ascii_case("yaml") + }) + && !path + .file_name() + .unwrap() + .to_string_lossy() + .ends_with(".apply.yaml") + { + let target = path.with_extension("apply.yaml"); + + let path = path.to_path_buf(); + let log_lock = Arc::clone(&log_lock); + futs.push(async move { + let file_content = tokio::fs::read_to_string(&path).await?; + + let cases: Vec = serde_yaml::from_str(&file_content)?; + + let mut new_cases = Vec::with_capacity(cases.len()); + + for case in cases { + let input = &case.input; + let ast = Parser::parse_sql(input); + let actual_case = match ast { + Ok(ast) => { + let [ast]: [Statement; 1] = ast + .try_into() + .expect("Only one statement is supported now."); + + let actual_formatted_sql = + case.formatted_sql.as_ref().map(|_| format!("{}", ast)); + let actual_formatted_ast = + case.formatted_ast.as_ref().map(|_| format!("{:?}", ast)); + + TestCase { + input: input.clone(), + formatted_sql: actual_formatted_sql, + formatted_ast: actual_formatted_ast, + error_msg: None, + } + } + Err(err) => { + let actual_error_msg = format!("{}", err); + TestCase { + input: input.clone(), + formatted_sql: None, + formatted_ast: None, + error_msg: Some(actual_error_msg), + } + } + }; + + if actual_case != case { + let _guard = log_lock.lock(); + println!("{}\n{}\n", style(&case).red(), style(&actual_case).green()) + } + + 
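+                    // `actual_case` now reflects what the parser really produced;
+                    // the regenerated list is written out as <name>.apply.yaml, and
+                    // the risedev tasks above diff it against, or copy it over, the source.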
new_cases.push(actual_case); + } + + let output_content = serde_yaml::to_string(&new_cases)?; + + tokio::fs::write(target, output_content).await?; + + Ok::<_, anyhow::Error>(()) + }); + } + } + + let _res = try_join_all(futs).await?; + + Ok(()) +} diff --git a/src/sqlparser/test_runner/src/lib.rs b/src/sqlparser/test_runner/src/lib.rs index 4ffc2381c95cc..2f9b395026a87 100644 --- a/src/sqlparser/test_runner/src/lib.rs +++ b/src/sqlparser/test_runner/src/lib.rs @@ -14,17 +14,27 @@ // Data-driven tests. +use std::fmt::Display; + use anyhow::{anyhow, Result}; use risingwave_sqlparser::parser::Parser; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; /// `TestCase` will be deserialized from yaml. -#[derive(PartialEq, Eq, Debug, Deserialize)] -struct TestCase { - input: String, - formatted_sql: Option, - error_msg: Option, - formatted_ast: Option, +#[serde_with::skip_serializing_none] +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct TestCase { + pub input: String, + pub formatted_sql: Option, + pub error_msg: Option, + pub formatted_ast: Option, +} + +impl Display for TestCase { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&serde_yaml::to_string(self).unwrap()) + } } fn run_test_case(c: TestCase) -> Result<()> { @@ -97,7 +107,14 @@ pub fn run_all_test_files() { use walkdir::WalkDir; for entry in WalkDir::new("../tests/testdata/") { let entry = entry.unwrap(); - if !entry.path().is_file() { + if !(entry.path().is_file()) { + continue; + } + if !(entry + .path() + .extension() + .map_or(false, |p| p.eq_ignore_ascii_case("yaml"))) + { continue; } let file_content = std::fs::read_to_string(entry.path()).unwrap(); diff --git a/src/sqlparser/tests/sqlparser_common.rs b/src/sqlparser/tests/sqlparser_common.rs index f0821fe87685c..361940cc3d0b0 100644 --- a/src/sqlparser/tests/sqlparser_common.rs +++ b/src/sqlparser/tests/sqlparser_common.rs @@ -20,6 +20,7 @@ #[macro_use] mod test_utils; use matches::assert_matches; +use risingwave_sqlparser::ast::JoinOperator::Inner; use risingwave_sqlparser::ast::*; use risingwave_sqlparser::keywords::ALL_KEYWORDS; use risingwave_sqlparser::parser::ParserError; @@ -126,16 +127,12 @@ fn parse_update() { let sql = "UPDATE t WHERE 1"; let res = parse_sql_statements(sql); - assert_eq!( - ParserError::ParserError("Expected SET, found: WHERE".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected SET, found: WHERE")); let sql = "UPDATE t SET a = 1 extrabadstuff"; let res = parse_sql_statements(sql); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: extrabadstuff".to_string()), - res.unwrap_err() + assert!( + format!("{}", res.unwrap_err()).contains("Expected end of statement, found: extrabadstuff") ); } @@ -252,10 +249,7 @@ fn parse_select_all() { #[test] fn parse_select_all_distinct() { let result = parse_sql_statements("SELECT ALL DISTINCT name FROM customer"); - assert_eq!( - ParserError::ParserError("syntax error at or near \"DISTINCT\"".to_string()), - result.unwrap_err(), - ); + assert!(format!("{}", result.unwrap_err()).contains("syntax error at or near \"DISTINCT\"")); } #[test] @@ -283,10 +277,7 @@ fn parse_select_wildcard() { let sql = "SELECT * + * FROM foo;"; let result = parse_sql_statements(sql); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: +".to_string()), - result.unwrap_err(), - ); + assert!(format!("{}", result.unwrap_err()).contains("Expected end of 
statement, found: +")); } #[test] @@ -323,16 +314,10 @@ fn parse_column_aliases() { #[test] fn test_eof_after_as() { let res = parse_sql_statements("SELECT foo AS"); - assert_eq!( - ParserError::ParserError("Expected an identifier after AS, found: EOF".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected an identifier after AS, found: EOF")); let res = parse_sql_statements("SELECT 1 FROM foo AS"); - assert_eq!( - ParserError::ParserError("Expected an identifier after AS, found: EOF".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected an identifier after AS, found: EOF")); } #[test] @@ -378,19 +363,13 @@ fn parse_select_count_distinct() { let sql = "SELECT COUNT(ALL DISTINCT + x) FROM customer"; let res = parse_sql_statements(sql); - assert_eq!( - ParserError::ParserError("Cannot specify both ALL and DISTINCT".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Cannot specify both ALL and DISTINCT")); } #[test] fn parse_invalid_infix_not() { let res = parse_sql_statements("SELECT c FROM t WHERE c NOT ("); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: NOT".to_string()), - res.unwrap_err(), - ); + assert!(format!("{}", res.unwrap_err(),).contains("Expected end of statement, found: NOT")); } #[test] @@ -1191,10 +1170,7 @@ fn parse_extract() { verified_stmt("SELECT EXTRACT(SECOND FROM d)"); let res = parse_sql_statements("SELECT EXTRACT(0 FROM d)"); - assert_eq!( - ParserError::ParserError("Expected date/time field, found: 0".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected date/time field, found: 0")); } #[test] @@ -1707,18 +1683,12 @@ fn parse_alter_table_alter_column_type() { #[test] fn parse_bad_constraint() { let res = parse_sql_statements("ALTER TABLE tab ADD"); - assert_eq!( - ParserError::ParserError("Expected identifier, found: EOF".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected identifier, found: EOF")); let res = parse_sql_statements("CREATE TABLE tab (foo int,"); - assert_eq!( - ParserError::ParserError( - "Expected column name or constraint definition, found: EOF".to_string() - ), - res.unwrap_err() - ); + + assert!(format!("{}", res.unwrap_err()) + .contains("Expected column name or constraint definition, found: EOF")); } fn run_explain_analyze(query: &str, expected_analyze: bool, expected_options: ExplainOptions) { @@ -1821,22 +1791,16 @@ fn parse_explain_with_invalid_options() { assert!(res.is_err()); let res = parse_sql_statements("EXPLAIN (VERBOSE TRACE) SELECT sqrt(id) FROM foo"); - assert_eq!( - ParserError::ParserError("Expected ), found: TRACE".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected ), found: TRACE")); let res = parse_sql_statements("EXPLAIN () SELECT sqrt(id) FROM foo"); assert!(res.is_err()); let res = parse_sql_statements("EXPLAIN (VERBOSE, ) SELECT sqrt(id) FROM foo"); - assert_eq!( - ParserError::ParserError( - "Expected one of VERBOSE or TRACE or TYPE or LOGICAL or PHYSICAL or DISTSQL, found: )" - .to_string() - ), - res.unwrap_err() - ); + + let err_msg = + "Expected one of VERBOSE or TRACE or TYPE or LOGICAL or PHYSICAL or DISTSQL, found: )"; + assert!(format!("{}", res.unwrap_err()).contains(err_msg)); } #[test] @@ -2140,16 +2104,10 @@ fn parse_literal_interval() { ); let result = parse_sql_statements("SELECT INTERVAL '1' SECOND TO SECOND"); - 
assert_eq!( - ParserError::ParserError("Expected end of statement, found: SECOND".to_string()), - result.unwrap_err(), - ); + assert!(format!("{}", result.unwrap_err()).contains("Expected end of statement, found: SECOND")); let result = parse_sql_statements("SELECT INTERVAL '10' HOUR (1) TO HOUR (2)"); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: (".to_string()), - result.unwrap_err(), - ); + assert!(format!("{}", result.unwrap_err()).contains("Expected end of statement, found: (")); verified_only_select("SELECT INTERVAL '1' YEAR"); verified_only_select("SELECT INTERVAL '1' MONTH"); @@ -2192,12 +2150,17 @@ fn parse_delimited_identifiers() { ); // check FROM match only(select.from).relation { - TableFactor::Table { name, alias } => { + TableFactor::Table { + name, + alias, + for_system_time_as_of_now, + } => { assert_eq!(vec![Ident::with_quote_unchecked('"', "a table")], name.0); assert_eq!( Ident::with_quote_unchecked('"', "alias"), alias.unwrap().name ); + assert!(!for_system_time_as_of_now); } _ => panic!("Expecting TableFactor::Table"), } @@ -2324,6 +2287,7 @@ fn parse_implicit_join() { relation: TableFactor::Table { name: ObjectName(vec!["t1".into()]), alias: None, + for_system_time_as_of_now: false, }, joins: vec![], }, @@ -2331,6 +2295,7 @@ fn parse_implicit_join() { relation: TableFactor::Table { name: ObjectName(vec!["t2".into()]), alias: None, + for_system_time_as_of_now: false, }, joins: vec![], } @@ -2346,11 +2311,13 @@ fn parse_implicit_join() { relation: TableFactor::Table { name: ObjectName(vec!["t1a".into()]), alias: None, + for_system_time_as_of_now: false, }, joins: vec![Join { relation: TableFactor::Table { name: ObjectName(vec!["t1b".into()]), alias: None, + for_system_time_as_of_now: false, }, join_operator: JoinOperator::Inner(JoinConstraint::Natural), }] @@ -2359,11 +2326,13 @@ fn parse_implicit_join() { relation: TableFactor::Table { name: ObjectName(vec!["t2a".into()]), alias: None, + for_system_time_as_of_now: false, }, joins: vec![Join { relation: TableFactor::Table { name: ObjectName(vec!["t2b".into()]), alias: None, + for_system_time_as_of_now: false, }, join_operator: JoinOperator::Inner(JoinConstraint::Natural), }] @@ -2382,6 +2351,7 @@ fn parse_cross_join() { relation: TableFactor::Table { name: ObjectName(vec![Ident::new_unchecked("t2")]), alias: None, + for_system_time_as_of_now: false, }, join_operator: JoinOperator::CrossJoin }, @@ -2389,6 +2359,27 @@ fn parse_cross_join() { ); } +#[test] +fn parse_temporal_join() { + let sql = "SELECT * FROM t1 JOIN t2 FOR SYSTEM_TIME AS OF NOW() ON c1 = c2"; + let select = verified_only_select(sql); + assert_eq!( + Join { + relation: TableFactor::Table { + name: ObjectName(vec![Ident::new_unchecked("t2")]), + alias: None, + for_system_time_as_of_now: true, + }, + join_operator: Inner(JoinConstraint::On(Expr::BinaryOp { + left: Box::new(Expr::Identifier("c1".into())), + op: BinaryOperator::Eq, + right: Box::new(Expr::Identifier("c2".into())), + })) + }, + only(only(select.from).joins), + ); +} + #[test] fn parse_joins_on() { fn join_with_constraint( @@ -2400,6 +2391,7 @@ fn parse_joins_on() { relation: TableFactor::Table { name: ObjectName(vec![Ident::new_unchecked(relation.into())]), alias, + for_system_time_as_of_now: false, }, join_operator: f(JoinConstraint::On(Expr::BinaryOp { left: Box::new(Expr::Identifier("c1".into())), @@ -2451,6 +2443,7 @@ fn parse_joins_using() { relation: TableFactor::Table { name: ObjectName(vec![Ident::new_unchecked(relation.into())]), alias, + 
for_system_time_as_of_now: false, }, join_operator: f(JoinConstraint::Using(vec!["c1".into()])), } @@ -2494,6 +2487,7 @@ fn parse_natural_join() { relation: TableFactor::Table { name: ObjectName(vec![Ident::new_unchecked("t2")]), alias: None, + for_system_time_as_of_now: false, }, join_operator: f(JoinConstraint::Natural), } @@ -2516,10 +2510,8 @@ fn parse_natural_join() { ); let sql = "SELECT * FROM t1 natural"; - assert_eq!( - ParserError::ParserError("Expected a join type after NATURAL, found: EOF".to_string()), - parse_sql_statements(sql).unwrap_err(), - ); + assert!(format!("{}", parse_sql_statements(sql).unwrap_err(),) + .contains("Expected a join type after NATURAL, found: EOF")); } #[test] @@ -2583,10 +2575,7 @@ fn parse_join_syntax_variants() { ); let res = parse_sql_statements("SELECT * FROM a OUTER JOIN b ON 1"); - assert_eq!( - ParserError::ParserError("Expected LEFT, RIGHT, or FULL, found: OUTER".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected LEFT, RIGHT, or FULL, found: OUTER")); } #[test] @@ -2725,6 +2714,7 @@ fn parse_derived_tables() { relation: TableFactor::Table { name: ObjectName(vec!["t2".into()]), alias: None, + for_system_time_as_of_now: false, }, join_operator: JoinOperator::Inner(JoinConstraint::Natural), }], @@ -2775,10 +2765,8 @@ fn parse_multiple_statements() { one_statement_parses_to(&(sql1.to_owned() + ";"), sql1); // Check that forgetting the semicolon results in an error: let res = parse_sql_statements(&(sql1.to_owned() + " " + sql2_kw + sql2_rest)); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: ".to_string() + sql2_kw), - res.unwrap_err() - ); + let err_msg = "Expected end of statement, found: "; + assert!(format!("{}", res.unwrap_err()).contains(err_msg)); } test_with("SELECT foo", "SELECT", " bar"); // ensure that SELECT/WITH is not parsed as a table or column alias if ';' @@ -2840,25 +2828,24 @@ fn parse_overlay() { "SELECT OVERLAY('abc' PLACING 'xyz' FROM 1 FOR 2)", ); - assert_eq!( - parse_sql_statements("SELECT OVERLAY('abc', 'xyz')").unwrap_err(), - ParserError::ParserError("Expected PLACING, found: ,".to_owned()) - ); - - assert_eq!( - parse_sql_statements("SELECT OVERLAY('abc' PLACING 'xyz')").unwrap_err(), - ParserError::ParserError("Expected FROM, found: )".to_owned()) - ); - - assert_eq!( - parse_sql_statements("SELECT OVERLAY('abc' PLACING 'xyz' FOR 2)").unwrap_err(), - ParserError::ParserError("Expected FROM, found: FOR".to_owned()) - ); - - assert_eq!( - parse_sql_statements("SELECT OVERLAY('abc' PLACING 'xyz' FOR 2 FROM 1)").unwrap_err(), - ParserError::ParserError("Expected FROM, found: FOR".to_owned()) - ); + for (sql, err_msg) in [ + ("SELECT OVERLAY('abc', 'xyz')", "Expected PLACING, found: ,"), + ( + "SELECT OVERLAY('abc' PLACING 'xyz')", + "Expected FROM, found: )", + ), + ( + "SELECT OVERLAY('abc' PLACING 'xyz' FOR 2)", + "Expected FROM, found: FOR", + ), + ( + "SELECT OVERLAY('abc' PLACING 'xyz' FOR 2 FROM 1)", + "Expected FROM, found: FOR", + ), + ] { + let res = parse_sql_statements(sql); + assert!(format!("{}", res.unwrap_err()).contains(err_msg)); + } } #[test] @@ -2880,10 +2867,10 @@ fn parse_trim() { one_statement_parses_to("SELECT TRIM(' foo ')", "SELECT TRIM(' foo ')"); - assert_eq!( - ParserError::ParserError("Expected ), found: 'xyz'".to_owned()), - parse_sql_statements("SELECT TRIM(FOO 'xyz' FROM 'xyzfooxyz')").unwrap_err() - ); + let res = parse_sql_statements("SELECT TRIM(FOO 'xyz' FROM 'xyzfooxyz')"); + + let err_msg = "Expected ), found: 
'xyz'"; + assert!(format!("{}", res.unwrap_err()).contains(err_msg)); } #[test] @@ -2910,20 +2897,13 @@ fn parse_exists_subquery() { verified_stmt("SELECT EXISTS (SELECT 1)"); let res = parse_sql_statements("SELECT EXISTS ("); - assert_eq!( - ParserError::ParserError( - "Expected SELECT, VALUES, or a subquery in the query body, found: EOF".to_string() - ), - res.unwrap_err(), - ); + let err_msg = "Expected SELECT, VALUES, or a subquery in the query body, found: EOF"; + assert!(format!("{}", res.unwrap_err()).contains(err_msg)); let res = parse_sql_statements("SELECT EXISTS (NULL)"); - assert_eq!( - ParserError::ParserError( - "Expected SELECT, VALUES, or a subquery in the query body, found: NULL".to_string() - ), - res.unwrap_err(), - ); + + let err_msg = "Expected SELECT, VALUES, or a subquery in the query body, found: NULL"; + assert!(format!("{}", res.unwrap_err()).contains(err_msg)); } #[test] @@ -3162,16 +3142,12 @@ fn parse_drop_table() { }; let sql = "DROP TABLE"; - assert_eq!( - ParserError::ParserError("Expected identifier, found: EOF".to_string()), - parse_sql_statements(sql).unwrap_err(), - ); + assert!(format!("{}", parse_sql_statements(sql).unwrap_err(),) + .contains("Expected identifier, found: EOF")); let sql = "DROP TABLE IF EXISTS foo CASCADE RESTRICT"; - assert_eq!( - ParserError::ParserError("Expected end of statement, found: RESTRICT".to_string()), - parse_sql_statements(sql).unwrap_err(), - ); + assert!(format!("{}", parse_sql_statements(sql).unwrap_err(),) + .contains("Expected end of statement, found: RESTRICT")); } #[test] @@ -3232,10 +3208,7 @@ fn parse_create_user() { #[test] fn parse_invalid_subquery_without_parens() { let res = parse_sql_statements("SELECT SELECT 1 FROM bar WHERE 1=1 FROM baz"); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: 1".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected end of statement, found: 1")); } #[test] @@ -3396,21 +3369,13 @@ fn lateral_derived() { let sql = "SELECT * FROM customer LEFT JOIN LATERAL generate_series(1, customer.id)"; let res = parse_sql_statements(sql); - assert_eq!( - ParserError::ParserError( - "Expected subquery after LATERAL, found: generate_series".to_string() - ), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()) + .contains("Expected subquery after LATERAL, found: generate_series")); let sql = "SELECT * FROM a LEFT JOIN LATERAL (b CROSS JOIN c)"; let res = parse_sql_statements(sql); - assert_eq!( - ParserError::ParserError( - "Expected SELECT, VALUES, or a subquery in the query body, found: b".to_string() - ), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()) + .contains("Expected SELECT, VALUES, or a subquery in the query body, found: b")); } #[test] @@ -3463,22 +3428,13 @@ fn parse_start_transaction() { ); let res = parse_sql_statements("START TRANSACTION ISOLATION LEVEL BAD"); - assert_eq!( - ParserError::ParserError("Expected isolation level, found: BAD".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected isolation level, found: BAD")); let res = parse_sql_statements("START TRANSACTION BAD"); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: BAD".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected end of statement, found: BAD")); let res = parse_sql_statements("START TRANSACTION READ ONLY,"); - assert_eq!( - ParserError::ParserError("Expected transaction mode, found: 
EOF".to_string()), - res.unwrap_err() - ); + assert!(format!("{}", res.unwrap_err()).contains("Expected transaction mode, found: EOF")); } #[test] @@ -3558,7 +3514,7 @@ fn parse_rollback() { #[test] fn parse_create_index() { - let sql = "CREATE UNIQUE INDEX IF NOT EXISTS idx_name ON test(name,age DESC) INCLUDE(other) DISTRIBUTED BY(name)"; + let sql = "CREATE UNIQUE INDEX IF NOT EXISTS idx_name ON test(name, age DESC) INCLUDE(other) DISTRIBUTED BY(name)"; let indexed_columns = vec![ OrderByExpr { expr: Expr::Identifier(Ident::new_unchecked("name")), diff --git a/src/sqlparser/tests/sqlparser_postgres.rs b/src/sqlparser/tests/sqlparser_postgres.rs index f80ca930e2895..d78e71d99e080 100644 --- a/src/sqlparser/tests/sqlparser_postgres.rs +++ b/src/sqlparser/tests/sqlparser_postgres.rs @@ -307,29 +307,27 @@ fn parse_create_table_if_not_exists() { #[test] fn parse_bad_if_not_exists() { - let res = parse_sql_statements("CREATE TABLE NOT EXISTS uk_cities ()"); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: EXISTS".to_string()), - res.unwrap_err() - ); - - let res = parse_sql_statements("CREATE TABLE IF EXISTS uk_cities ()"); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: EXISTS".to_string()), - res.unwrap_err() - ); - - let res = parse_sql_statements("CREATE TABLE IF uk_cities ()"); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: uk_cities".to_string()), - res.unwrap_err() - ); - - let res = parse_sql_statements("CREATE TABLE IF NOT uk_cities ()"); - assert_eq!( - ParserError::ParserError("Expected end of statement, found: NOT".to_string()), - res.unwrap_err() - ); + for (sql, err_msg) in [ + ( + "CREATE TABLE NOT EXISTS uk_cities ()", + "Expected end of statement, found: EXISTS", + ), + ( + "CREATE TABLE IF EXISTS uk_cities ()", + "Expected end of statement, found: EXISTS", + ), + ( + "CREATE TABLE IF uk_cities ()", + "Expected end of statement, found: uk_cities", + ), + ( + "CREATE TABLE IF NOT uk_cities ()", + "Expected end of statement, found: NOT", + ), + ] { + let res = parse_sql_statements(sql); + assert!(format!("{}", res.unwrap_err()).contains(err_msg)); + } } #[test] @@ -438,27 +436,14 @@ fn parse_set() { one_statement_parses_to("SET a TO b", "SET a = b"); one_statement_parses_to("SET SESSION a = b", "SET a = b"); - - assert_eq!( - parse_sql_statements("SET"), - Err(ParserError::ParserError( - "Expected identifier, found: EOF".to_string() - )), - ); - - assert_eq!( - parse_sql_statements("SET a b"), - Err(ParserError::ParserError( - "Expected equals sign or TO, found: b".to_string() - )), - ); - - assert_eq!( - parse_sql_statements("SET a ="), - Err(ParserError::ParserError( - "Expected variable value, found: EOF".to_string() - )), - ); + for (sql, err_msg) in [ + ("SET", "Expected identifier, found: EOF"), + ("SET a b", "Expected equals sign or TO, found: b"), + ("SET a =", "Expected variable value, found: EOF"), + ] { + let res = parse_sql_statements(sql); + assert!(format!("{}", res.unwrap_err()).contains(err_msg)); + } } #[test] @@ -769,7 +754,7 @@ fn parse_create_function() { OperateFunctionArg::unnamed(DataType::Int), OperateFunctionArg::unnamed(DataType::Int), ]), - return_type: Some(DataType::Int), + returns: Some(CreateFunctionReturns::Value(DataType::Int)), params: CreateFunctionBody { language: Some("SQL".into()), behavior: Some(FunctionBehavior::Immutable), @@ -797,7 +782,7 @@ fn parse_create_function() { default_expr: Some(Expr::Value(Value::Number("1".into()))), } ]), - return_type: 
Some(DataType::Int), + returns: Some(CreateFunctionReturns::Value(DataType::Int)), params: CreateFunctionBody { language: Some("SQL".into()), behavior: Some(FunctionBehavior::Immutable), @@ -810,6 +795,29 @@ fn parse_create_function() { }, } ); + + let sql = "CREATE FUNCTION unnest(a INT[]) RETURNS TABLE (x INT) LANGUAGE SQL RETURN a"; + assert_eq!( + verified_stmt(sql), + Statement::CreateFunction { + or_replace: false, + temporary: false, + name: ObjectName(vec![Ident::new_unchecked("unnest")]), + args: Some(vec![OperateFunctionArg::with_name( + "a", + DataType::Array(Box::new(DataType::Int)) + ),]), + returns: Some(CreateFunctionReturns::Table(vec![TableColumnDef { + name: Ident::new_unchecked("x"), + data_type: DataType::Int, + }])), + params: CreateFunctionBody { + language: Some("SQL".into()), + return_: Some(Expr::Identifier("a".into())), + ..Default::default() + }, + } + ); } #[test] @@ -1013,7 +1021,7 @@ fn parse_array() { assert_eq!( parse_sql_statements(sql), Err(ParserError::ParserError( - "syntax error at or near '['".to_string() + "syntax error at or near '[ at line:1, column:28'".to_string() )) ); @@ -1021,7 +1029,7 @@ fn parse_array() { assert_eq!( parse_sql_statements(sql), Err(ParserError::ParserError( - "syntax error at or near '['".to_string() + "syntax error at or near '[ at line:1, column:24'".to_string() )) ); @@ -1029,7 +1037,7 @@ fn parse_array() { assert_eq!( parse_sql_statements(sql), Err(ParserError::ParserError( - "syntax error at or near 'ARRAY'".to_string() + "syntax error at or near 'ARRAY at line:1, column:27'".to_string() )) ); @@ -1037,17 +1045,14 @@ fn parse_array() { assert_eq!( parse_sql_statements(sql), Err(ParserError::ParserError( - "syntax error at or near 'ARRAY'".to_string() + "syntax error at or near 'ARRAY at line:1, column:23'".to_string() )) ); let sql = "SELECT [[1, 2], [3, 4]]"; - assert_eq!( - parse_sql_statements(sql), - Err(ParserError::ParserError( - "Expected an expression:, found: [".to_string() - )), - ); + let res = parse_sql_statements(sql); + let err_msg = "Expected an expression:, found: ["; + assert!(format!("{}", res.unwrap_err()).contains(err_msg)); } #[test] diff --git a/src/sqlparser/tests/testdata/.gitignore b/src/sqlparser/tests/testdata/.gitignore new file mode 100644 index 0000000000000..059da14bb42b3 --- /dev/null +++ b/src/sqlparser/tests/testdata/.gitignore @@ -0,0 +1 @@ +*.apply.yaml diff --git a/src/sqlparser/tests/testdata/alter.yaml b/src/sqlparser/tests/testdata/alter.yaml index 0672068780a5c..4dd0a20684923 100644 --- a/src/sqlparser/tests/testdata/alter.yaml +++ b/src/sqlparser/tests/testdata/alter.yaml @@ -1,11 +1,9 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. - input: ALTER USER user WITH SUPERUSER CREATEDB PASSWORD 'password' formatted_sql: ALTER USER user WITH SUPERUSER CREATEDB PASSWORD 'password' - - input: ALTER USER user RENAME TO another formatted_sql: ALTER USER user RENAME TO another - - input: ALTER SYSTEM SET a = 'abc' formatted_sql: ALTER SYSTEM SET a = 'abc' - - input: ALTER SYSTEM SET a = DEFAULT formatted_sql: ALTER SYSTEM SET a = DEFAULT diff --git a/src/sqlparser/tests/testdata/array.yaml b/src/sqlparser/tests/testdata/array.yaml index b51356ae42f63..aa655652b2b7f 100644 --- a/src/sqlparser/tests/testdata/array.yaml +++ b/src/sqlparser/tests/testdata/array.yaml @@ -1,32 +1,27 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. 
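The YAML test files from here on are regenerated by the new tooling rather than edited by hand; each gains the auto-generated header, and `error_msg` entries now embed the token location plus the `Near "..."` excerpt. The round trip, using the commands defined in `sqlparser_test.toml` above:

    cargo run --bin parser-test-apply   # rewrite each file's cases into <file>.apply.yaml
    ./risedev do-apply-parser-test      # prepend the header and copy them back over the sources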
- input: CREATE TABLE t(a int[]); formatted_sql: CREATE TABLE t (a INT[]) - - input: CREATE TABLE t(a int[][]); formatted_sql: CREATE TABLE t (a INT[][]) - - input: CREATE TABLE t(a int[][][]); formatted_sql: CREATE TABLE t (a INT[][][]) - - input: CREATE TABLE t(a int[); - error_msg: | - sql parser error: Expected ], found: ) - + error_msg: |- + sql parser error: Expected ], found: ) at line:1, column:23 + Near "CREATE TABLE t(a int[" - input: CREATE TABLE t(a int[[]); - error_msg: | - sql parser error: Expected ], found: [ - + error_msg: |- + sql parser error: Expected ], found: [ at line:1, column:23 + Near "CREATE TABLE t(a int[" - input: CREATE TABLE t(a int]); - error_msg: | - sql parser error: Expected ',' or ')' after column definition, found: ] - + error_msg: |- + sql parser error: Expected ',' or ')' after column definition, found: ] at line:1, column:22 + Near "CREATE TABLE t(a int" - input: SELECT foo[0] FROM foos formatted_sql: SELECT foo[0] FROM foos - - input: SELECT foo[0][0] FROM foos formatted_sql: SELECT foo[0][0] FROM foos - - input: SELECT (CAST(ARRAY[ARRAY[2, 3]] AS INT[][]))[1][2] formatted_sql: SELECT (CAST(ARRAY[ARRAY[2, 3]] AS INT[][]))[1][2] - - input: SELECT ARRAY[] formatted_sql: SELECT ARRAY[] diff --git a/src/sqlparser/tests/testdata/create.yaml b/src/sqlparser/tests/testdata/create.yaml index d3ce3dc38c46d..c6ff608bd682e 100644 --- a/src/sqlparser/tests/testdata/create.yaml +++ b/src/sqlparser/tests/testdata/create.yaml @@ -1,96 +1,64 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. - input: CREATE DATABASE t formatted_sql: CREATE DATABASE t - formatted_ast: | - CreateDatabase { db_name: ObjectName([Ident { value: "t", quote_style: None }]), if_not_exists: false } - + formatted_ast: 'CreateDatabase { db_name: ObjectName([Ident { value: "t", quote_style: None }]), if_not_exists: false }' - input: CREATE DATABASE IF NOT EXISTS t formatted_sql: CREATE DATABASE IF NOT EXISTS t - formatted_ast: | - CreateDatabase { db_name: ObjectName([Ident { value: "t", quote_style: None }]), if_not_exists: true } - + formatted_ast: 'CreateDatabase { db_name: ObjectName([Ident { value: "t", quote_style: None }]), if_not_exists: true }' - input: CREATE SCHEMA t formatted_sql: CREATE SCHEMA t - formatted_ast: | - CreateSchema { schema_name: ObjectName([Ident { value: "t", quote_style: None }]), if_not_exists: false } - + formatted_ast: 'CreateSchema { schema_name: ObjectName([Ident { value: "t", quote_style: None }]), if_not_exists: false }' - input: CREATE SCHEMA IF NOT EXISTS t formatted_sql: CREATE SCHEMA IF NOT EXISTS t - formatted_ast: | - CreateSchema { schema_name: ObjectName([Ident { value: "t", quote_style: None }]), if_not_exists: true } - + formatted_ast: 'CreateSchema { schema_name: ObjectName([Ident { value: "t", quote_style: None }]), if_not_exists: true }' - input: CREATE OR REPLACE TABLE t (a INT) formatted_sql: CREATE OR REPLACE TABLE t (a INT) - - input: CREATE TABLE t (a INT, b INT) AS SELECT 1 AS b, 2 AS a formatted_sql: CREATE TABLE t (a INT, b INT) AS SELECT 1 AS b, 2 AS a - - input: CREATE SOURCE src - error_msg: | - sql parser error: Expected ROW, found: EOF - + error_msg: |- + sql parser error: Expected ROW, found: EOF at the end + Near "CREATE SOURCE src" - input: CREATE SOURCE src ROW FORMAT JSON formatted_sql: CREATE SOURCE src ROW FORMAT JSON - - input: CREATE SOURCE IF NOT EXISTS src WITH (kafka.topic = 'abc', kafka.servers = 'localhost:1001') ROW FORMAT PROTOBUF MESSAGE 'Foo' ROW 
SCHEMA LOCATION 'file://' formatted_sql: CREATE SOURCE IF NOT EXISTS src WITH (kafka.topic = 'abc', kafka.servers = 'localhost:1001') ROW FORMAT PROTOBUF MESSAGE 'Foo' ROW SCHEMA LOCATION 'file://' - formatted_ast: | - CreateSource { stmt: CreateSourceStatement { if_not_exists: true, columns: [], constraints: [], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "servers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), source_schema: Protobuf(ProtobufSchema { message_name: AstString("Foo"), row_schema_location: AstString("file://"), use_schema_registry: false }), source_watermarks: [] } } - + formatted_ast: 'CreateSource { stmt: CreateSourceStatement { if_not_exists: true, columns: [], constraints: [], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "servers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), source_schema: Protobuf(ProtobufSchema { message_name: AstString("Foo"), row_schema_location: AstString("file://"), use_schema_registry: false }), source_watermarks: [] } }' - input: CREATE SOURCE IF NOT EXISTS src WITH (kafka.topic = 'abc', kafka.servers = 'localhost:1001') ROW FORMAT PROTOBUF MESSAGE 'Foo' ROW SCHEMA LOCATION CONFLUENT SCHEMA REGISTRY 'http://' formatted_sql: CREATE SOURCE IF NOT EXISTS src WITH (kafka.topic = 'abc', kafka.servers = 'localhost:1001') ROW FORMAT PROTOBUF MESSAGE 'Foo' ROW SCHEMA LOCATION CONFLUENT SCHEMA REGISTRY 'http://' - formatted_ast: | - CreateSource { stmt: CreateSourceStatement { if_not_exists: true, columns: [], constraints: [], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "servers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), source_schema: Protobuf(ProtobufSchema { message_name: AstString("Foo"), row_schema_location: AstString("http://"), use_schema_registry: true }), source_watermarks: [] } } - + formatted_ast: 'CreateSource { stmt: CreateSourceStatement { if_not_exists: true, columns: [], constraints: [], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "servers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), source_schema: Protobuf(ProtobufSchema { message_name: AstString("Foo"), row_schema_location: AstString("http://"), use_schema_registry: true }), source_watermarks: [] } }' - input: CREATE SOURCE bid (auction INTEGER, bidder INTEGER, price 
INTEGER, WATERMARK FOR auction AS auction - 1, "date_time" TIMESTAMP) with (connector = 'nexmark', nexmark.table.type = 'Bid', nexmark.split.num = '12', nexmark.min.event.gap.in.ns = '0') formatted_sql: CREATE SOURCE bid (auction INT, bidder INT, price INT, "date_time" TIMESTAMP, WATERMARK FOR auction AS auction - 1) WITH (connector = 'nexmark', nexmark.table.type = 'Bid', nexmark.split.num = '12', nexmark.min.event.gap.in.ns = '0') ROW FORMAT NATIVE - formatted_ast: | - CreateSource { stmt: CreateSourceStatement { if_not_exists: false, columns: [ColumnDef { name: Ident { value: "auction", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "bidder", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "price", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "date_time", quote_style: Some('"') }, data_type: Some(Timestamp(false)), collation: None, options: [] }], constraints: [], source_name: ObjectName([Ident { value: "bid", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "connector", quote_style: None }]), value: SingleQuotedString("nexmark") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "table", quote_style: None }, Ident { value: "type", quote_style: None }]), value: SingleQuotedString("Bid") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "split", quote_style: None }, Ident { value: "num", quote_style: None }]), value: SingleQuotedString("12") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "min", quote_style: None }, Ident { value: "event", quote_style: None }, Ident { value: "gap", quote_style: None }, Ident { value: "in", quote_style: None }, Ident { value: "ns", quote_style: None }]), value: SingleQuotedString("0") }]), source_schema: Native, source_watermarks: [SourceWatermark { column: Ident { value: "auction", quote_style: None }, expr: BinaryOp { left: Identifier(Ident { value: "auction", quote_style: None }), op: Minus, right: Value(Number("1")) } }] } } - + formatted_ast: 'CreateSource { stmt: CreateSourceStatement { if_not_exists: false, columns: [ColumnDef { name: Ident { value: "auction", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "bidder", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "price", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "date_time", quote_style: Some(''"'') }, data_type: Some(Timestamp(false)), collation: None, options: [] }], constraints: [], source_name: ObjectName([Ident { value: "bid", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "connector", quote_style: None }]), value: SingleQuotedString("nexmark") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "table", quote_style: None }, Ident { value: "type", quote_style: None }]), value: SingleQuotedString("Bid") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "split", quote_style: None }, Ident { value: "num", quote_style: None }]), value: SingleQuotedString("12") }, SqlOption { name: ObjectName([Ident { value: 
"nexmark", quote_style: None }, Ident { value: "min", quote_style: None }, Ident { value: "event", quote_style: None }, Ident { value: "gap", quote_style: None }, Ident { value: "in", quote_style: None }, Ident { value: "ns", quote_style: None }]), value: SingleQuotedString("0") }]), source_schema: Native, source_watermarks: [SourceWatermark { column: Ident { value: "auction", quote_style: None }, expr: BinaryOp { left: Identifier(Ident { value: "auction", quote_style: None }), op: Minus, right: Value(Number("1")) } }] } }' - input: CREATE TABLE T (v1 INT, v2 STRUCT) formatted_sql: CREATE TABLE T (v1 INT, v2 STRUCT) - - input: CREATE TABLE T (v1 INT, v2 STRUCT>) formatted_sql: CREATE TABLE T (v1 INT, v2 STRUCT>) - - input: CREATE TABLE T (a STRUCT) formatted_sql: CREATE TABLE T (a STRUCT) - - input: CREATE TABLE T (FULL INT) - error_msg: | - sql parser error: syntax error at or near "FULL" - + error_msg: 'sql parser error: syntax error at or near "FULL"' - input: CREATE TABLE T ("FULL" INT) formatted_sql: CREATE TABLE T ("FULL" INT) - - input: CREATE USER user WITH SUPERUSER CREATEDB PASSWORD 'password' formatted_sql: CREATE USER user WITH SUPERUSER CREATEDB PASSWORD 'password' - - input: CREATE SINK snk - error_msg: | - sql parser error: Expected FROM or AS after CREATE SINK sink_name, found: EOF - + error_msg: |- + sql parser error: Expected FROM or AS after CREATE SINK sink_name, found: EOF at the end + Near "CREATE SINK snk" - input: CREATE SINK IF NOT EXISTS snk FROM mv WITH (connector = 'mysql', mysql.endpoint = '127.0.0.1:3306', mysql.table = '', mysql.database = '', mysql.user = '', mysql.password = '') formatted_sql: CREATE SINK IF NOT EXISTS snk FROM mv WITH (connector = 'mysql', mysql.endpoint = '127.0.0.1:3306', mysql.table = '', mysql.database = '', mysql.user = '', mysql.password = '') - - input: CREATE SINK IF NOT EXISTS snk AS SELECT count(*) AS cnt FROM mv WITH (connector = 'mysql', mysql.endpoint = '127.0.0.1:3306', mysql.table = '', mysql.database = '', mysql.user = '', mysql.password = '') formatted_sql: CREATE SINK IF NOT EXISTS snk AS SELECT count(*) AS cnt FROM mv WITH (connector = 'mysql', mysql.endpoint = '127.0.0.1:3306', mysql.table = '', mysql.database = '', mysql.user = '', mysql.password = '') - - input: create user tmp createdb nocreatedb - error_msg: | - sql parser error: conflicting or redundant options - + error_msg: 'sql parser error: conflicting or redundant options' - input: create user tmp createdb createdb - error_msg: | - sql parser error: conflicting or redundant options - + error_msg: 'sql parser error: conflicting or redundant options' - input: create user tmp with password '123' password null - error_msg: | - sql parser error: conflicting or redundant options - + error_msg: 'sql parser error: conflicting or redundant options' - input: create user tmp with encrypted password '' password null - error_msg: | - sql parser error: conflicting or redundant options - + error_msg: 'sql parser error: conflicting or redundant options' - input: create user tmp with encrypted password null - error_msg: | - sql parser error: Expected literal string, found: null + error_msg: |- + sql parser error: Expected literal string, found: null at line:1, column:45 + Near " tmp with encrypted password null" diff --git a/src/sqlparser/tests/testdata/drop.yaml b/src/sqlparser/tests/testdata/drop.yaml index f6b1673d48375..0fc49060328fb 100644 --- a/src/sqlparser/tests/testdata/drop.yaml +++ b/src/sqlparser/tests/testdata/drop.yaml @@ -1,25 +1,18 @@ +# This file is 
automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. - input: DROP SOURCE src formatted_sql: DROP SOURCE src - formatted_ast: | - Drop(DropStatement { object_type: Source, if_exists: false, object_name: ObjectName([Ident { value: "src", quote_style: None }]), drop_mode: None }) - + formatted_ast: 'Drop(DropStatement { object_type: Source, if_exists: false, object_name: ObjectName([Ident { value: "src", quote_style: None }]), drop_mode: None })' - input: DROP MATERIALIZED VIEW t formatted_sql: DROP MATERIALIZED VIEW t - - input: DROP DATABASE t formatted_sql: DROP DATABASE t - - input: DROP SCHEMA t formatted_sql: DROP SCHEMA t - - input: DROP DATABASE IF EXISTS t formatted_sql: DROP DATABASE IF EXISTS t - - input: DROP SCHEMA IF EXISTS t formatted_sql: DROP SCHEMA IF EXISTS t - - input: DROP USER user formatted_sql: DROP USER user - - input: DROP USER IF EXISTS user formatted_sql: DROP USER IF EXISTS user diff --git a/src/sqlparser/tests/testdata/drop_index.yaml b/src/sqlparser/tests/testdata/drop_index.yaml index a0cb6d42eb9f0..130a6b36673dd 100644 --- a/src/sqlparser/tests/testdata/drop_index.yaml +++ b/src/sqlparser/tests/testdata/drop_index.yaml @@ -1,4 +1,4 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. - input: DROP INDEX idx_a formatted_sql: DROP INDEX idx_a - formatted_ast: | - Drop(DropStatement { object_type: Index, if_exists: false, object_name: ObjectName([Ident { value: "idx_a", quote_style: None }]), drop_mode: None }) + formatted_ast: 'Drop(DropStatement { object_type: Index, if_exists: false, object_name: ObjectName([Ident { value: "idx_a", quote_style: None }]), drop_mode: None })' diff --git a/src/sqlparser/tests/testdata/insert.yaml b/src/sqlparser/tests/testdata/insert.yaml index 5a8d5a872c874..4d1bb445d0027 100644 --- a/src/sqlparser/tests/testdata/insert.yaml +++ b/src/sqlparser/tests/testdata/insert.yaml @@ -1,6 +1,7 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. - input: INSERT public.customer (id, name, active) VALUES (1, 2, 3) - error_msg: | - sql parser error: Expected INTO, found: public - + error_msg: |- + sql parser error: Expected INTO, found: public at line:1, column:14 + Near "INSERT" - input: INSERT INTO t VALUES(1,3), (2,4) RETURNING *, a, a as aaa formatted_sql: INSERT INTO t VALUES (1, 3), (2, 4) RETURNING (*, a, a AS aaa) diff --git a/src/sqlparser/tests/testdata/precedence.yaml b/src/sqlparser/tests/testdata/precedence.yaml index 5f0304f963f97..72673d1e93299 100644 --- a/src/sqlparser/tests/testdata/precedence.yaml +++ b/src/sqlparser/tests/testdata/precedence.yaml @@ -1,24 +1,16 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. 
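The thread running through all of these testdata updates is the richer parser error format: every error_msg now carries a position ("at line:L, column:C", or "at the end" for EOF) in addition to a Near "..." excerpt of the input before the failure point. As a rough illustration only, with hypothetical names rather than the actual risingwave_sqlparser types, rendering such a message could look like the sketch below (the window logic is simplified; the real parser clips at token boundaries, and ASCII input is assumed):

    struct ParserError {
        message: String,
        line: usize,   // 1-based
        column: usize, // 1-based
    }

    // Render "sql parser error: <msg> at line:L, column:C" plus a short
    // `Near "..."` excerpt of the input preceding the reported column.
    fn render_error(sql: &str, err: &ParserError) -> String {
        let end = err.column.saturating_sub(1).min(sql.len());
        let before = &sql[..end]; // assumes ASCII, so byte == char offsets
        let start = before.len().saturating_sub(30); // keep a ~30-char window
        format!(
            "sql parser error: {} at line:{}, column:{}\nNear \"{}\"",
            err.message, err.line, err.column, &before[start..]
        )
    }

    fn main() {
        let err = ParserError {
            message: "Expected ], found: )".into(),
            line: 1,
            column: 23,
        };
        println!("{}", render_error("CREATE TABLE t(a int[);", &err));
    }

The same regeneration also collapses the multi-line YAML block scalars (|) into single-quoted scalars or |- blocks, which is why many hunks here only reflow formatted_ast strings without changing their content.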
- input: select 2 + 2 ^ 2 formatted_sql: SELECT 2 + 2 ^ 2 - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(BinaryOp { left: Value(Number("2")), op: Plus, right: BinaryOp { left: Value(Number("2")), op: BitwiseXor, right: Value(Number("2")) } })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(BinaryOp { left: Value(Number("2")), op: Plus, right: BinaryOp { left: Value(Number("2")), op: BitwiseXor, right: Value(Number("2")) } })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: select 2 | 3 & 4 formatted_sql: SELECT 2 | 3 & 4 - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(BinaryOp { left: BinaryOp { left: Value(Number("2")), op: BitwiseOr, right: Value(Number("3")) }, op: BitwiseAnd, right: Value(Number("4")) })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(BinaryOp { left: BinaryOp { left: Value(Number("2")), op: BitwiseOr, right: Value(Number("3")) }, op: BitwiseAnd, right: Value(Number("4")) })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: select - 3 ^ 2 formatted_sql: SELECT -3 ^ 2 - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(BinaryOp { left: Value(Number("-3")), op: BitwiseXor, right: Value(Number("2")) })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(BinaryOp { left: Value(Number("-3")), op: BitwiseXor, right: Value(Number("2")) })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: select - 3 * 2 formatted_sql: SELECT -3 * 2 - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(BinaryOp { left: Value(Number("-3")), op: Multiply, right: Value(Number("2")) })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(BinaryOp { left: Value(Number("-3")), op: Multiply, right: Value(Number("2")) })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: select |/ 4 + 12 formatted_sql: SELECT |/ 4 + 12 - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(UnaryOp { op: PGSquareRoot, expr: BinaryOp { left: Value(Number("4")), op: Plus, right: Value(Number("12")) } })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: 
All, projection: [UnnamedExpr(UnaryOp { op: PGSquareRoot, expr: BinaryOp { left: Value(Number("4")), op: Plus, right: Value(Number("12")) } })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' diff --git a/src/sqlparser/tests/testdata/privilege.yaml b/src/sqlparser/tests/testdata/privilege.yaml index 6cd980d3c5494..44d1b40d0bcc3 100644 --- a/src/sqlparser/tests/testdata/privilege.yaml +++ b/src/sqlparser/tests/testdata/privilege.yaml @@ -1,39 +1,25 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. - input: GRANT ALL ON DATABASE database TO user1 WITH GRANT OPTION GRANTED BY user formatted_sql: GRANT ALL ON DATABASE database TO user1 WITH GRANT OPTION GRANTED BY user - formatted_ast: | - Grant { privileges: All { with_privileges_keyword: false }, objects: Databases([ObjectName([Ident { value: "database", quote_style: None }])]), grantees: [Ident { value: "user1", quote_style: None }], with_grant_option: true, granted_by: Some(Ident { value: "user", quote_style: None }) } - + formatted_ast: 'Grant { privileges: All { with_privileges_keyword: false }, objects: Databases([ObjectName([Ident { value: "database", quote_style: None }])]), grantees: [Ident { value: "user1", quote_style: None }], with_grant_option: true, granted_by: Some(Ident { value: "user", quote_style: None }) }' - input: GRANT ALL ON SCHEMA schema1, schema2 TO user1 WITH GRANT OPTION GRANTED BY user formatted_sql: GRANT ALL ON SCHEMA schema1, schema2 TO user1 WITH GRANT OPTION GRANTED BY user - formatted_ast: | - Grant { privileges: All { with_privileges_keyword: false }, objects: Schemas([ObjectName([Ident { value: "schema1", quote_style: None }]), ObjectName([Ident { value: "schema2", quote_style: None }])]), grantees: [Ident { value: "user1", quote_style: None }], with_grant_option: true, granted_by: Some(Ident { value: "user", quote_style: None }) } - + formatted_ast: 'Grant { privileges: All { with_privileges_keyword: false }, objects: Schemas([ObjectName([Ident { value: "schema1", quote_style: None }]), ObjectName([Ident { value: "schema2", quote_style: None }])]), grantees: [Ident { value: "user1", quote_style: None }], with_grant_option: true, granted_by: Some(Ident { value: "user", quote_style: None }) }' - input: GRANT ALL PRIVILEGES ON ALL SOURCES IN SCHEMA schema TO user1 GRANTED BY user formatted_sql: GRANT ALL PRIVILEGES ON ALL SOURCES IN SCHEMA schema TO user1 GRANTED BY user - formatted_ast: | - Grant { privileges: All { with_privileges_keyword: true }, objects: AllSourcesInSchema { schemas: [ObjectName([Ident { value: "schema", quote_style: None }])] }, grantees: [Ident { value: "user1", quote_style: None }], with_grant_option: false, granted_by: Some(Ident { value: "user", quote_style: None }) } - + formatted_ast: 'Grant { privileges: All { with_privileges_keyword: true }, objects: AllSourcesInSchema { schemas: [ObjectName([Ident { value: "schema", quote_style: None }])] }, grantees: [Ident { value: "user1", quote_style: None }], with_grant_option: false, granted_by: Some(Ident { value: "user", quote_style: None }) }' - input: GRANT ALL PRIVILEGES ON ALL MATERIALIZED VIEWS IN SCHEMA schema TO user1 GRANTED BY user formatted_sql: GRANT ALL PRIVILEGES ON ALL MATERIALIZED VIEWS IN SCHEMA schema TO user1 GRANTED BY user - formatted_ast: | - Grant { privileges: All { with_privileges_keyword: true }, objects: AllMviewsInSchema { schemas: [ObjectName([Ident { value: 
"schema", quote_style: None }])] }, grantees: [Ident { value: "user1", quote_style: None }], with_grant_option: false, granted_by: Some(Ident { value: "user", quote_style: None }) } - + formatted_ast: 'Grant { privileges: All { with_privileges_keyword: true }, objects: AllMviewsInSchema { schemas: [ObjectName([Ident { value: "schema", quote_style: None }])] }, grantees: [Ident { value: "user1", quote_style: None }], with_grant_option: false, granted_by: Some(Ident { value: "user", quote_style: None }) }' - input: REVOKE GRANT OPTION FOR ALL ON DATABASE database FROM user1 GRANTED BY user formatted_sql: REVOKE GRANT OPTION FOR ALL ON DATABASE database FROM user1 GRANTED BY user RESTRICT - formatted_ast: | - Revoke { privileges: All { with_privileges_keyword: false }, objects: Databases([ObjectName([Ident { value: "database", quote_style: None }])]), grantees: [Ident { value: "user1", quote_style: None }], granted_by: Some(Ident { value: "user", quote_style: None }), revoke_grant_option: true, cascade: false } - + formatted_ast: 'Revoke { privileges: All { with_privileges_keyword: false }, objects: Databases([ObjectName([Ident { value: "database", quote_style: None }])]), grantees: [Ident { value: "user1", quote_style: None }], granted_by: Some(Ident { value: "user", quote_style: None }), revoke_grant_option: true, cascade: false }' - input: REVOKE ALL PRIVILEGES ON DATABASE database FROM user1 GRANTED BY user formatted_sql: REVOKE ALL PRIVILEGES ON DATABASE database FROM user1 GRANTED BY user RESTRICT - formatted_ast: | - Revoke { privileges: All { with_privileges_keyword: true }, objects: Databases([ObjectName([Ident { value: "database", quote_style: None }])]), grantees: [Ident { value: "user1", quote_style: None }], granted_by: Some(Ident { value: "user", quote_style: None }), revoke_grant_option: false, cascade: false } - + formatted_ast: 'Revoke { privileges: All { with_privileges_keyword: true }, objects: Databases([ObjectName([Ident { value: "database", quote_style: None }])]), grantees: [Ident { value: "user1", quote_style: None }], granted_by: Some(Ident { value: "user", quote_style: None }), revoke_grant_option: false, cascade: false }' - input: REVOKE ALL PRIVILEGES ON ALL MATERIALIZED VIEWS IN SCHEMA schema FROM user1 formatted_sql: REVOKE ALL PRIVILEGES ON ALL MATERIALIZED VIEWS IN SCHEMA schema FROM user1 RESTRICT - formatted_ast: | - Revoke { privileges: All { with_privileges_keyword: true }, objects: AllMviewsInSchema { schemas: [ObjectName([Ident { value: "schema", quote_style: None }])] }, grantees: [Ident { value: "user1", quote_style: None }], granted_by: None, revoke_grant_option: false, cascade: false } - + formatted_ast: 'Revoke { privileges: All { with_privileges_keyword: true }, objects: AllMviewsInSchema { schemas: [ObjectName([Ident { value: "schema", quote_style: None }])] }, grantees: [Ident { value: "user1", quote_style: None }], granted_by: None, revoke_grant_option: false, cascade: false }' - input: REVOKE ALL PRIVILEGES ON ALL SOURCES IN SCHEMA schema FROM user1 formatted_sql: REVOKE ALL PRIVILEGES ON ALL SOURCES IN SCHEMA schema FROM user1 RESTRICT - formatted_ast: | - Revoke { privileges: All { with_privileges_keyword: true }, objects: AllSourcesInSchema { schemas: [ObjectName([Ident { value: "schema", quote_style: None }])] }, grantees: [Ident { value: "user1", quote_style: None }], granted_by: None, revoke_grant_option: false, cascade: false } + formatted_ast: 'Revoke { privileges: All { with_privileges_keyword: true }, objects: AllSourcesInSchema { 
schemas: [ObjectName([Ident { value: "schema", quote_style: None }])] }, grantees: [Ident { value: "user1", quote_style: None }], granted_by: None, revoke_grant_option: false, cascade: false }' diff --git a/src/sqlparser/tests/testdata/select.yaml b/src/sqlparser/tests/testdata/select.yaml index 741790a18120d..0ac9f0addae87 100644 --- a/src/sqlparser/tests/testdata/select.yaml +++ b/src/sqlparser/tests/testdata/select.yaml @@ -1,112 +1,81 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. - input: SELECT sqrt(id) FROM foo formatted_sql: SELECT sqrt(id) FROM foo - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Function(Function { name: ObjectName([Ident { value: "sqrt", quote_style: None }]), args: [Unnamed(Expr(Identifier(Ident { value: "id", quote_style: None })))], over: None, distinct: false, order_by: [], filter: None }))], from: [TableWithJoins { relation: Table { name: ObjectName([Ident { value: "foo", quote_style: None }]), alias: None }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - -# Typed string literal + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Function(Function { name: ObjectName([Ident { value: "sqrt", quote_style: None }]), args: [Unnamed(Expr(Identifier(Ident { value: "id", quote_style: None })))], over: None, distinct: false, order_by: [], filter: None }))], from: [TableWithJoins { relation: Table { name: ObjectName([Ident { value: "foo", quote_style: None }]), alias: None, for_system_time_as_of_now: false }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT INT '1' formatted_sql: SELECT INT '1' - - input: SELECT (foo).v1.v2 FROM foo formatted_sql: SELECT (foo).v1.v2 FROM foo - - input: SELECT ((((foo).v1)).v2) FROM foo formatted_sql: SELECT (((foo).v1).v2) FROM foo - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Nested(FieldIdentifier(FieldIdentifier(Identifier(Ident { value: "foo", quote_style: None }), [Ident { value: "v1", quote_style: None }]), [Ident { value: "v2", quote_style: None }])))], from: [TableWithJoins { relation: Table { name: ObjectName([Ident { value: "foo", quote_style: None }]), alias: None }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Nested(FieldIdentifier(FieldIdentifier(Identifier(Ident { value: "foo", quote_style: None }), [Ident { value: "v1", quote_style: None }]), [Ident { value: "v2", quote_style: None }])))], from: [TableWithJoins { relation: Table { name: ObjectName([Ident { value: "foo", quote_style: None }]), alias: None, for_system_time_as_of_now: false }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT (foo.v1).v2 FROM foo formatted_sql: SELECT (foo.v1).v2 FROM foo - - input: SELECT (v1).v2 FROM foo formatted_sql: SELECT (v1).v2 FROM foo - - input: SELECT ((1,2,3)::foo).v1 formatted_sql: SELECT (CAST(ROW(1, 2, 3) AS foo)).v1 - - input: SELECT ((1,2,3)::foo).v1.v2 formatted_sql: SELECT (CAST(ROW(1, 
2, 3) AS foo)).v1.v2 - - input: SELECT (((1,2,3)::foo).v1).v2 formatted_sql: SELECT ((CAST(ROW(1, 2, 3) AS foo)).v1).v2 - - input: SELECT (foo).* FROM foo formatted_sql: SELECT (foo).* FROM foo - - input: SELECT ((foo.v1).v2).* FROM foo formatted_sql: SELECT (foo.v1).v2.* FROM foo - - input: SELECT ((1,2,3)::foo).v1.* formatted_sql: SELECT (CAST(ROW(1, 2, 3) AS foo)).v1.* - - input: SELECT (((((1,2,3)::foo).v1))).* formatted_sql: SELECT (CAST(ROW(1, 2, 3) AS foo)).v1.* - - input: SELECT * FROM generate_series('2'::INT,'10'::INT,'2'::INT) formatted_sql: SELECT * FROM generate_series(CAST('2' AS INT), CAST('10' AS INT), CAST('2' AS INT)) - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [Wildcard], from: [TableWithJoins { relation: TableFunction { name: ObjectName([Ident { value: "generate_series", quote_style: None }]), alias: None, args: [Unnamed(Expr(Cast { expr: Value(SingleQuotedString("2")), data_type: Int })), Unnamed(Expr(Cast { expr: Value(SingleQuotedString("10")), data_type: Int })), Unnamed(Expr(Cast { expr: Value(SingleQuotedString("2")), data_type: Int }))] }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [Wildcard], from: [TableWithJoins { relation: TableFunction { name: ObjectName([Ident { value: "generate_series", quote_style: None }]), alias: None, args: [Unnamed(Expr(Cast { expr: Value(SingleQuotedString("2")), data_type: Int })), Unnamed(Expr(Cast { expr: Value(SingleQuotedString("10")), data_type: Int })), Unnamed(Expr(Cast { expr: Value(SingleQuotedString("2")), data_type: Int }))] }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT * FROM unnest(Array[1,2,3]); formatted_sql: SELECT * FROM unnest(ARRAY[1, 2, 3]) - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [Wildcard], from: [TableWithJoins { relation: TableFunction { name: ObjectName([Ident { value: "unnest", quote_style: None }]), alias: None, args: [Unnamed(Expr(Array(Array { elem: [Value(Number("1")), Value(Number("2")), Value(Number("3"))], named: true })))] }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [Wildcard], from: [TableWithJoins { relation: TableFunction { name: ObjectName([Ident { value: "unnest", quote_style: None }]), alias: None, args: [Unnamed(Expr(Array(Array { elem: [Value(Number("1")), Value(Number("2")), Value(Number("3"))], named: true })))] }, joins: [] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT id, fname, lname FROM customer WHERE salary <> 'Not Provided' AND salary <> '' formatted_sql: SELECT id, fname, lname FROM customer WHERE salary <> 'Not Provided' AND salary <> '' - - input: SELECT id FROM customer WHERE NOT salary = '' formatted_sql: SELECT id FROM customer WHERE NOT salary = '' - - input: SELECT * FROM t LIMIT 1 FETCH FIRST ROWS ONLY - error_msg: "sql parser error: Cannot specify both LIMIT and FETCH" - + error_msg: 'sql parser error: Cannot specify both LIMIT and FETCH' - input: SELECT * FROM t FETCH FIRST ROWS WITH TIES 
- error_msg: "sql parser error: WITH TIES cannot be specified without ORDER BY clause" - + error_msg: 'sql parser error: WITH TIES cannot be specified without ORDER BY clause' - input: select * from (select 1 from 1); - error_msg: "sql parser error: Expected identifier, found: 1" - + error_msg: |- + sql parser error: Expected identifier, found: 1 at line:1, column:31 + Near "from (select 1 from 1" - input: select * from (select * from tumble(t, x, interval '10' minutes)) - error_msg: "sql parser error: Expected ), found: minutes" - + error_msg: |- + sql parser error: Expected ), found: minutes at line:1, column:62 + Near "(t, x, interval '10'" - input: SELECT 1, FROM t error_msg: 'sql parser error: syntax error at or near "FROM"' - - input: SELECT 1, WHERE true error_msg: 'sql parser error: syntax error at or near "WHERE"' - - input: SELECT timestamp with time zone '2022-10-01 12:00:00Z' AT TIME ZONE 'US/Pacific' formatted_sql: SELECT TIMESTAMP WITH TIME ZONE '2022-10-01 12:00:00Z' AT TIME ZONE 'US/Pacific' - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(AtTimeZone { timestamp: TypedString { data_type: Timestamp(true), value: "2022-10-01 12:00:00Z" }, time_zone: "US/Pacific" })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(AtTimeZone { timestamp: TypedString { data_type: Timestamp(true), value: "2022-10-01 12:00:00Z" }, time_zone: "US/Pacific" })], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT 1e6 formatted_sql: SELECT 1e6 - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Value(Number("1e6")))], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Value(Number("1e6")))], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT 1.25E6 formatted_sql: SELECT 1.25e6 - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Value(Number("1.25e6")))], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Value(Number("1.25e6")))], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT 1e-6 formatted_sql: SELECT 1e-6 - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Value(Number("1e-6")))], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Value(Number("1e-6")))], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT -1e6 formatted_sql: 
SELECT -1e6 - formatted_ast: | - Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Value(Number("-1e6")))], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None }) - + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Value(Number("-1e6")))], from: [], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' - input: SELECT 1::float(0) - error_msg: | - sql parser error: precision for type float must be at least 1 bit + error_msg: 'sql parser error: precision for type float must be at least 1 bit' - input: SELECT 1::float(54) - error_msg: | - sql parser error: precision for type float must be less than 54 bits + error_msg: 'sql parser error: precision for type float must be less than 54 bits' - input: SELECT 1::int(2) - error_msg: | - sql parser error: Expected end of statement, found: ( + error_msg: |- + sql parser error: Expected end of statement, found: ( at line:1, column:14 + Near "SELECT 1::int" +- input: select id1, a1, id2, a2 from stream as S join version FOR SYSTEM_TIME AS OF NOW() AS V on id1= id2 + formatted_sql: SELECT id1, a1, id2, a2 FROM stream AS S JOIN version FOR SYSTEM_TIME AS OF NOW() AS V ON id1 = id2 + formatted_ast: 'Query(Query { with: None, body: Select(Select { distinct: All, projection: [UnnamedExpr(Identifier(Ident { value: "id1", quote_style: None })), UnnamedExpr(Identifier(Ident { value: "a1", quote_style: None })), UnnamedExpr(Identifier(Ident { value: "id2", quote_style: None })), UnnamedExpr(Identifier(Ident { value: "a2", quote_style: None }))], from: [TableWithJoins { relation: Table { name: ObjectName([Ident { value: "stream", quote_style: None }]), alias: Some(TableAlias { name: Ident { value: "S", quote_style: None }, columns: [] }), for_system_time_as_of_now: false }, joins: [Join { relation: Table { name: ObjectName([Ident { value: "version", quote_style: None }]), alias: Some(TableAlias { name: Ident { value: "V", quote_style: None }, columns: [] }), for_system_time_as_of_now: true }, join_operator: Inner(On(BinaryOp { left: Identifier(Ident { value: "id1", quote_style: None }), op: Eq, right: Identifier(Ident { value: "id2", quote_style: None }) })) }] }], lateral_views: [], selection: None, group_by: [], having: None }), order_by: [], limit: None, offset: None, fetch: None })' diff --git a/src/sqlparser/tests/testdata/set.yaml b/src/sqlparser/tests/testdata/set.yaml new file mode 100644 index 0000000000000..88aaa61949606 --- /dev/null +++ b/src/sqlparser/tests/testdata/set.yaml @@ -0,0 +1,17 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. 
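The new select.yaml case above exercises the temporal-join syntax FOR SYSTEM_TIME AS OF NOW(), which the AST now records as a for_system_time_as_of_now flag on each table factor (the field also appears in the reflowed formatted_ast strings earlier in that file). A minimal sketch, with illustrative types rather than the actual risingwave_sqlparser definitions, of how such a flag could round-trip through formatting:

    use std::fmt;

    struct Table {
        name: String,
        alias: Option<String>,
        for_system_time_as_of_now: bool,
    }

    impl fmt::Display for Table {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{}", self.name)?;
            // The clause is printed before the alias, matching the expected
            // `version FOR SYSTEM_TIME AS OF NOW() AS V` output above.
            if self.for_system_time_as_of_now {
                write!(f, " FOR SYSTEM_TIME AS OF NOW()")?;
            }
            if let Some(alias) = &self.alias {
                write!(f, " AS {}", alias)?;
            }
            Ok(())
        }
    }

    fn main() {
        let t = Table {
            name: "version".into(),
            alias: Some("V".into()),
            for_system_time_as_of_now: true,
        };
        assert_eq!(t.to_string(), "version FOR SYSTEM_TIME AS OF NOW() AS V");
    }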
+- input: SET TIME ZONE LOCAL + formatted_sql: SET TIME ZONE LOCAL +- input: SET TIME ZONE DEFAULT + formatted_sql: SET TIME ZONE DEFAULT +- input: SET TIME ZONE "Asia/Shanghai" + formatted_sql: SET TIME ZONE "Asia/Shanghai" +- input: SET TIME ZONE 'Asia/Shanghai' + error_msg: |- + sql parser error: Expected a value, found: EOF at the end + Near "SET TIME ZONE 'Asia/Shanghai'" +- input: SET TIME ZONE "UTC" + formatted_sql: SET TIME ZONE "UTC" +- input: SET TIME ZONE UTC + formatted_sql: SET TIME ZONE UTC +- input: set time = '1'; + formatted_sql: SET time = '1' diff --git a/src/sqlparser/tests/testdata/show.yaml b/src/sqlparser/tests/testdata/show.yaml index e2ca4c6eacb1a..079021c135b4b 100644 --- a/src/sqlparser/tests/testdata/show.yaml +++ b/src/sqlparser/tests/testdata/show.yaml @@ -1,74 +1,46 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. - input: SHOW DATABASES formatted_sql: SHOW DATABASES - formatted_ast: | - ShowObjects(Database) - + formatted_ast: ShowObjects(Database) - input: SHOW SCHEMAS formatted_sql: SHOW SCHEMAS - formatted_ast: | - ShowObjects(Schema) - + formatted_ast: ShowObjects(Schema) - input: SHOW TABLES formatted_sql: SHOW TABLES - formatted_ast: | - ShowObjects(Table { schema: None }) - + formatted_ast: 'ShowObjects(Table { schema: None })' - input: SHOW TABLES FROM t formatted_sql: SHOW TABLES FROM t - formatted_ast: | - ShowObjects(Table { schema: Some(Ident { value: "t", quote_style: None }) }) - + formatted_ast: 'ShowObjects(Table { schema: Some(Ident { value: "t", quote_style: None }) })' - input: SHOW VIEWS formatted_sql: SHOW VIEWS - formatted_ast: | - ShowObjects(View { schema: None }) - + formatted_ast: 'ShowObjects(View { schema: None })' - input: SHOW VIEWS FROM t formatted_sql: SHOW VIEWS FROM t - formatted_ast: | - ShowObjects(View { schema: Some(Ident { value: "t", quote_style: None }) }) - + formatted_ast: 'ShowObjects(View { schema: Some(Ident { value: "t", quote_style: None }) })' - input: SHOW MATERIALIZED VIEWS formatted_sql: SHOW MATERIALIZED VIEWS - formatted_ast: | - ShowObjects(MaterializedView { schema: None }) - + formatted_ast: 'ShowObjects(MaterializedView { schema: None })' - input: SHOW INTERNAL TABLES formatted_sql: SHOW INTERNAL TABLES - formatted_ast: | - ShowObjects(InternalTable { schema: None }) - + formatted_ast: 'ShowObjects(InternalTable { schema: None })' - input: SHOW INTERNAL TABLES FROM t formatted_sql: SHOW INTERNAL TABLES FROM t - formatted_ast: | - ShowObjects(InternalTable { schema: Some(Ident { value: "t", quote_style: None }) }) - + formatted_ast: 'ShowObjects(InternalTable { schema: Some(Ident { value: "t", quote_style: None }) })' - input: SHOW MATERIALIZED VIEWS FROM t formatted_sql: SHOW MATERIALIZED VIEWS FROM t - formatted_ast: | - ShowObjects(MaterializedView { schema: Some(Ident { value: "t", quote_style: None }) }) - + formatted_ast: 'ShowObjects(MaterializedView { schema: Some(Ident { value: "t", quote_style: None }) })' - input: SHOW SOURCES FROM t formatted_sql: SHOW SOURCES FROM t - formatted_ast: | - ShowObjects(Source { schema: Some(Ident { value: "t", quote_style: None }) }) - + formatted_ast: 'ShowObjects(Source { schema: Some(Ident { value: "t", quote_style: None }) })' - input: DESCRIBE schema.t formatted_sql: DESCRIBE schema.t - formatted_ast: | - Describe { name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "t", quote_style: None }]) } - + formatted_ast: 'Describe { name: ObjectName([Ident { value: 
"schema", quote_style: None }, Ident { value: "t", quote_style: None }]) }' - input: SHOW COLUMNS FROM schema.t formatted_sql: SHOW COLUMNS FROM schema.t - formatted_ast: | - ShowObjects(Columns { table: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "t", quote_style: None }]) }) - + formatted_ast: 'ShowObjects(Columns { table: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "t", quote_style: None }]) })' - input: SHOW CREATE MATERIALIZED VIEW schema.mv formatted_sql: SHOW CREATE MATERIALIZED VIEW schema.mv - formatted_ast: | - ShowCreateObject { create_type: MaterializedView, name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "mv", quote_style: None }]) } - + formatted_ast: 'ShowCreateObject { create_type: MaterializedView, name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "mv", quote_style: None }]) }' - input: SHOW CREATE VIEW schema.v formatted_sql: SHOW CREATE VIEW schema.v - formatted_ast: | - ShowCreateObject { create_type: View, name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "v", quote_style: None }]) } + formatted_ast: 'ShowCreateObject { create_type: View, name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "v", quote_style: None }]) }' diff --git a/src/sqlparser/tests/testdata/struct.yaml b/src/sqlparser/tests/testdata/struct.yaml index 5a3815b3f99c7..4898714fc5858 100644 --- a/src/sqlparser/tests/testdata/struct.yaml +++ b/src/sqlparser/tests/testdata/struct.yaml @@ -1,5 +1,5 @@ +# This file is automatically generated. See `src/sqlparser/test_runner/src/bin/apply.rs` for more information. - input: SELECT (1 * 2, 1.0)::foo; formatted_sql: SELECT CAST(ROW(1 * 2, 1.0) AS foo) - - input: SELECT ROW(1 * 2, 1.0)::foo; formatted_sql: SELECT CAST(ROW(1 * 2, 1.0) AS foo) diff --git a/src/storage/Cargo.toml b/src/storage/Cargo.toml index 458fddc776fbf..ce12fee5e50bc 100644 --- a/src/storage/Cargo.toml +++ b/src/storage/Cargo.toml @@ -16,7 +16,7 @@ normal = ["workspace-hack"] [dependencies] arc-swap = "1" async-trait = "0.1" -auto_enums = { version = "0.7", features = ["futures"] } +auto_enums = { version = "0.8", features = ["futures03"] } await-tree = "0.1.1" bytes = { version = "1", features = ["serde"] } crossbeam = "0.8.1" @@ -26,6 +26,7 @@ enum-as-inner = "0.5" fail = "0.5" futures = { version = "0.3", default-features = false, features = ["alloc"] } futures-async-stream = "0.2" +hex = "0.4" itertools = "0.10" libc = "0.2" lz4 = "1.23.1" @@ -48,6 +49,7 @@ scopeguard = "1" sled = "0.34.7" spin = "0.9" sync-point = { path = "../utils/sync-point" } +sysinfo = { version = "0.26", default-features = false } tempfile = "3" thiserror = "1" # tikv-client = { git = "https://github.com/tikv/client-rust", rev = "5714b2", optional = true } @@ -67,8 +69,15 @@ xxhash-rust = { version = "0.8.5", features = ["xxh32", "xxh64"] } zstd = "0.11.2" [target.'cfg(target_os = "linux")'.dependencies] +procfs = { version = "0.12", default-features = false } +libc = "0.2" nix = { version = "0.25", features = ["fs", "mman"] } +[target.'cfg(target_os = "macos")'.dependencies] +darwin-libproc = { git = "https://github.com/risingwavelabs/darwin-libproc.git", rev = "a502be24bd0971463f5bcbfe035a248d8ba503b7" } +libc = "0.2.72" +mach = "0.3.2" + [target.'cfg(not(madsim))'.dependencies] workspace-hack = { path = "../workspace-hack" } diff --git a/src/storage/backup/integration_tests/common.sh b/src/storage/backup/integration_tests/common.sh index 
672463df6a926..163f4736f3abc 100644 --- a/src/storage/backup/integration_tests/common.sh +++ b/src/storage/backup/integration_tests/common.sh @@ -19,10 +19,6 @@ function clean_etcd_data() { function start_cluster() { cargo make d ci-meta-backup-test 1>/dev/null 2>&1 -} - -function wait_cluster_ready() { - # TODO #6482: wait cluster to finish actor migration and other recovery stuff deterministically. sleep 5 } diff --git a/src/storage/backup/integration_tests/run_all.sh b/src/storage/backup/integration_tests/run_all.sh index b89e2a77a0040..41dce2d51a0e0 100644 --- a/src/storage/backup/integration_tests/run_all.sh +++ b/src/storage/backup/integration_tests/run_all.sh @@ -6,6 +6,7 @@ tests=( \ "test_basic.sh" \ "test_pin_sst.sh" \ "test_query_backup.sh" \ +"test_set_config.sh" \ ) for t in "${tests[@]}" do diff --git a/src/storage/backup/integration_tests/test_basic.sh b/src/storage/backup/integration_tests/test_basic.sh index aefb4068949fb..afaee3ac6c507 100644 --- a/src/storage/backup/integration_tests/test_basic.sh +++ b/src/storage/backup/integration_tests/test_basic.sh @@ -43,7 +43,6 @@ if ! psql -h localhost -p 4566 -d dev -U root -c "show materialized views;" | gr exit 1 fi echo "restore snapshot ${job_id_2} succeeded" -wait_cluster_ready query_mvs # any other ops in the restored cluster drop_mvs diff --git a/src/storage/backup/integration_tests/test_set_config.sh b/src/storage/backup/integration_tests/test_set_config.sh new file mode 100644 index 0000000000000..2539df02882e0 --- /dev/null +++ b/src/storage/backup/integration_tests/test_set_config.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +. "${DIR}/common.sh" + +stop_cluster +clean_all_data +start_cluster + +execute_sql_and_expect \ +"SHOW parameters;" \ +"backup_storage_directory | backup" + +execute_sql_and_expect \ +"SHOW parameters;" \ +"backup_storage_url | minio://hummockadmin:hummockadmin@127.0.0.1:9301/hummock001" + +backup + +execute_sql_and_expect \ +"SELECT meta_snapshot_id FROM rw_catalog.rw_meta_snapshot;" \ +"1 row" + +execute_sql_and_expect \ +"alter system set backup_storage_directory to backup_1;" \ +"ALTER_SYSTEM" +# system params application is async. 
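+# (A hypothetical, more deterministic alternative to the fixed sleep would be
+# to poll until the changed parameter is visible, reusing the psql invocation
+# style from test_basic.sh, e.g.:
+#   until psql -h localhost -p 4566 -d dev -U root -c "SHOW parameters;" \
+#     | grep -q "backup_1"; do sleep 1; done
+# The fixed sleep below assumes 5s is always enough for async application.)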
+sleep 5 + +execute_sql_and_expect \ +"SELECT meta_snapshot_id FROM rw_catalog.rw_meta_snapshot;" \ +"0 row" + +backup +backup +backup +execute_sql_and_expect \ +"SELECT meta_snapshot_id FROM rw_catalog.rw_meta_snapshot;" \ +"3 row" + +execute_sql_and_expect \ +"alter system set backup_storage_directory to backup;" \ +"ALTER_SYSTEM" +sleep 5 + +execute_sql_and_expect \ +"SELECT meta_snapshot_id FROM rw_catalog.rw_meta_snapshot;" \ +"1 row" + +execute_sql_and_expect \ +"alter system set backup_storage_url to memory;" \ +"ALTER_SYSTEM" +sleep 5 + +execute_sql_and_expect \ +"SELECT meta_snapshot_id FROM rw_catalog.rw_meta_snapshot;" \ +"0 row" + +backup +backup +execute_sql_and_expect \ +"SELECT meta_snapshot_id FROM rw_catalog.rw_meta_snapshot;" \ +"2 row" + +execute_sql_and_expect \ +"alter system set backup_storage_url to \"minio://hummockadmin:hummockadmin@127.0.0.1:9301/hummock001\"" \ +"ALTER_SYSTEM" +sleep 5 + +execute_sql_and_expect \ +"SELECT meta_snapshot_id FROM rw_catalog.rw_meta_snapshot;" \ +"1 row" + +backup +execute_sql_and_expect \ +"SELECT meta_snapshot_id FROM rw_catalog.rw_meta_snapshot;" \ +"2 row" + +echo "test succeeded" \ No newline at end of file diff --git a/src/storage/backup/src/lib.rs b/src/storage/backup/src/lib.rs index 44b9c39c9f87b..ab85d68095216 100644 --- a/src/storage/backup/src/lib.rs +++ b/src/storage/backup/src/lib.rs @@ -35,15 +35,13 @@ pub mod error; pub mod meta_snapshot; pub mod storage; +use std::collections::HashSet; use std::hash::Hasher; use itertools::Itertools; use risingwave_hummock_sdk::compaction_group::hummock_version_ext::HummockVersionExt; -use risingwave_hummock_sdk::{HummockSstableId, HummockVersionId}; -use risingwave_pb::backup_service::{ - MetaSnapshotManifest as ProstMetaSnapshotManifest, - MetaSnapshotMetadata as ProstMetaSnapshotMetadata, -}; +use risingwave_hummock_sdk::{HummockSstableObjectId, HummockVersionId}; +use risingwave_pb::backup_service::{PbMetaSnapshotManifest, PbMetaSnapshotMetadata}; use risingwave_pb::hummock::HummockVersion; use serde::{Deserialize, Serialize}; @@ -57,7 +55,7 @@ pub type MetaBackupJobId = u64; pub struct MetaSnapshotMetadata { pub id: MetaSnapshotId, pub hummock_version_id: HummockVersionId, - pub ssts: Vec, + pub ssts: Vec, pub max_committed_epoch: u64, pub safe_epoch: u64, } @@ -67,7 +65,9 @@ impl MetaSnapshotMetadata { Self { id, hummock_version_id: v.id, - ssts: v.get_sst_ids(), + ssts: HashSet::::from_iter(v.get_object_ids()) + .into_iter() + .collect_vec(), max_committed_epoch: v.max_committed_epoch, safe_epoch: v.safe_epoch, } @@ -99,7 +99,7 @@ pub fn xxhash64_verify(data: &[u8], checksum: u64) -> BackupResult<()> { Ok(()) } -impl From<&MetaSnapshotMetadata> for ProstMetaSnapshotMetadata { +impl From<&MetaSnapshotMetadata> for PbMetaSnapshotMetadata { fn from(m: &MetaSnapshotMetadata) -> Self { Self { id: m.id, @@ -110,7 +110,7 @@ impl From<&MetaSnapshotMetadata> for ProstMetaSnapshotMetadata { } } -impl From<&MetaSnapshotManifest> for ProstMetaSnapshotManifest { +impl From<&MetaSnapshotManifest> for PbMetaSnapshotManifest { fn from(m: &MetaSnapshotManifest) -> Self { Self { manifest_id: m.manifest_id, diff --git a/src/storage/backup/src/storage.rs b/src/storage/backup/src/storage.rs index bc9108bdd2e57..ff8ef286d632b 100644 --- a/src/storage/backup/src/storage.rs +++ b/src/storage/backup/src/storage.rs @@ -24,6 +24,7 @@ use crate::{ }; pub type MetaSnapshotStorageRef = Arc; +pub type BoxedMetaSnapshotStorage = Box; #[async_trait::async_trait] pub trait MetaSnapshotStorage: 'static + Sync + 
Send { diff --git a/src/storage/benches/bench_block_iter.rs b/src/storage/benches/bench_block_iter.rs index a0b8d3a27057d..8de5f05b20c2b 100644 --- a/src/storage/benches/bench_block_iter.rs +++ b/src/storage/benches/bench_block_iter.rs @@ -12,8 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +#![feature(once_cell)] +use std::sync::LazyLock; + use bytes::{BufMut, Bytes, BytesMut}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use risingwave_hummock_sdk::key::FullKey; use risingwave_storage::hummock::{ Block, BlockBuilder, BlockBuilderOptions, BlockHolder, BlockIterator, CompressionAlgorithm, }; @@ -22,6 +26,7 @@ const TABLES_PER_SSTABLE: u32 = 10; const KEYS_PER_TABLE: u64 = 100; const RESTART_INTERVAL: usize = 16; const BLOCK_CAPACITY: usize = TABLES_PER_SSTABLE as usize * KEYS_PER_TABLE as usize * 64; +const EXCHANGE_INTERVAL: usize = RESTART_INTERVAL / 2; fn block_iter_next(block: BlockHolder) { let mut iter = BlockIterator::new(block); @@ -85,11 +90,22 @@ fn bench_block_iter(c: &mut Criterion) { let l = data.len(); let block = BlockHolder::from_owned_block(Box::new(Block::decode(data, l).unwrap())); let mut iter = BlockIterator::new(block); + let mut item_count = 0; + let mut ext_index = 0; + let (mut k_ext, mut v_ext) = (&DATA_LEN_SET[ext_index].0, &DATA_LEN_SET[ext_index].1); + iter.seek_to_first(); for t in 1..=TABLES_PER_SSTABLE { for i in 1..=KEYS_PER_TABLE { - assert_eq!(iter.key(), key(t, i).to_vec()); - assert_eq!(iter.value(), value(i).to_vec()); + item_count += 1; + + if item_count % EXCHANGE_INTERVAL == 0 { + ext_index = (ext_index + 1) % DATA_LEN_SET.len(); + (k_ext, v_ext) = (&DATA_LEN_SET[ext_index].0, &DATA_LEN_SET[ext_index].1); + } + + assert_eq!(iter.key(), FullKey::decode(&key(t, i, k_ext))); + assert_eq!(iter.value(), value(i, v_ext).to_vec()); iter.next(); } } @@ -99,6 +115,20 @@ fn bench_block_iter(c: &mut Criterion) { criterion_group!(benches, bench_block_iter); criterion_main!(benches); +static DATA_LEN_SET: LazyLock, Vec)>> = LazyLock::new(|| { + vec![ + (vec![b'a'; 10], vec![b'a'; 10]), // U8U8 + (vec![b'a'; 10], vec![b'a'; 300]), // U8U16 + (vec![b'a'; 100], vec![b'a'; 65550]), // U8U32 + (vec![b'a'; 300], vec![b'a'; 100]), // U16U8 + (vec![b'a'; 300], vec![b'a'; 300]), // U16U16 + (vec![b'a'; 300], vec![b'a'; 65550]), // U16U32 + (vec![b'a'; 65550], vec![b'a'; 100]), // U32U8 + (vec![b'a'; 65550], vec![b'a'; 300]), // U32U16 + (vec![b'a'; 65550], vec![b'a'; 65550]), // U32U32 + ] +}); + fn build_block_data(t: u32, i: u64) -> Bytes { let options = BlockBuilderOptions { capacity: BLOCK_CAPACITY, @@ -106,23 +136,36 @@ fn build_block_data(t: u32, i: u64) -> Bytes { restart_interval: RESTART_INTERVAL, }; let mut builder = BlockBuilder::new(options); + let mut item_count = 0; + let mut ext_index = 0; + let (mut k_ext, mut v_ext) = (&DATA_LEN_SET[ext_index].0, &DATA_LEN_SET[ext_index].1); + for tt in 1..=t { for ii in 1..=i { - builder.add(&key(tt, ii), &value(ii)); + item_count += 1; + + if item_count % EXCHANGE_INTERVAL == 0 { + ext_index = (ext_index + 1) % DATA_LEN_SET.len(); + (k_ext, v_ext) = (&DATA_LEN_SET[ext_index].0, &DATA_LEN_SET[ext_index].1); + } + + builder.add(FullKey::decode(&key(tt, ii, k_ext)), &value(ii, v_ext)); } } Bytes::from(builder.build().to_vec()) } -fn key(t: u32, i: u64) -> Bytes { +fn key(t: u32, i: u64, ext: &[u8]) -> Bytes { let mut buf = BytesMut::new(); + buf.put_slice(ext); buf.put_u32(t); buf.put_u64(i); buf.freeze() } -fn value(i: u64) 
-> Bytes { +fn value(i: u64, ext: &[u8]) -> Bytes { let mut buf = BytesMut::new(); buf.put_u64(i); + buf.put(ext); buf.freeze() } diff --git a/src/storage/benches/bench_compactor.rs b/src/storage/benches/bench_compactor.rs index 78879bf667328..4ee7b63aed13e 100644 --- a/src/storage/benches/bench_compactor.rs +++ b/src/storage/benches/bench_compactor.rs @@ -75,7 +75,7 @@ const MAX_KEY_COUNT: usize = 128 * 1024; async fn build_table( sstable_store: SstableStoreRef, - sstable_id: u64, + sstable_object_id: u64, range: Range, epoch: u64, ) -> SstableInfo { @@ -87,14 +87,14 @@ async fn build_table( compression_algorithm: CompressionAlgorithm::None, }; let writer = sstable_store.create_sst_writer( - sstable_id, + sstable_object_id, SstableWriterOptions { capacity_hint: None, tracker: None, policy: CachePolicy::Fill(true), }, ); - let mut builder = SstableBuilder::for_test(sstable_id, writer, opt); + let mut builder = SstableBuilder::for_test(sstable_object_id, writer, opt); let value = b"1234567890123456789"; let mut full_key = test_key_of(0, epoch); let table_key_len = full_key.user_key.table_key.len(); @@ -103,7 +103,11 @@ async fn build_table( let end = start + 8; full_key.user_key.table_key[table_key_len - 8..].copy_from_slice(&i.to_be_bytes()); builder - .add(&full_key, HummockValue::put(&value[start..end]), true) + .add( + full_key.to_ref(), + HummockValue::put(&value[start..end]), + true, + ) .await .unwrap(); } @@ -229,8 +233,18 @@ fn bench_merge_iterator_compactor(c: &mut Criterion) { c.bench_function("bench_merge_iterator", |b| { b.to_async(&runtime).iter(|| { let sub_iters = vec![ - ConcatSstableIterator::new(level1.clone(), KeyRange::inf(), sstable_store.clone()), - ConcatSstableIterator::new(level2.clone(), KeyRange::inf(), sstable_store.clone()), + ConcatSstableIterator::new( + vec![0], + level1.clone(), + KeyRange::inf(), + sstable_store.clone(), + ), + ConcatSstableIterator::new( + vec![0], + level2.clone(), + KeyRange::inf(), + sstable_store.clone(), + ), ]; let iter = UnorderedMergeIteratorInner::for_compactor(sub_iters); let sstable_store1 = sstable_store.clone(); diff --git a/src/storage/benches/bench_lru_cache.rs b/src/storage/benches/bench_lru_cache.rs index e83aae40cca50..553ef2cabc780 100644 --- a/src/storage/benches/bench_lru_cache.rs +++ b/src/storage/benches/bench_lru_cache.rs @@ -32,16 +32,16 @@ pub struct Block { offset: u64, } -fn make_key(sst_id: u64, block_idx: u64) -> Bytes { +fn make_key(sst_object_id: u64, block_idx: u64) -> Bytes { let mut key = BytesMut::with_capacity(16); - key.put_u64_le(sst_id); + key.put_u64_le(sst_object_id); key.put_u64_le(block_idx); key.freeze() } #[async_trait] pub trait CacheBase: Sync + Send { - async fn try_get_with(&self, sst_id: u64, block_idx: u64) -> HummockResult>; + async fn try_get_with(&self, sst_object_id: u64, block_idx: u64) -> HummockResult>; } pub struct MokaCache { @@ -64,12 +64,12 @@ impl MokaCache { #[async_trait] impl CacheBase for MokaCache { - async fn try_get_with(&self, sst_id: u64, block_idx: u64) -> HummockResult> { - let k = make_key(sst_id, block_idx); + async fn try_get_with(&self, sst_object_id: u64, block_idx: u64) -> HummockResult> { + let k = make_key(sst_object_id, block_idx); let latency = self.fake_io_latency; self.inner .try_get_with(k, async move { - match get_fake_block(sst_id, block_idx, latency).await { + match get_fake_block(sst_object_id, block_idx, latency).await { Ok(ret) => Ok(Arc::new(ret)), Err(e) => Err(e), } @@ -95,17 +95,17 @@ impl LruCacheImpl { #[async_trait] impl CacheBase for 
LruCacheImpl { - async fn try_get_with(&self, sst_id: u64, block_idx: u64) -> HummockResult> { + async fn try_get_with(&self, sst_object_id: u64, block_idx: u64) -> HummockResult> { let mut hasher = DefaultHasher::new(); - let key = (sst_id, block_idx); - sst_id.hash(&mut hasher); + let key = (sst_object_id, block_idx); + sst_object_id.hash(&mut hasher); block_idx.hash(&mut hasher); let h = hasher.finish(); let latency = self.fake_io_latency; let entry = self .inner .lookup_with_request_dedup(h, key, true, || async move { - get_fake_block(sst_id, block_idx, latency) + get_fake_block(sst_object_id, block_idx, latency) .await .map(|block| (Arc::new(block), 1)) }) @@ -141,11 +141,14 @@ fn bench_cache(block_cache: Arc, c: &mut Criterion, k let mut rng = SmallRng::seed_from_u64(seed); let t = Instant::now(); for _ in 0..key_count { - let sst_id = rng.next_u64() % 8; + let sst_object_id = rng.next_u64() % 8; let block_offset = rng.next_u64() % key_count; - let block = cache.try_get_with(sst_id, block_offset).await.unwrap(); + let block = cache + .try_get_with(sst_object_id, block_offset) + .await + .unwrap(); assert_eq!(block.offset, block_offset); - assert_eq!(block.sst, sst_id); + assert_eq!(block.sst, sst_object_id); } t.elapsed() }); @@ -169,11 +172,14 @@ fn bench_cache(block_cache: Arc, c: &mut Criterion, k let seed = 10244021u64; let mut rng = SmallRng::seed_from_u64(seed); for _ in 0..(key_count / 100) { - let sst_id = rng.next_u64() % 1024; + let sst_object_id = rng.next_u64() % 1024; let block_offset = rng.next_u64() % 1024; - let block = cache.try_get_with(sst_id, block_offset).await.unwrap(); + let block = cache + .try_get_with(sst_object_id, block_offset) + .await + .unwrap(); assert_eq!(block.offset, block_offset); - assert_eq!(block.sst, sst_id); + assert_eq!(block.sst, sst_object_id); } }; current.block_on(f); diff --git a/src/storage/benches/bench_multi_builder.rs b/src/storage/benches/bench_multi_builder.rs index 8f6f28a814a74..a7a162cf213c0 100644 --- a/src/storage/benches/bench_multi_builder.rs +++ b/src/storage/benches/bench_multi_builder.rs @@ -102,7 +102,7 @@ async fn build_tables( for i in RANGE { builder .add_full_key( - &FullKey::from_user_key(test_user_key_of(i).as_ref(), 1), + FullKey::from_user_key(test_user_key_of(i).as_ref(), 1), HummockValue::put(VALUE), true, ) diff --git a/src/storage/compactor/Cargo.toml b/src/storage/compactor/Cargo.toml index e52bdb1c275c6..d66648e8f316a 100644 --- a/src/storage/compactor/Cargo.toml +++ b/src/storage/compactor/Cargo.toml @@ -15,6 +15,7 @@ ignored = ["workspace-hack"] normal = ["workspace-hack"] [dependencies] +anyhow = "1" async-trait = "0.1" clap = { version = "4", features = ["derive"] } prometheus = { version = "0.13" } @@ -26,6 +27,8 @@ risingwave_object_store = { path = "../../object_store" } risingwave_pb = { path = "../../prost" } risingwave_rpc_client = { path = "../../rpc_client" } risingwave_storage = { path = "../../storage" } +serde = { version = "1", features = ["derive"] } +serde_json = "1" tokio = { version = "0.2", package = "madsim-tokio", features = [ "fs", "rt", diff --git a/src/storage/compactor/src/compactor_observer/observer_manager.rs b/src/storage/compactor/src/compactor_observer/observer_manager.rs index 2e9996ffaa699..6b539ecadfa27 100644 --- a/src/storage/compactor/src/compactor_observer/observer_manager.rs +++ b/src/storage/compactor/src/compactor_observer/observer_manager.rs @@ -21,6 +21,7 @@ use risingwave_hummock_sdk::filter_key_extractor::{ FilterKeyExtractorImpl, FilterKeyExtractorManagerRef, 
}; use risingwave_pb::catalog::Table; +use risingwave_pb::meta::relation::RelationInfo; use risingwave_pb::meta::subscribe_response::{Info, Operation}; use risingwave_pb::meta::SubscribeResponse; @@ -39,17 +40,24 @@ impl ObserverState for CompactorObserverNode { }; match info.to_owned() { - Info::Table(table_catalog) => { - assert!( - resp.version > self.version, - "resp version={:?}, current version={:?}", - resp.version, - self.version - ); + Info::RelationGroup(relation_group) => { + for relation in relation_group.relations { + match relation.relation_info.unwrap() { + RelationInfo::Table(table_catalog) => { + assert!( + resp.version > self.version, + "resp version={:?}, current version={:?}", + resp.version, + self.version + ); - self.handle_catalog_notification(resp.operation(), table_catalog); + self.handle_catalog_notification(resp.operation(), table_catalog); - self.version = resp.version; + self.version = resp.version; + } + _ => panic!("error type notification"), + }; + } } Info::HummockVersionDeltas(_) => {} Info::SystemParams(p) => { diff --git a/src/storage/compactor/src/lib.rs b/src/storage/compactor/src/lib.rs index 37918b6d44589..5ad04828b4815 100644 --- a/src/storage/compactor/src/lib.rs +++ b/src/storage/compactor/src/lib.rs @@ -15,6 +15,7 @@ mod compactor_observer; mod rpc; mod server; +mod telemetry; use clap::Parser; use risingwave_common_proc_macro::OverrideConfig; @@ -27,19 +28,14 @@ pub struct CompactorOpts { // TODO: rename to listen_addr and separate out the port. /// The address that this service listens to. /// Usually the localhost + desired port. - #[clap( - long, - alias = "host", - env = "RW_LISTEN_ADDR", - default_value = "127.0.0.1:6660" - )] + #[clap(long, env = "RW_LISTEN_ADDR", default_value = "127.0.0.1:6660")] pub listen_addr: String, /// The address for contacting this instance of the service. /// This would be synonymous with the service's "public address" /// or "identifying address". /// Optional, we will use listen_addr if not specified. - #[clap(long, env = "RW_ADVERTISE_ADDR", alias = "client-address")] + #[clap(long, env = "RW_ADVERTISE_ADDR")] pub advertise_addr: Option, // TODO: This is currently unused. @@ -56,12 +52,6 @@ pub struct CompactorOpts { #[clap(long, env = "RW_META_ADDR", default_value = "http://127.0.0.1:5690")] pub meta_address: String, - /// Of the form `hummock+{object_store}` where `object_store` - /// is one of `s3://{path}`, `s3-compatible://{path}`, `minio://{path}`, `disk://{path}`, - /// `memory` or `memory-shared`. - #[clap(long, env = "RW_STATE_STORE")] - pub state_store: Option, - #[clap(long, env = "RW_COMPACTION_WORKER_THREADS_NUMBER")] pub compaction_worker_threads_number: Option, @@ -99,7 +89,6 @@ pub fn start(opts: CompactorOpts) -> Pin + Send>> { // slow compile in release mode. Box::pin(async move { tracing::info!("Compactor node options: {:?}", opts); - warn_future_deprecate_options(&opts); tracing::info!("meta address: {}", opts.meta_address.clone()); let listen_addr = opts.listen_addr.parse().unwrap(); @@ -123,9 +112,3 @@ pub fn start(opts: CompactorOpts) -> Pin + Send>> { observer_join_handle.abort(); }) } - -fn warn_future_deprecate_options(opts: &CompactorOpts) { - if opts.state_store.is_some() { - tracing::warn!("`--state-store` will not be accepted by compactor node in the next release. 
Please consider moving this argument to the meta node."); - } -} diff --git a/src/storage/compactor/src/server.rs b/src/storage/compactor/src/server.rs index a0476fca6cf24..67057a00d2fd6 100644 --- a/src/storage/compactor/src/server.rs +++ b/src/storage/compactor/src/server.rs @@ -19,6 +19,8 @@ use std::time::Duration; use risingwave_common::config::load_config; use risingwave_common::monitor::process_linux::monitor_process; use risingwave_common::system_param::local_manager::LocalSystemParamsManager; +use risingwave_common::telemetry::manager::TelemetryManager; +use risingwave_common::telemetry::telemetry_env_enabled; use risingwave_common::util::addr::HostAddr; use risingwave_common::{GIT_SHA, RW_VERSION}; use risingwave_common_service::metrics_manager::MetricsManager; @@ -32,7 +34,7 @@ use risingwave_rpc_client::MetaClient; use risingwave_storage::hummock::compactor::{CompactionExecutor, CompactorContext}; use risingwave_storage::hummock::hummock_meta_client::MonitoredHummockMetaClient; use risingwave_storage::hummock::{ - CompactorMemoryCollector, MemoryLimiter, SstableIdManager, SstableStore, + CompactorMemoryCollector, MemoryLimiter, SstableObjectIdManager, SstableStore, }; use risingwave_storage::monitor::{ monitor_cache, CompactorMetrics, HummockMetrics, ObjectStoreMetrics, @@ -44,6 +46,7 @@ use tracing::info; use super::compactor_observer::observer_manager::CompactorObserverNode; use crate::rpc::CompactorServiceImpl; +use crate::telemetry::CompactorTelemetryCreator; use crate::CompactorOpts; /// Fetches and runs compaction tasks. @@ -85,10 +88,7 @@ pub async fn compactor_serve( hummock_metrics.clone(), )); - let state_store_url = { - let from_local = opts.state_store.unwrap_or("".to_string()); - system_params_reader.state_store(from_local) - }; + let state_store_url = system_params_reader.state_store(); let storage_opts = Arc::new(StorageOpts::from((&config, &system_params_reader))); let object_store = Arc::new( @@ -108,10 +108,14 @@ pub async fn compactor_serve( storage_opts.meta_cache_capacity_mb * (1 << 20), )); + let telemetry_enabled = system_params_reader.telemetry_enabled(); + let filter_key_extractor_manager = Arc::new(FilterKeyExtractorManager::default()); let system_params_manager = Arc::new(LocalSystemParamsManager::new(system_params_reader)); - let compactor_observer_node = - CompactorObserverNode::new(filter_key_extractor_manager.clone(), system_params_manager); + let compactor_observer_node = CompactorObserverNode::new( + filter_key_extractor_manager.clone(), + system_params_manager.clone(), + ); let observer_manager = ObserverManager::new_with_meta_client(meta_client.clone(), compactor_observer_node).await; @@ -128,7 +132,7 @@ pub async fn compactor_serve( Arc::new(MemoryLimiter::new(input_limit_mb << 20)), )); monitor_cache(memory_collector, ®istry).unwrap(); - let sstable_id_manager = Arc::new(SstableIdManager::new( + let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), storage_opts.sstable_id_remote_fetch_number, )); @@ -143,18 +147,18 @@ pub async fn compactor_serve( )), filter_key_extractor_manager: filter_key_extractor_manager.clone(), read_memory_limiter: memory_limiter, - sstable_id_manager: sstable_id_manager.clone(), + sstable_object_id_manager: sstable_object_id_manager.clone(), task_progress_manager: Default::default(), compactor_runtime_config: Arc::new(tokio::sync::Mutex::new(CompactorRuntimeConfig { max_concurrent_task_number, })), }); - let sub_tasks = vec![ + let mut sub_tasks = vec![ 
MetaClient::start_heartbeat_loop( meta_client.clone(), Duration::from_millis(config.server.heartbeat_interval_ms as u64), Duration::from_secs(config.server.max_heartbeat_interval_secs as u64), - vec![sstable_id_manager], + vec![sstable_object_id_manager], ), risingwave_storage::hummock::compactor::Compactor::start_compactor( compactor_context.clone(), @@ -162,6 +166,22 @@ pub async fn compactor_serve( ), ]; + let telemetry_manager = TelemetryManager::new( + system_params_manager.watch_params(), + Arc::new(meta_client.clone()), + Arc::new(CompactorTelemetryCreator::new()), + ); + // if the toml config file or env variable disables telemetry, do not watch system params change + // because if any of configs disable telemetry, we should never start it + if config.server.telemetry_enabled && telemetry_env_enabled() { + if telemetry_enabled { + telemetry_manager.start_telemetry_reporting(); + } + sub_tasks.push(telemetry_manager.watch_params_change()); + } else { + tracing::info!("Telemetry didn't start due to config"); + } + let (shutdown_send, mut shutdown_recv) = tokio::sync::oneshot::channel(); let join_handle = tokio::spawn(async move { tonic::transport::Server::builder() diff --git a/src/storage/compactor/src/telemetry.rs b/src/storage/compactor/src/telemetry.rs new file mode 100644 index 0000000000000..a2014959c61de --- /dev/null +++ b/src/storage/compactor/src/telemetry.rs @@ -0,0 +1,75 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
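// ---------------------------------------------------------------------------
// Minimal sketch of the telemetry gating wired into `compactor_serve` above
// (this helper is illustrative, not part of the patch): the TOML config and
// the environment variable are static switches that decide whether telemetry
// may ever run, while the `telemetry_enabled` system parameter only toggles
// reporting at runtime, so the params watcher is registered whenever the
// static switches allow it.
fn telemetry_actions(config_on: bool, env_on: bool, param_on: bool) -> (bool, bool) {
    // If either static switch disables telemetry, never report and never watch.
    let may_run = config_on && env_on;
    // (start reporting now, watch system-param changes)
    (may_run && param_on, may_run)
}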
+ +use risingwave_common::telemetry::report::TelemetryReportCreator; +use risingwave_common::telemetry::{ + current_timestamp, SystemData, TelemetryNodeType, TelemetryReport, TelemetryReportBase, +}; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Copy)] +pub(crate) struct CompactorTelemetryCreator {} + +impl CompactorTelemetryCreator { + pub(crate) fn new() -> Self { + Self {} + } +} + +impl TelemetryReportCreator for CompactorTelemetryCreator { + fn create_report( + &self, + tracking_id: String, + session_id: String, + up_time: u64, + ) -> anyhow::Result { + Ok(CompactorTelemetryReport::new( + tracking_id, + session_id, + up_time, + )) + } + + fn report_type(&self) -> &str { + "compactor" + } +} + +#[derive(Serialize, Deserialize)] +pub(crate) struct CompactorTelemetryReport { + #[serde(flatten)] + base: TelemetryReportBase, +} + +impl TelemetryReport for CompactorTelemetryReport { + fn to_json(&self) -> anyhow::Result { + let json = serde_json::to_string(self)?; + Ok(json) + } +} + +impl CompactorTelemetryReport { + pub(crate) fn new(tracking_id: String, session_id: String, up_time: u64) -> Self { + Self { + base: TelemetryReportBase { + tracking_id, + session_id, + system_data: SystemData::new(), + up_time, + time_stamp: current_timestamp(), + node_type: TelemetryNodeType::Compactor, + }, + } + } +} diff --git a/src/storage/hummock_sdk/src/compact.rs b/src/storage/hummock_sdk/src/compact.rs index 25f6eb553a77f..7faf644972036 100644 --- a/src/storage/hummock_sdk/src/compact.rs +++ b/src/storage/hummock_sdk/src/compact.rs @@ -46,7 +46,7 @@ pub fn compact_task_to_string(compact_task: &CompactTask) -> String { let tables: Vec = level_entry .table_infos .iter() - .map(|table| format!("[id: {}, {}KB]", table.id, table.file_size / 1024)) + .map(|table| format!("[id: {}, {}KB]", table.get_sst_id(), table.file_size / 1024)) .collect(); writeln!(s, "Level {:?} {:?} ", level_entry.level_idx, tables).unwrap(); } @@ -76,8 +76,9 @@ pub fn append_sstable_info_to_string(s: &mut String, sstable_info: &SstableInfo) let ratio = sstable_info.stale_key_count * 100 / sstable_info.total_key_count; writeln!( s, - "SstableInfo: id={:?}, KeyRange=[{:?},{:?}], table_ids: {:?}, size={:?}KB, delete_ratio={:?}%", - sstable_info.id, + "SstableInfo: object id={:?}, SST id={:?}, KeyRange=[{:?},{:?}], table_ids: {:?}, size={:?}KB, delete_ratio={:?}%", + sstable_info.get_object_id(), + sstable_info.get_sst_id(), left_str, right_str, sstable_info.table_ids, @@ -88,8 +89,9 @@ pub fn append_sstable_info_to_string(s: &mut String, sstable_info: &SstableInfo) } else { writeln!( s, - "SstableInfo: id={:?}, KeyRange=[{:?},{:?}], table_ids: {:?}, size={:?}KB", - sstable_info.id, + "SstableInfo: object id={:?}, SST id={:?}, KeyRange=[{:?},{:?}], table_ids: {:?}, size={:?}KB", + sstable_info.get_object_id(), + sstable_info.get_sst_id(), left_str, right_str, sstable_info.table_ids, diff --git a/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs b/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs index 6bf1aa7f0ac9e..46f06521e80fe 100644 --- a/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs +++ b/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs @@ -20,14 +20,14 @@ use risingwave_pb::hummock::group_delta::DeltaType; use risingwave_pb::hummock::hummock_version::Levels; use risingwave_pb::hummock::hummock_version_delta::GroupDeltas; use risingwave_pb::hummock::{ - CompactionConfig, GroupConstruct, GroupDestroy, GroupMetaChange, HummockVersion, - 
HummockVersionDelta, Level, LevelType, OverlappingLevel, SstableInfo, + CompactionConfig, GroupConstruct, GroupDestroy, GroupMetaChange, GroupTableChange, + HummockVersion, HummockVersionDelta, Level, LevelType, OverlappingLevel, SstableInfo, }; use super::StateTableId; use crate::compaction_group::StaticCompactionGroupId; use crate::prost_key_range::KeyRangeExt; -use crate::{can_concat, CompactionGroupId, HummockSstableId}; +use crate::{can_concat, CompactionGroupId, HummockSstableId, HummockSstableObjectId}; pub struct GroupDeltasSummary { pub delete_sst_levels: Vec<u32>, @@ -38,6 +38,7 @@ pub struct GroupDeltasSummary { pub group_construct: Option<GroupConstruct>, pub group_destroy: Option<GroupDestroy>, pub group_meta_changes: Vec<GroupMetaChange>, + pub group_table_change: Option<GroupTableChange>, } pub fn summarize_group_deltas(group_deltas: &GroupDeltas) -> GroupDeltasSummary { @@ -49,6 +50,8 @@ pub fn summarize_group_deltas(group_deltas: &GroupDeltas) -> GroupDeltasSummary let mut group_construct = None; let mut group_destroy = None; let mut group_meta_changes = vec![]; + let mut group_table_change = None; + for group_delta in &group_deltas.group_deltas { match group_delta.get_delta_type().unwrap() { DeltaType::IntraLevel(intra_level) => { @@ -73,6 +76,9 @@ pub fn summarize_group_deltas(group_deltas: &GroupDeltas) -> GroupDeltasSummary DeltaType::GroupMetaChange(meta_delta) => { group_meta_changes.push(meta_delta.clone()); } + DeltaType::GroupTableChange(meta_delta) => { + group_table_change = Some(meta_delta.clone()); + } } } @@ -85,6 +91,7 @@ pub fn summarize_group_deltas(group_deltas: &GroupDeltas) -> GroupDeltasSummary group_construct, group_destroy, group_meta_changes, + group_table_change, } } @@ -103,22 +110,28 @@ pub trait HummockVersionExt { fn num_levels(&self, compaction_group_id: CompactionGroupId) -> usize; fn level_iter<F: FnMut(&Level) -> bool>(&self, compaction_group_id: CompactionGroupId, f: F); - fn get_sst_ids(&self) -> Vec<HummockSstableId>; + fn get_object_ids(&self) -> Vec<HummockSstableObjectId>; } +pub type BranchedSstInfo = HashMap<CompactionGroupId, HummockSstableId>; + pub trait HummockVersionUpdateExt { + fn count_new_ssts_in_group_split( + &self, + parent_group_id: CompactionGroupId, + member_table_ids: HashSet<StateTableId>, + ) -> u64; fn init_with_parent_group( &mut self, parent_group_id: CompactionGroupId, group_id: CompactionGroupId, - member_table_ids: &HashSet<StateTableId>, + member_table_ids: HashSet<StateTableId>, + new_sst_start_id: u64, ) -> Vec<SstSplitInfo>; fn apply_version_delta(&mut self, version_delta: &HummockVersionDelta) -> Vec<SstSplitInfo>; fn build_compaction_group_info(&self) -> HashMap<StateTableId, CompactionGroupId>; - fn build_branched_sst_info( - &self, - ) -> BTreeMap<HummockSstableId, HashMap<CompactionGroupId, u64>>; + fn build_branched_sst_info(&self) -> BTreeMap<HummockSstableObjectId, BranchedSstInfo>; } impl HummockVersionExt for HummockVersion { @@ -146,10 +159,16 @@ impl HummockVersionExt for HummockVersion { combined_levels } - fn get_sst_ids(&self) -> Vec<HummockSstableId> { + /// This function does NOT dedup. + fn get_object_ids(&self) -> Vec<HummockSstableObjectId> { self.get_combined_levels() .iter() - .flat_map(|level| level.table_infos.iter().map(|table_info| table_info.id)) + .flat_map(|level| { + level + .table_infos + .iter() + .map(|table_info| table_info.get_object_id()) + }) .collect_vec() } @@ -182,22 +201,62 @@ impl HummockVersionExt for HummockVersion { } pub type SstSplitInfo = ( + // Object id. + HummockSstableObjectId, + // SST id. + HummockSstableId, + // Old SST id in parent group. HummockSstableId, - // Divide version. Counts the number of split of this SST. - u64, - // Level idx of the SSt. - u32, - // The SST is moved to the new group completely. It should be removed from parent group. - bool, + // New SST id in parent group. 
+ Option<HummockSstableId>, ); impl HummockVersionUpdateExt for HummockVersion { + fn count_new_ssts_in_group_split( + &self, + parent_group_id: CompactionGroupId, + member_table_ids: HashSet<StateTableId>, + ) -> u64 { + self.levels + .get(&parent_group_id) + .map_or(0, |parent_levels| { + parent_levels + .l0 + .iter() + .flat_map(|l0| l0.get_sub_levels()) + .chain(parent_levels.get_levels().iter()) + .flat_map(|level| level.get_table_infos()) + .map(|sst_info| { + // `flag` is a bitmap + let mut flag = 0; + // `sst_info.table_ids` will never be empty. + for table_id in sst_info.get_table_ids() { + flag |= if member_table_ids.contains(table_id) { + 2 + } else { + 1 + }; + if flag == 3 { + break; + } + } + // We need to replace the SST id of the divided part in parent group with a + // new SST id when it's not a trivial adjustment. See + // `init_with_parent_group` for details. + flag - 1 + }) + .sum() + }) + }
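// ---------------------------------------------------------------------------
// Standalone sketch of the bitmap trick in `count_new_ssts_in_group_split`
// above, with simplified types (plain u32 table ids, not the patch's API).
// Bit 1 records "this SST contains a table staying in the parent group" and
// bit 2 records "this SST contains a table moving to the new group", so
// `flag - 1` is the number of new SST ids the split needs: 0 when the SST is
// untouched, 1 for a trivial whole-SST move (one id for the branch), and 2
// for a real split (one id for the branch plus one to replace the parent's).
use std::collections::HashSet;

fn new_sst_ids_needed(sst_table_ids: &[u32], member_table_ids: &HashSet<u32>) -> u64 {
    let mut flag: u64 = 0;
    for table_id in sst_table_ids {
        flag |= if member_table_ids.contains(table_id) { 2 } else { 1 };
        if flag == 3 {
            // Both kinds seen; no need to scan further.
            break;
        }
    }
    // Assumes `sst_table_ids` is non-empty, as the comment above guarantees.
    flag - 1
}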
fn init_with_parent_group( &mut self, parent_group_id: CompactionGroupId, group_id: CompactionGroupId, - member_table_ids: &HashSet<StateTableId>, + member_table_ids: HashSet<StateTableId>, + new_sst_start_id: u64, ) -> Vec<SstSplitInfo> { + let mut new_sst_id = new_sst_start_id; let mut split_id_vers = vec![]; if parent_group_id == StaticCompactionGroupId::NewCompactionGroup as CompactionGroupId || !self.levels.contains_key(&parent_group_id) { @@ -208,96 +267,69 @@ impl HummockVersionUpdateExt for HummockVersion { .levels .get_many_mut([&parent_group_id, &group_id]) .unwrap(); - let remove_sst_stat_from_level = |level: &mut Level, sst: &SstableInfo| { - level.total_file_size -= sst.file_size; - level.uncompressed_file_size -= sst.uncompressed_file_size; - }; if let Some(ref mut l0) = parent_levels.l0 { for sub_level in &mut l0.sub_levels { + let target_l0 = cur_levels.l0.as_mut().unwrap(); + let mut target_level_idx = target_l0.sub_levels.len(); + for (idx, other) in target_l0.sub_levels.iter_mut().enumerate() { + if other.sub_level_id == sub_level.sub_level_id { + target_level_idx = idx; } } // Remove SST from sub level may result in empty sub level. It will be purged // whenever another compaction task is finished. - let removed = sub_level + let insert_table_infos = split_sst_info_for_level( + sub_level, + &mut split_id_vers, + &member_table_ids, + &mut new_sst_id, + ); + sub_level .table_infos .drain_filter(|sst_info| sst_info.table_ids.is_empty()) - .collect_vec(); - for removed_sst in removed { - remove_sst_stat_from_level(sub_level, &removed_sst); - } - add_new_sub_level( - cur_levels.l0.as_mut().unwrap(), - sub_level.get_sub_level_id(), + .for_each(|sst_info| { + sub_level.total_file_size -= sst_info.file_size; + sub_level.uncompressed_file_size -= sst_info.uncompressed_file_size; + l0.total_file_size -= sst_info.file_size; + l0.uncompressed_file_size -= sst_info.uncompressed_file_size; + }); + add_ssts_to_sub_level( + target_l0, + target_level_idx, + sub_level.sub_level_id, sub_level.level_type(), insert_table_infos, ); } } for (z, level) in parent_levels.levels.iter_mut().enumerate() { - let level_idx = level.get_level_idx(); - for sst_info in &mut level.table_infos { - if sst_info - .get_table_ids() - .iter() - .any(|table_id| member_table_ids.contains(table_id)) - { - let is_trivial = sst_info - .get_table_ids() - .iter() - .all(|table_id| member_table_ids.contains(table_id)); - if !is_trivial { - sst_info.divide_version += 1; - } - split_id_vers.push(( - sst_info.get_id(), - sst_info.get_divide_version(), - level_idx, - is_trivial, - )); - let mut branch_table_info = sst_info.clone(); - branch_table_info.table_ids = sst_info - .table_ids - .drain_filter(|table_id| member_table_ids.contains(table_id)) - .collect_vec(); - cur_levels.levels[z].total_file_size += branch_table_info.file_size; - cur_levels.levels[z].uncompressed_file_size += - branch_table_info.uncompressed_file_size; - cur_levels.levels[z].table_infos.push(branch_table_info); - } - } - let removed = level + let insert_table_infos = split_sst_info_for_level( + level, + &mut split_id_vers, + &member_table_ids, + &mut new_sst_id, + ); + cur_levels.levels[z].total_file_size += insert_table_infos + .iter() + .map(|sst| sst.file_size) + .sum::<u64>(); + cur_levels.levels[z].uncompressed_file_size += insert_table_infos + .iter() + .map(|sst| sst.uncompressed_file_size) + .sum::<u64>(); + cur_levels.levels[z].table_infos.extend(insert_table_infos); + cur_levels.levels[z].table_infos.sort_by(|sst1, sst2| { + let a = sst1.key_range.as_ref().unwrap(); + let b = sst2.key_range.as_ref().unwrap(); + a.compare(b) + }); + level .table_infos .drain_filter(|sst_info| sst_info.table_ids.is_empty()) - .collect_vec(); - for removed_sst in removed { - remove_sst_stat_from_level(level, &removed_sst); - } + .for_each(|sst_info| { + level.total_file_size -= sst_info.file_size; + level.uncompressed_file_size -= sst_info.uncompressed_file_size; + }); } split_id_vers } @@ -318,8 +350,24 @@ impl HummockVersionUpdateExt for HummockVersion { sst_split_info.extend(self.init_with_parent_group( parent_group_id, *compaction_group_id, - &HashSet::from_iter(group_construct.get_table_ids().iter().cloned()), + HashSet::from_iter(group_construct.table_ids.clone()), + group_construct.get_new_sst_start_id(), + )); + } else if let Some(group_change) = &summary.group_table_change { + sst_split_info.extend(self.init_with_parent_group( + group_change.origin_group_id, + group_change.target_group_id, + HashSet::from_iter(group_change.table_ids.clone()), + group_change.new_sst_start_id, )); + + let levels = self + .levels + .get_mut(&group_change.origin_group_id) + .expect("compaction group should exist"); + levels + .member_table_ids + .drain_filter(|t| 
group_change.table_ids.contains(t)); } let has_destroy = summary.group_destroy.is_some(); let levels = self @@ -334,6 +382,7 @@ impl HummockVersionUpdateExt for HummockVersion { levels .member_table_ids .drain_filter(|t| group_meta_delta.table_ids_remove.contains(t)); + levels.member_table_ids.sort(); } assert!( @@ -392,22 +441,26 @@ impl HummockVersionUpdateExt for HummockVersion { ret } - fn build_branched_sst_info( - &self, - ) -> BTreeMap<HummockSstableId, HashMap<CompactionGroupId, u64>> { - let mut ret: BTreeMap<_, HashMap<_, _>> = BTreeMap::new(); - for compaction_group_id in self.get_levels().keys() { - self.level_iter(*compaction_group_id, |level| { - for table_info in level.get_table_infos() { - let sst_id = table_info.get_id(); - ret.entry(sst_id) - .or_default() - .insert(*compaction_group_id, table_info.get_divide_version()); + fn build_branched_sst_info(&self) -> BTreeMap<HummockSstableObjectId, BranchedSstInfo> { + let mut ret: BTreeMap<_, _> = BTreeMap::new(); + for (compaction_group_id, group) in &self.levels { + let mut levels = vec![]; + levels.extend(group.l0.as_ref().unwrap().sub_levels.iter()); + levels.extend(group.levels.iter()); + for level in levels { + for table_info in &level.table_infos { + if table_info.sst_id == table_info.object_id { + continue; + } + let object_id = table_info.get_object_id(); + let entry: &mut BranchedSstInfo = ret.entry(object_id).or_default(); + if let Some(exist_sst_id) = entry.get(compaction_group_id) { + panic!("we do not allow more than one sst with the same object id in one group. object-id: {}, duplicated sst id: {:?} and {}", object_id, exist_sst_id, table_info.sst_id); + } + entry.insert(*compaction_group_id, table_info.sst_id); } - true - }); + } } - ret.retain(|_, v| v.len() != 1 || *v.values().next().unwrap() != 0); ret } } @@ -416,6 +469,7 @@ pub trait HummockLevelsExt { fn get_level0(&self) -> &OverlappingLevel; fn get_level(&self, idx: usize) -> &Level; fn get_level_mut(&mut self, idx: usize) -> &mut Level; + fn count_ssts(&self) -> usize; fn apply_compact_ssts(&mut self, summary: GroupDeltasSummary); } @@ -432,6 +486,15 @@ impl HummockLevelsExt for Levels { &mut self.levels[level_idx - 1] } + fn count_ssts(&self) -> usize { + self.get_level0() + .get_sub_levels() + .iter() + .chain(self.get_levels().iter()) + .map(|level| level.get_table_infos().len()) + .sum() + } + fn apply_compact_ssts(&mut self, summary: GroupDeltasSummary) { let GroupDeltasSummary { delete_sst_levels, @@ -523,6 +586,51 @@ pub fn build_initial_compaction_group_levels( } } +fn split_sst_info_for_level( + level: &mut Level, + split_id_vers: &mut Vec<SstSplitInfo>, + member_table_ids: &HashSet<StateTableId>, + new_sst_id: &mut u64, +) -> Vec<SstableInfo> { + // Remove SST from sub level may result in empty sub level. It will be purged + // whenever another compaction task is finished. 
+ let mut removed = vec![]; + let mut insert_table_infos = vec![]; + for sst_info in &mut level.table_infos { + let removed_table_ids = sst_info + .table_ids + .iter() + .filter(|table_id| member_table_ids.contains(table_id)) + .cloned() + .collect_vec(); + if !removed_table_ids.is_empty() { + let is_trivial = removed_table_ids.len() == sst_info.table_ids.len(); + let mut branch_table_info = sst_info.clone(); + branch_table_info.sst_id = *new_sst_id; + *new_sst_id += 1; + let parent_old_sst_id = sst_info.get_sst_id(); + split_id_vers.push(( + branch_table_info.get_object_id(), + branch_table_info.get_sst_id(), + parent_old_sst_id, + if is_trivial { + None + } else { + sst_info.sst_id = *new_sst_id; + *new_sst_id += 1; + Some(sst_info.get_sst_id()) + }, + )); + if is_trivial { + sst_info.table_ids.clear(); + removed.push(sst_info.clone()); + } + insert_table_infos.push(branch_table_info); + } + } + insert_table_infos +} + pub fn try_get_compaction_group_id_by_table_id( version: &HummockVersion, table_id: StateTableId, @@ -549,11 +657,11 @@ pub fn get_member_table_ids(version: &HummockVersion) -> HashSet { .collect() } -/// Gets all SST ids in `group_id` -pub fn get_compaction_group_sst_ids( +/// Gets all SSTs in `group_id` +pub fn get_compaction_group_ssts( version: &HummockVersion, group_id: CompactionGroupId, -) -> Vec { +) -> Vec<(HummockSstableObjectId, HummockSstableId)> { let group_levels = version.get_compaction_group_levels(group_id); group_levels .l0 @@ -563,7 +671,12 @@ pub fn get_compaction_group_sst_ids( .iter() .rev() .chain(group_levels.levels.iter()) - .flat_map(|level| level.table_infos.iter().map(|table_info| table_info.id)) + .flat_map(|level| { + level + .table_infos + .iter() + .map(|table_info| (table_info.get_object_id(), table_info.get_sst_id())) + }) .collect_vec() } @@ -594,6 +707,37 @@ pub fn new_sub_level( } } +pub fn add_ssts_to_sub_level( + l0: &mut OverlappingLevel, + sub_level_idx: usize, + insert_sub_level_id: u64, + level_type: LevelType, + insert_table_infos: Vec, +) { + if sub_level_idx < l0.sub_levels.len() { + insert_table_infos.iter().for_each(|sst| { + l0.sub_levels[sub_level_idx].total_file_size += sst.file_size; + l0.sub_levels[sub_level_idx].uncompressed_file_size += sst.uncompressed_file_size; + l0.total_file_size += sst.file_size; + l0.uncompressed_file_size += sst.uncompressed_file_size; + }); + l0.sub_levels[sub_level_idx] + .table_infos + .extend(insert_table_infos); + if l0.sub_levels[sub_level_idx].level_type == LevelType::Nonoverlapping as i32 { + l0.sub_levels[sub_level_idx] + .table_infos + .sort_by(|sst1, sst2| { + let a = sst1.key_range.as_ref().unwrap(); + let b = sst2.key_range.as_ref().unwrap(); + a.compare(b) + }); + } + return; + } + add_new_sub_level(l0, insert_sub_level_id, level_type, insert_table_infos); +} + pub fn add_new_sub_level( l0: &mut OverlappingLevel, insert_sub_level_id: u64, @@ -628,18 +772,21 @@ pub fn build_version_delta_after_version(version: &HummockVersion) -> HummockVer trivial_move: false, max_committed_epoch: version.max_committed_epoch, group_deltas: Default::default(), - gc_sst_ids: vec![], + gc_object_ids: vec![], } } /// Delete sstables if the table id is in the id set. 
/// /// Return `true` if some sst is deleted, and `false` is the deletion is trivial -fn level_delete_ssts(operand: &mut Level, delete_sst_ids_superset: &HashSet) -> bool { +fn level_delete_ssts( + operand: &mut Level, + delete_sst_ids_superset: &HashSet, +) -> bool { let original_len = operand.table_infos.len(); operand .table_infos - .retain(|table| !delete_sst_ids_superset.contains(&table.id)); + .retain(|table| !delete_sst_ids_superset.contains(&table.sst_id)); operand.total_file_size = operand .table_infos .iter() @@ -691,7 +838,7 @@ mod tests { }; #[test] - fn test_get_sst_ids() { + fn test_get_sst_object_ids() { let mut version = HummockVersion { id: 0, levels: HashMap::from_iter([( @@ -709,7 +856,7 @@ mod tests { max_committed_epoch: 0, safe_epoch: 0, }; - assert_eq!(version.get_sst_ids().len(), 0); + assert_eq!(version.get_object_ids().len(), 0); // Add to sub level version @@ -722,22 +869,24 @@ mod tests { .sub_levels .push(Level { table_infos: vec![SstableInfo { - id: 11, + object_id: 11, + sst_id: 11, ..Default::default() }], ..Default::default() }); - assert_eq!(version.get_sst_ids().len(), 1); + assert_eq!(version.get_object_ids().len(), 1); // Add to non sub level version.levels.get_mut(&0).unwrap().levels.push(Level { table_infos: vec![SstableInfo { - id: 22, + object_id: 22, + sst_id: 22, ..Default::default() }], ..Default::default() }); - assert_eq!(version.get_sst_ids().len(), 2); + assert_eq!(version.get_object_ids().len(), 2); } #[test] @@ -801,7 +950,8 @@ mod tests { delta_type: Some(DeltaType::IntraLevel(IntraLevelDelta { level_idx: 1, inserted_table_infos: vec![SstableInfo { - id: 1, + object_id: 1, + sst_id: 1, ..Default::default() }], ..Default::default() @@ -824,7 +974,8 @@ mod tests { level_idx: 1, level_type: LevelType::Nonoverlapping as i32, table_infos: vec![SstableInfo { - id: 1, + object_id: 1, + sst_id: 1, ..Default::default() }], ..Default::default() diff --git a/src/storage/hummock_sdk/src/filter_key_extractor.rs b/src/storage/hummock_sdk/src/filter_key_extractor.rs index 1d9817646a58e..e3e81e681beaa 100644 --- a/src/storage/hummock_sdk/src/filter_key_extractor.rs +++ b/src/storage/hummock_sdk/src/filter_key_extractor.rs @@ -173,9 +173,7 @@ impl SchemaFilterKeyExtractor { let order_types: Vec = table_catalog .pk .iter() - .map(|col_order| { - OrderType::from_protobuf(&col_order.get_order_type().unwrap().direction()) - }) + .map(|col_order| OrderType::from_protobuf(col_order.get_order_type().unwrap())) .collect(); Self { @@ -340,7 +338,7 @@ mod tests { use bytes::{BufMut, BytesMut}; use itertools::Itertools; - use risingwave_common::catalog::{ColumnDesc, ColumnId}; + use risingwave_common::catalog::ColumnDesc; use risingwave_common::constants::hummock::PROPERTIES_RETENTION_SECOND_KEY; use risingwave_common::hash::VirtualNode; use risingwave_common::row::OwnedRow; @@ -349,9 +347,9 @@ mod tests { use risingwave_common::util::ordered::OrderedRowSerde; use risingwave_common::util::sort_util::OrderType; use risingwave_pb::catalog::table::TableType; - use risingwave_pb::catalog::Table as ProstTable; - use risingwave_pb::common::{PbColumnOrder, PbDirection, PbOrderType}; - use risingwave_pb::plan_common::ColumnCatalog as ProstColumnCatalog; + use risingwave_pb::catalog::PbTable; + use risingwave_pb::common::{PbColumnOrder, PbDirection, PbNullsAre, PbOrderType}; + use risingwave_pb::plan_common::PbColumnCatalog; use tokio::task; use super::{DummyFilterKeyExtractor, FilterKeyExtractor, SchemaFilterKeyExtractor}; @@ -361,6 +359,10 @@ mod tests { }; use 
crate::key::TABLE_PREFIX_LEN; + const fn dummy_vnode() -> [u8; VirtualNode::SIZE] { + VirtualNode::from_index(233).to_be_bytes() + } + #[test] fn test_default_filter_key_extractor() { let dummy_filter_key_extractor = DummyFilterKeyExtractor::default(); @@ -375,63 +377,35 @@ mod tests { assert_eq!(full_key, output_key); } - fn build_table_with_prefix_column_num(column_count: u32) -> ProstTable { - ProstTable { + fn build_table_with_prefix_column_num(column_count: u32) -> PbTable { + PbTable { id: 0, schema_id: 0, database_id: 0, name: "test".to_string(), table_type: TableType::Table as i32, columns: vec![ - ProstColumnCatalog { + PbColumnCatalog { column_desc: Some( - (&ColumnDesc { - data_type: DataType::Int64, - column_id: ColumnId::new(0), - name: "_row_id".to_string(), - field_descs: vec![], - type_name: "".to_string(), - }) - .into(), + (&ColumnDesc::new_atomic(DataType::Int64, "_row_id", 0)).into(), ), is_hidden: true, }, - ProstColumnCatalog { + PbColumnCatalog { column_desc: Some( - (&ColumnDesc { - data_type: DataType::Int64, - column_id: ColumnId::new(0), - name: "col_1".to_string(), - field_descs: vec![], - type_name: "Int64".to_string(), - }) - .into(), + (&ColumnDesc::new_atomic(DataType::Int64, "col_1", 0)).into(), ), is_hidden: false, }, - ProstColumnCatalog { + PbColumnCatalog { column_desc: Some( - (&ColumnDesc { - data_type: DataType::Float64, - column_id: ColumnId::new(0), - name: "col_2".to_string(), - field_descs: vec![], - type_name: "Float64".to_string(), - }) - .into(), + (&ColumnDesc::new_atomic(DataType::Float64, "col_2", 0)).into(), ), is_hidden: false, }, - ProstColumnCatalog { + PbColumnCatalog { column_desc: Some( - (&ColumnDesc { - data_type: DataType::Varchar, - column_id: ColumnId::new(0), - name: "col_3".to_string(), - field_descs: vec![], - type_name: "Varchar".to_string(), - }) - .into(), + (&ColumnDesc::new_atomic(DataType::Varchar, "col_3", 0)).into(), ), is_hidden: false, }, @@ -441,12 +415,14 @@ mod tests { column_index: 1, order_type: Some(PbOrderType { direction: PbDirection::Ascending as _, + nulls_are: PbNullsAre::Largest as _, }), }, PbColumnOrder { column_index: 3, order_type: Some(PbOrderType { direction: PbDirection::Ascending as _, + nulls_are: PbNullsAre::Largest as _, }), }, ], @@ -478,7 +454,7 @@ mod tests { let prost_table = build_table_with_prefix_column_num(1); let schema_filter_key_extractor = SchemaFilterKeyExtractor::new(&prost_table); - let order_types: Vec = vec![OrderType::Ascending, OrderType::Ascending]; + let order_types: Vec = vec![OrderType::ascending(), OrderType::ascending()]; let schema = vec![DataType::Int64, DataType::Varchar]; let serializer = OrderedRowSerde::new(schema, order_types); let row = OwnedRow::new(vec![ @@ -494,8 +470,7 @@ mod tests { buf.to_vec() }; - let vnode_prefix = "v".as_bytes(); - assert_eq!(VirtualNode::SIZE, vnode_prefix.len()); + let vnode_prefix = &dummy_vnode()[..]; let full_key = [&table_prefix, vnode_prefix, &row_bytes].concat(); let output_key = schema_filter_key_extractor.extract(&full_key); @@ -513,7 +488,7 @@ mod tests { 1, Arc::new(FilterKeyExtractorImpl::Schema(schema_filter_key_extractor)), ); - let order_types: Vec = vec![OrderType::Ascending, OrderType::Ascending]; + let order_types: Vec = vec![OrderType::ascending(), OrderType::ascending()]; let schema = vec![DataType::Int64, DataType::Varchar]; let serializer = OrderedRowSerde::new(schema, order_types); let row = OwnedRow::new(vec![ @@ -529,14 +504,13 @@ mod tests { buf.to_vec() }; - let vnode_prefix = "v".as_bytes(); - 
assert_eq!(VirtualNode::SIZE, vnode_prefix.len()); + let vnode_prefix = &dummy_vnode()[..]; let full_key = [&table_prefix, vnode_prefix, &row_bytes].concat(); let output_key = multi_filter_key_extractor.extract(&full_key); let data_types = vec![DataType::Int64]; - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let deserializer = OrderedRowSerde::new(data_types, order_types); let pk_prefix_len = deserializer.deserialize_prefix_len(&row_bytes, 1).unwrap(); @@ -551,7 +525,7 @@ mod tests { 2, Arc::new(FilterKeyExtractorImpl::Schema(schema_filter_key_extractor)), ); - let order_types: Vec = vec![OrderType::Ascending, OrderType::Ascending]; + let order_types: Vec = vec![OrderType::ascending(), OrderType::ascending()]; let schema = vec![DataType::Int64, DataType::Varchar]; let serializer = OrderedRowSerde::new(schema, order_types); let row = OwnedRow::new(vec![ @@ -567,14 +541,13 @@ mod tests { buf.to_vec() }; - let vnode_prefix = "v".as_bytes(); - assert_eq!(VirtualNode::SIZE, vnode_prefix.len()); + let vnode_prefix = &dummy_vnode()[..]; let full_key = [&table_prefix, vnode_prefix, &row_bytes].concat(); let output_key = multi_filter_key_extractor.extract(&full_key); let data_types = vec![DataType::Int64, DataType::Varchar]; - let order_types = vec![OrderType::Ascending, OrderType::Ascending]; + let order_types = vec![OrderType::ascending(), OrderType::ascending()]; let deserializer = OrderedRowSerde::new(data_types, order_types); let pk_prefix_len = deserializer.deserialize_prefix_len(&row_bytes, 1).unwrap(); diff --git a/src/storage/hummock_sdk/src/key.rs b/src/storage/hummock_sdk/src/key.rs index b188710703480..a2dee8ea7940a 100644 --- a/src/storage/hummock_sdk/src/key.rs +++ b/src/storage/hummock_sdk/src/key.rs @@ -372,9 +372,7 @@ pub struct TableKey>(pub T); impl> Debug for TableKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TableKey") - .field("table_key", &self.0.as_ref().to_vec()) - .finish() + write!(f, "TableKey {{ {} }}", hex::encode(self.0.as_ref())) } } @@ -408,7 +406,7 @@ pub fn map_table_key_range(range: (Bound, Bound) /// will group these two values into one struct for convenient filtering. /// /// The encoded format is | `table_id` | `table_key` |. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] pub struct UserKey> { // When comparing `UserKey`, we first compare `table_id`, then `table_key`. So the order of // declaration matters. @@ -416,6 +414,16 @@ pub struct UserKey> { pub table_key: TableKey, } +impl> Debug for UserKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "UserKey {{ {}, {:?} }}", + self.table_id.table_id, self.table_key + ) + } +} + impl> UserKey { pub fn new(table_id: TableId, table_key: TableKey) -> Self { Self { @@ -438,6 +446,10 @@ impl> UserKey { buf.put_slice(self.table_key.as_ref()); } + pub fn encode_table_key_into(&self, buf: &mut impl BufMut) { + buf.put_slice(self.table_key.as_ref()); + } + /// Encode in to a buffer. pub fn encode_length_prefixed(&self, buf: &mut impl BufMut) { buf.put_u32(self.table_id.table_id()); @@ -543,12 +555,18 @@ impl UserKey> { /// [`FullKey`] is an internal concept in storage. It associates [`UserKey`] with an epoch. /// /// The encoded format is | `user_key` | `epoch` |. 
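// ---------------------------------------------------------------------------
// Sketch of the key layouts described by the doc comments above: a `UserKey`
// encodes as | table_id | table_key | and a `FullKey` appends the epoch,
// giving | table_id | table_key | epoch |. The free function below is an
// illustrative stand-in, not the crate's API; it assumes the big-endian u32
// table id and big-endian u64 epoch visible in the surrounding code.
use bytes::{BufMut, BytesMut};

fn encode_full_key(table_id: u32, table_key: &[u8], epoch: u64) -> Vec<u8> {
    let mut buf = BytesMut::with_capacity(4 + table_key.len() + 8);
    buf.put_u32(table_id); // | table_id |
    buf.put_slice(table_key); // | table_key |
    buf.put_u64(epoch); // | epoch |
    buf.to_vec()
}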
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Default)] pub struct FullKey> { pub user_key: UserKey, pub epoch: HummockEpoch, } +impl> Debug for FullKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "FullKey {{ {:?}, {} }}", self.user_key, self.epoch) + } +} + impl> FullKey { pub fn new(table_id: TableId, table_key: TableKey, epoch: HummockEpoch) -> Self { Self { @@ -583,6 +601,12 @@ impl> FullKey { buf } + // Encode in to a buffer. + pub fn encode_into_without_table_id(&self, buf: &mut impl BufMut) { + self.user_key.encode_table_key_into(buf); + buf.put_u64(self.epoch); + } + pub fn encode_reverse_epoch(&self) -> Vec { let mut buf = Vec::with_capacity( TABLE_PREFIX_LEN + self.user_key.table_key.as_ref().len() + EPOCH_LEN, @@ -614,6 +638,20 @@ impl<'a> FullKey<&'a [u8]> { } } + /// Construct a [`FullKey`] from a byte slice without `table_id` encoded. + pub fn from_slice_without_table_id( + table_id: TableId, + slice_without_table_id: &'a [u8], + ) -> Self { + let epoch_pos = slice_without_table_id.len() - EPOCH_LEN; + let epoch = (&slice_without_table_id[epoch_pos..]).get_u64(); + + Self { + user_key: UserKey::new(table_id, TableKey(&slice_without_table_id[..epoch_pos])), + epoch, + } + } + /// Construct a [`FullKey`] from a byte slice. pub fn decode_reverse_epoch(slice: &'a [u8]) -> Self { let epoch_pos = slice.len() - EPOCH_LEN; diff --git a/src/storage/hummock_sdk/src/lib.rs b/src/storage/hummock_sdk/src/lib.rs index 32be30d8e210e..4a6bc7012a5b1 100644 --- a/src/storage/hummock_sdk/src/lib.rs +++ b/src/storage/hummock_sdk/src/lib.rs @@ -32,7 +32,7 @@ use risingwave_pb::hummock::SstableInfo; use crate::compaction_group::StaticCompactionGroupId; use crate::key_range::KeyRangeCommon; -use crate::table_stats::{to_prost_table_stats_map, ProstTableStatsMap, TableStatsMap}; +use crate::table_stats::{to_prost_table_stats_map, PbTableStatsMap, TableStatsMap}; pub mod compact; pub mod compaction_group; @@ -42,6 +42,7 @@ pub mod key_range; pub mod prost_key_range; pub mod table_stats; +pub type HummockSstableObjectId = u64; pub type HummockSstableId = u64; pub type HummockRefCount = u64; pub type HummockVersionId = u64; @@ -51,6 +52,8 @@ pub type HummockCompactionTaskId = u64; pub type CompactionGroupId = u64; pub const INVALID_VERSION_ID: HummockVersionId = 0; pub const FIRST_VERSION_ID: HummockVersionId = 1; +pub const SPLIT_TABLE_COMPACTION_GROUP_ID_HEAD: u64 = 1u64 << 56; +pub const SINGLE_TABLE_COMPACTION_GROUP_ID_HEAD: u64 = 2u64 << 56; #[macro_export] /// This is wrapper for `info` log. 
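// ---------------------------------------------------------------------------
// Sketch of the allocation scheme behind `SstObjectIdRange` (defined just
// below): the meta node hands out a half-open id range [start_id, end_id) and
// the client pops object ids locally until the range is exhausted. This uses
// a hypothetical standalone type to show the pop semantics only.
struct IdRange {
    start_id: u64, // inclusive
    end_id: u64,   // exclusive
}

impl IdRange {
    fn get_next(&mut self) -> Option<u64> {
        let next = (self.start_id < self.end_id).then_some(self.start_id);
        // Mirrors the patch: the cursor advances even when the range is empty.
        self.start_id += 1;
        next
    }
}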
@@ -130,14 +133,14 @@ impl LocalSstableInfo { pub struct ExtendedSstableInfo { pub compaction_group_id: CompactionGroupId, pub sst_info: SstableInfo, - pub table_stats: ProstTableStatsMap, + pub table_stats: PbTableStatsMap, } impl ExtendedSstableInfo { pub fn new( compaction_group_id: CompactionGroupId, sst_info: SstableInfo, - table_stats: ProstTableStatsMap, + table_stats: PbTableStatsMap, ) -> Self { Self { compaction_group_id, @@ -150,7 +153,7 @@ impl ExtendedSstableInfo { compaction_group_id: CompactionGroupId, sst_info: SstableInfo, ) -> Self { - Self::new(compaction_group_id, sst_info, ProstTableStatsMap::default()) + Self::new(compaction_group_id, sst_info, PbTableStatsMap::default()) } } @@ -209,19 +212,19 @@ impl HummockReadEpoch { } } } -pub struct SstIdRange { +pub struct SstObjectIdRange { // inclusive - pub start_id: HummockSstableId, + pub start_id: HummockSstableObjectId, // exclusive - pub end_id: HummockSstableId, + pub end_id: HummockSstableObjectId, } -impl SstIdRange { - pub fn new(start_id: HummockSstableId, end_id: HummockSstableId) -> Self { +impl SstObjectIdRange { + pub fn new(start_id: HummockSstableObjectId, end_id: HummockSstableObjectId) -> Self { Self { start_id, end_id } } - pub fn peek_next_sst_id(&self) -> Option { + pub fn peek_next_sst_object_id(&self) -> Option { if self.start_id < self.end_id { return Some(self.start_id); } @@ -229,8 +232,8 @@ impl SstIdRange { } /// Pops and returns next SST id. - pub fn get_next_sst_id(&mut self) -> Option { - let next_id = self.peek_next_sst_id(); + pub fn get_next_sst_object_id(&mut self) -> Option { + let next_id = self.peek_next_sst_object_id(); self.start_id += 1; next_id } diff --git a/src/storage/hummock_sdk/src/table_stats.rs b/src/storage/hummock_sdk/src/table_stats.rs index 19732e30cba13..02da4999bebc3 100644 --- a/src/storage/hummock_sdk/src/table_stats.rs +++ b/src/storage/hummock_sdk/src/table_stats.rs @@ -15,13 +15,13 @@ use std::borrow::Borrow; use std::collections::{HashMap, HashSet}; -use risingwave_pb::hummock::{HummockVersion, TableStats as ProstTableStats}; +use risingwave_pb::hummock::{HummockVersion, PbTableStats}; use crate::compaction_group::hummock_version_ext::HummockVersionExt; pub type TableStatsMap = HashMap; -pub type ProstTableStatsMap = HashMap; +pub type PbTableStatsMap = HashMap; #[derive(Default, Debug, Clone)] pub struct TableStats { @@ -30,7 +30,7 @@ pub struct TableStats { pub total_key_count: i64, } -impl From<&TableStats> for ProstTableStats { +impl From<&TableStats> for PbTableStats { fn from(value: &TableStats) -> Self { Self { total_key_size: value.total_key_size, @@ -40,14 +40,14 @@ impl From<&TableStats> for ProstTableStats { } } -impl From for ProstTableStats { +impl From for PbTableStats { fn from(value: TableStats) -> Self { (&value).into() } } -impl From<&ProstTableStats> for TableStats { - fn from(value: &ProstTableStats) -> Self { +impl From<&PbTableStats> for TableStats { + fn from(value: &PbTableStats) -> Self { Self { total_key_size: value.total_key_size, total_value_size: value.total_value_size, @@ -64,13 +64,13 @@ impl TableStats { } } -pub fn add_prost_table_stats(this: &mut ProstTableStats, other: &ProstTableStats) { +pub fn add_prost_table_stats(this: &mut PbTableStats, other: &PbTableStats) { this.total_key_size += other.total_key_size; this.total_value_size += other.total_value_size; this.total_key_count += other.total_key_count; } -pub fn add_prost_table_stats_map(this: &mut ProstTableStatsMap, other: &ProstTableStatsMap) { +pub fn 
add_prost_table_stats_map(this: &mut PbTableStatsMap, other: &PbTableStatsMap) { for (table_id, stats) in other { add_prost_table_stats(this.entry(*table_id).or_default(), stats); } @@ -84,7 +84,7 @@ pub fn add_table_stats_map(this: &mut TableStatsMap, other: &TableStatsMap) { pub fn to_prost_table_stats_map( table_stats: impl Borrow, -) -> HashMap { +) -> HashMap { table_stats .borrow() .iter() @@ -93,7 +93,7 @@ pub fn to_prost_table_stats_map( } pub fn from_prost_table_stats_map( - table_stats: impl Borrow>, + table_stats: impl Borrow>, ) -> HashMap { table_stats .borrow() @@ -103,7 +103,7 @@ pub fn from_prost_table_stats_map( } pub fn purge_prost_table_stats( - table_stats: &mut ProstTableStatsMap, + table_stats: &mut PbTableStatsMap, hummock_version: &HummockVersion, ) { let mut all_tables_in_version: HashSet = HashSet::default(); diff --git a/src/storage/hummock_test/src/compactor_tests.rs b/src/storage/hummock_test/src/compactor_tests.rs index 9d61a3c11f0b4..6dd6851a8cb84 100644 --- a/src/storage/hummock_test/src/compactor_tests.rs +++ b/src/storage/hummock_test/src/compactor_tests.rs @@ -47,7 +47,8 @@ pub(crate) mod tests { use risingwave_storage::hummock::iterator::test_utils::mock_sstable_store; use risingwave_storage::hummock::sstable_store::SstableStoreRef; use risingwave_storage::hummock::{ - HummockStorage as GlobalHummockStorage, HummockStorage, MemoryLimiter, SstableIdManager, + HummockStorage as GlobalHummockStorage, HummockStorage, MemoryLimiter, + SstableObjectIdManager, }; use risingwave_storage::monitor::{CompactorMetrics, StoreLocalStatistic}; use risingwave_storage::opts::StorageOpts; @@ -185,7 +186,7 @@ pub(crate) mod tests { compaction_executor: Arc::new(CompactionExecutor::new(Some(1))), read_memory_limiter: MemoryLimiter::unlimit(), filter_key_extractor_manager, - sstable_id_manager: Arc::new(SstableIdManager::new( + sstable_object_id_manager: Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), options.sstable_id_remote_fetch_number, )), @@ -253,7 +254,7 @@ pub(crate) mod tests { val.extend_from_slice(&compact_task.watermark.to_be_bytes()); let compactor_manager = hummock_manager_ref.compactor_manager_ref_for_test(); - compactor_manager.add_compactor(worker_node.id, u64::MAX); + compactor_manager.add_compactor(worker_node.id, u64::MAX, 16); let compactor = hummock_manager_ref.get_idle_compactor().await.unwrap(); hummock_manager_ref .assign_compaction_task(&compact_task, compactor.context_id()) @@ -378,7 +379,7 @@ pub(crate) mod tests { compact_task.current_epoch_time = 0; let compactor_manager = hummock_manager_ref.compactor_manager_ref_for_test(); - compactor_manager.add_compactor(worker_node.id, u64::MAX); + compactor_manager.add_compactor(worker_node.id, u64::MAX, 16); let compactor = hummock_manager_ref.get_idle_compactor().await.unwrap(); hummock_manager_ref .assign_compaction_task(&compact_task, compactor.context_id()) @@ -728,13 +729,12 @@ pub(crate) mod tests { .await .unwrap() .unwrap(); - compact_task.existing_table_ids.push(2); let compaction_filter_flag = CompactionFilterFlag::STATE_CLEAN | CompactionFilterFlag::TTL; compact_task.compaction_filter_mask = compaction_filter_flag.bits(); // 3. 
pick compactor and assign let compactor_manager = hummock_manager_ref.compactor_manager_ref_for_test(); - compactor_manager.add_compactor(worker_node.id, u64::MAX); + compactor_manager.add_compactor(worker_node.id, u64::MAX, 16); let compactor = hummock_manager_ref.get_idle_compactor().await.unwrap(); hummock_manager_ref .assign_compaction_task(&compact_task, compactor.context_id()) @@ -902,7 +902,6 @@ pub(crate) mod tests { .unwrap() .unwrap(); - compact_task.existing_table_ids.push(existing_table_id); let compaction_filter_flag = CompactionFilterFlag::STATE_CLEAN | CompactionFilterFlag::TTL; compact_task.compaction_filter_mask = compaction_filter_flag.bits(); let retention_seconds_expire_second = 1; @@ -915,7 +914,7 @@ pub(crate) mod tests { compact_task.current_epoch_time = epoch; let compactor_manager = hummock_manager_ref.compactor_manager_ref_for_test(); - compactor_manager.add_compactor(worker_node.id, u64::MAX); + compactor_manager.add_compactor(worker_node.id, u64::MAX, 16); let compactor = hummock_manager_ref.get_idle_compactor().await.unwrap(); hummock_manager_ref .assign_compaction_task(&compact_task, compactor.context_id()) @@ -1091,7 +1090,6 @@ pub(crate) mod tests { kv_count, ); - compact_task.existing_table_ids.push(existing_table_id); let compaction_filter_flag = CompactionFilterFlag::STATE_CLEAN | CompactionFilterFlag::TTL; compact_task.compaction_filter_mask = compaction_filter_flag.bits(); // compact_task.table_options = @@ -1099,7 +1097,7 @@ pub(crate) mod tests { compact_task.current_epoch_time = epoch; let compactor_manager = hummock_manager_ref.compactor_manager_ref_for_test(); - compactor_manager.add_compactor(worker_node.id, u64::MAX); + compactor_manager.add_compactor(worker_node.id, u64::MAX, 16); let compactor = hummock_manager_ref.get_idle_compactor().await.unwrap(); hummock_manager_ref .assign_compaction_task(&compact_task, compactor.context_id()) @@ -1223,7 +1221,7 @@ pub(crate) mod tests { flush_and_commit(&hummock_meta_client, &storage, 130).await; let compactor_manager = hummock_manager_ref.compactor_manager_ref_for_test(); - compactor_manager.add_compactor(worker_node.id, u64::MAX); + compactor_manager.add_compactor(worker_node.id, u64::MAX, 16); // 2. 
get compact task let manual_compcation_option = ManualCompactionOption { diff --git a/src/storage/hummock_test/src/hummock_read_version_tests.rs b/src/storage/hummock_test/src/hummock_read_version_tests.rs index e2b316435cefe..7de250257bd33 100644 --- a/src/storage/hummock_test/src/hummock_read_version_tests.rs +++ b/src/storage/hummock_test/src/hummock_read_version_tests.rs @@ -151,7 +151,8 @@ async fn test_read_version_basic() { let dummy_sst = StagingSstableInfo::new( vec![ LocalSstableInfo::for_test(SstableInfo { - id: 1, + object_id: 1, + sst_id: 1, key_range: Some(KeyRange { left: key_with_epoch(iterator_test_user_key_of(1).encode(), 1), right: key_with_epoch(iterator_test_user_key_of(2).encode(), 2), @@ -162,13 +163,13 @@ async fn test_read_version_basic() { meta_offset: 1, stale_key_count: 1, total_key_count: 1, - divide_version: 0, uncompressed_file_size: 1, min_epoch: 0, max_epoch: 0, }), LocalSstableInfo::for_test(SstableInfo { - id: 2, + object_id: 2, + sst_id: 2, key_range: Some(KeyRange { left: key_with_epoch(iterator_test_user_key_of(3).encode(), 3), right: key_with_epoch(iterator_test_user_key_of(3).encode(), 3), @@ -179,7 +180,6 @@ async fn test_read_version_basic() { meta_offset: 1, stale_key_count: 1, total_key_count: 1, - divide_version: 0, uncompressed_file_size: 1, min_epoch: 0, max_epoch: 0, @@ -199,8 +199,8 @@ async fn test_read_version_basic() { // test clear related batch after update sst // after update sst - // imm(0, 1, 2) => sst{sst_id: 1} - // staging => {imm(3, 4, 5), sst[{sst_id: 1}, {sst_id: 2}]} + // imm(0, 1, 2) => sst{sst_object_id: 1} + // staging => {imm(3, 4, 5), sst[{sst_object_id: 1}, {sst_object_id: 2}]} let staging = read_version.staging(); assert_eq!(3, read_version.staging().imm.len()); assert_eq!(1, read_version.staging().sst.len()); @@ -233,8 +233,8 @@ async fn test_read_version_basic() { let staging_ssts = staging_sst_iter.cloned().collect_vec(); assert_eq!(2, staging_ssts.len()); - assert_eq!(1, staging_ssts[0].id); - assert_eq!(2, staging_ssts[1].id); + assert_eq!(1, staging_ssts[0].get_object_id()); + assert_eq!(2, staging_ssts[1].get_object_id()); } { @@ -257,7 +257,7 @@ async fn test_read_version_basic() { let staging_ssts = staging_sst_iter.cloned().collect_vec(); assert_eq!(1, staging_ssts.len()); - assert_eq!(2, staging_ssts[0].id); + assert_eq!(2, staging_ssts[0].get_object_id()); } } diff --git a/src/storage/hummock_test/src/local_version_manager_tests.rs b/src/storage/hummock_test/src/local_version_manager_tests.rs index 6e2f507785a9e..cca13c2625c41 100644 --- a/src/storage/hummock_test/src/local_version_manager_tests.rs +++ b/src/storage/hummock_test/src/local_version_manager_tests.rs @@ -19,7 +19,7 @@ use bytes::Bytes; use risingwave_common::catalog::TableId; use risingwave_hummock_sdk::compact::CompactorRuntimeConfig; use risingwave_hummock_sdk::filter_key_extractor::FilterKeyExtractorManager; -use risingwave_hummock_sdk::HummockSstableId; +use risingwave_hummock_sdk::HummockSstableObjectId; use risingwave_meta::hummock::test_utils::{ setup_compute_env, update_filter_key_extractor_for_table_ids, }; @@ -37,7 +37,7 @@ use risingwave_storage::hummock::shared_buffer::UncommittedData; use risingwave_storage::hummock::test_utils::{ default_opts_for_test, gen_dummy_batch, gen_dummy_batch_several_keys, gen_dummy_sst_info, }; -use risingwave_storage::hummock::SstableIdManager; +use risingwave_storage::hummock::SstableObjectIdManager; use risingwave_storage::monitor::CompactorMetrics; use risingwave_storage::opts::StorageOpts; use 
risingwave_storage::storage_value::StorageValue; @@ -60,7 +60,7 @@ pub async fn prepare_local_version_manager( worker_node.id, )); - let sstable_id_manager = Arc::new(SstableIdManager::new( + let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), opt.sstable_id_remote_fetch_number, )); @@ -74,7 +74,7 @@ pub async fn prepare_local_version_manager( sstable_store, hummock_meta_client, Arc::new(CompactorMetrics::unused()), - sstable_id_manager, + sstable_object_id_manager, filter_key_extractor_manager, CompactorRuntimeConfig::default(), )); @@ -459,9 +459,9 @@ async fn test_clear_shared_buffer() { assert_eq!( local_version_manager - .sstable_id_manager() - .global_watermark_sst_id(), - HummockSstableId::MAX + .sstable_object_id_manager() + .global_watermark_object_id(), + HummockSstableObjectId::MAX ); } @@ -482,9 +482,9 @@ async fn test_sst_gc_watermark() { assert_eq!( local_version_manager - .sstable_id_manager() - .global_watermark_sst_id(), - HummockSstableId::MAX + .sstable_object_id_manager() + .global_watermark_object_id(), + HummockSstableObjectId::MAX ); for i in 0..2 { @@ -496,9 +496,9 @@ async fn test_sst_gc_watermark() { assert_eq!( local_version_manager - .sstable_id_manager() - .global_watermark_sst_id(), - HummockSstableId::MAX + .sstable_object_id_manager() + .global_watermark_object_id(), + HummockSstableObjectId::MAX ); for epoch in &epochs { @@ -510,8 +510,8 @@ async fn test_sst_gc_watermark() { // Global watermark determined by epoch 0. assert_eq!( local_version_manager - .sstable_id_manager() - .global_watermark_sst_id(), + .sstable_object_id_manager() + .global_watermark_object_id(), 1 ); } @@ -526,8 +526,8 @@ async fn test_sst_gc_watermark() { // Global watermark determined by epoch 1. assert_eq!( local_version_manager - .sstable_id_manager() - .global_watermark_sst_id(), + .sstable_object_id_manager() + .global_watermark_object_id(), 2 ); @@ -539,8 +539,8 @@ async fn test_sst_gc_watermark() { local_version_manager.try_update_pinned_version(Payload::PinnedVersion(version)); assert_eq!( local_version_manager - .sstable_id_manager() - .global_watermark_sst_id(), - HummockSstableId::MAX + .sstable_object_id_manager() + .global_watermark_object_id(), + HummockSstableObjectId::MAX ); } diff --git a/src/storage/hummock_test/src/mock_notification_client.rs b/src/storage/hummock_test/src/mock_notification_client.rs index ea8e068c45a07..183bb06014609 100644 --- a/src/storage/hummock_test/src/mock_notification_client.rs +++ b/src/storage/hummock_test/src/mock_notification_client.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
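// ---------------------------------------------------------------------------
// Sketch of the invariant asserted by the watermark tests above (hypothetical
// helper, simplified from `SstableObjectIdManager`): while nothing is waiting
// to be committed, the GC watermark is u64::MAX, i.e. no object is protected
// from vacuum; otherwise it is the smallest object id still referenced by an
// in-flight sync, and it returns to u64::MAX once those epochs are committed
// or the shared buffer is cleared.
fn global_watermark_object_id(inflight_object_ids: &[u64]) -> u64 {
    inflight_object_ids.iter().copied().min().unwrap_or(u64::MAX)
}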
+use std::collections::HashMap; use std::sync::Arc; use risingwave_common::error::Result; @@ -22,6 +23,7 @@ use risingwave_meta::manager::{MessageStatus, MetaSrvEnv, NotificationManagerRef use risingwave_meta::storage::{MemStore, MetaStore}; use risingwave_pb::backup_service::MetaBackupManifestId; use risingwave_pb::common::WorkerNode; +use risingwave_pb::hummock::WriteLimits; use risingwave_pb::meta::{MetaSnapshot, SubscribeResponse, SubscribeType}; use tokio::sync::mpsc::UnboundedReceiver; @@ -62,6 +64,9 @@ impl NotificationClient for MockNotificationClient { hummock_version: Some(hummock_version), version: Some(Default::default()), meta_backup_manifest_id: Some(MetaBackupManifestId { id: 0 }), + hummock_write_limits: Some(WriteLimits { + write_limits: HashMap::new(), + }), ..Default::default() }; diff --git a/src/storage/hummock_test/src/state_store_tests.rs b/src/storage/hummock_test/src/state_store_tests.rs index 475aa2bce57cf..076ebffe69cbe 100644 --- a/src/storage/hummock_test/src/state_store_tests.rs +++ b/src/storage/hummock_test/src/state_store_tests.rs @@ -20,7 +20,9 @@ use bytes::Bytes; use futures::{pin_mut, TryStreamExt}; use risingwave_common::catalog::TableId; use risingwave_hummock_sdk::key::FullKey; -use risingwave_hummock_sdk::{HummockEpoch, HummockReadEpoch, HummockSstableId, LocalSstableInfo}; +use risingwave_hummock_sdk::{ + HummockEpoch, HummockReadEpoch, HummockSstableObjectId, LocalSstableInfo, +}; use risingwave_meta::hummock::test_utils::setup_compute_env; use risingwave_meta::hummock::MockHummockMetaClient; use risingwave_rpc_client::HummockMetaClient; @@ -1295,9 +1297,9 @@ async fn test_gc_watermark_and_clear_shared_buffer() { assert_eq!( hummock_storage - .sstable_id_manager() - .global_watermark_sst_id(), - HummockSstableId::MAX + .sstable_object_id_manager() + .global_watermark_object_id(), + HummockSstableObjectId::MAX ); let mut local_hummock_storage = hummock_storage @@ -1317,9 +1319,9 @@ async fn test_gc_watermark_and_clear_shared_buffer() { assert_eq!( hummock_storage - .sstable_id_manager() - .global_watermark_sst_id(), - HummockSstableId::MAX + .sstable_object_id_manager() + .global_watermark_object_id(), + HummockSstableObjectId::MAX ); let epoch2 = initial_epoch + 2; @@ -1331,34 +1333,34 @@ async fn test_gc_watermark_and_clear_shared_buffer() { assert_eq!( hummock_storage - .sstable_id_manager() - .global_watermark_sst_id(), - HummockSstableId::MAX + .sstable_object_id_manager() + .global_watermark_object_id(), + HummockSstableObjectId::MAX ); - let min_sst_id = |sync_result: &SyncResult| { + let min_object_id = |sync_result: &SyncResult| { sync_result .uncommitted_ssts .iter() - .map(|LocalSstableInfo { sst_info, .. }| sst_info.id) + .map(|LocalSstableInfo { sst_info, .. 
}| sst_info.get_object_id()) .min() .unwrap() }; local_hummock_storage.seal_current_epoch(u64::MAX); let sync_result1 = hummock_storage.seal_and_sync_epoch(epoch1).await.unwrap(); - let min_sst_id_epoch1 = min_sst_id(&sync_result1); + let min_object_id_epoch1 = min_object_id(&sync_result1); assert_eq!( hummock_storage - .sstable_id_manager() - .global_watermark_sst_id(), - min_sst_id_epoch1, + .sstable_object_id_manager() + .global_watermark_object_id(), + min_object_id_epoch1, ); let sync_result2 = hummock_storage.seal_and_sync_epoch(epoch2).await.unwrap(); - let min_sst_id_epoch2 = min_sst_id(&sync_result2); + let min_object_id_epoch2 = min_object_id(&sync_result2); assert_eq!( hummock_storage - .sstable_id_manager() - .global_watermark_sst_id(), - min_sst_id_epoch1, + .sstable_object_id_manager() + .global_watermark_object_id(), + min_object_id_epoch1, ); meta_client .commit_epoch(epoch1, sync_result1.uncommitted_ssts) @@ -1371,9 +1373,9 @@ async fn test_gc_watermark_and_clear_shared_buffer() { assert_eq!( hummock_storage - .sstable_id_manager() - .global_watermark_sst_id(), - min_sst_id_epoch2, + .sstable_object_id_manager() + .global_watermark_object_id(), + min_object_id_epoch2, ); hummock_storage.clear_shared_buffer().await.unwrap(); @@ -1386,8 +1388,8 @@ async fn test_gc_watermark_and_clear_shared_buffer() { assert_eq!(read_version.committed().max_committed_epoch(), epoch1); assert_eq!( hummock_storage - .sstable_id_manager() - .global_watermark_sst_id(), - HummockSstableId::MAX + .sstable_object_id_manager() + .global_watermark_object_id(), + HummockSstableObjectId::MAX ); } diff --git a/src/storage/hummock_test/src/sync_point_tests.rs b/src/storage/hummock_test/src/sync_point_tests.rs index 122286d0b35f9..76eea62385179 100644 --- a/src/storage/hummock_test/src/sync_point_tests.rs +++ b/src/storage/hummock_test/src/sync_point_tests.rs @@ -34,7 +34,7 @@ use risingwave_meta::storage::MemStore; use risingwave_pb::hummock::compact_task::TaskStatus; use risingwave_rpc_client::HummockMetaClient; use risingwave_storage::hummock::compactor::{Compactor, CompactorContext}; -use risingwave_storage::hummock::SstableIdManager; +use risingwave_storage::hummock::SstableObjectIdManager; use risingwave_storage::store::{LocalStateStore, NewLocalOptions, ReadOptions}; use risingwave_storage::StateStore; use serial_test::serial; @@ -47,46 +47,62 @@ use crate::get_notification_client_for_test; #[tokio::test] #[cfg(feature = "sync_point")] #[serial] -async fn test_syncpoints_sstable_id_manager() { +async fn test_syncpoints_sstable_object_id_manager() { let (_env, hummock_manager_ref, _cluster_manager_ref, worker_node) = setup_compute_env(8080).await; let hummock_meta_client: Arc = Arc::new(MockHummockMetaClient::new( hummock_manager_ref.clone(), worker_node.id, )); - let sstable_id_manager = Arc::new(SstableIdManager::new(hummock_meta_client.clone(), 5)); + let sstable_object_id_manager = + Arc::new(SstableObjectIdManager::new(hummock_meta_client.clone(), 5)); // Block filling cache after fetching ids. - sync_point::hook("MAP_NEXT_SST_ID.BEFORE_FILL_CACHE", || async { - sync_point::wait_timeout("MAP_NEXT_SST_ID.SIG_FILL_CACHE", Duration::from_secs(10)) - .await - .unwrap(); + sync_point::hook("MAP_NEXT_SST_OBJECT_ID.BEFORE_FILL_CACHE", || async { + sync_point::wait_timeout( + "MAP_NEXT_SST_OBJECT_ID.SIG_FILL_CACHE", + Duration::from_secs(10), + ) + .await + .unwrap(); }); // Start the task that fetches new ids. 
- let sstable_id_manager_clone = sstable_id_manager.clone(); + let sstable_object_id_manager_clone = sstable_object_id_manager.clone(); let leader_task = tokio::spawn(async move { - sstable_id_manager_clone.get_new_sst_id().await.unwrap(); + sstable_object_id_manager_clone + .get_new_sst_object_id() + .await + .unwrap(); }); - sync_point::wait_timeout("MAP_NEXT_SST_ID.AFTER_FETCH", Duration::from_secs(10)) - .await - .unwrap(); + sync_point::wait_timeout( + "MAP_NEXT_SST_OBJECT_ID.AFTER_FETCH", + Duration::from_secs(10), + ) + .await + .unwrap(); // Start tasks that wait to be notified. let mut follower_tasks = vec![]; for _ in 0..3 { - let sstable_id_manager_clone = sstable_id_manager.clone(); + let sstable_object_id_manager_clone = sstable_object_id_manager.clone(); let follower_task = tokio::spawn(async move { - sstable_id_manager_clone.get_new_sst_id().await.unwrap(); + sstable_object_id_manager_clone + .get_new_sst_object_id() + .await + .unwrap(); }); - sync_point::wait_timeout("MAP_NEXT_SST_ID.AS_FOLLOWER", Duration::from_secs(10)) - .await - .unwrap(); + sync_point::wait_timeout( + "MAP_NEXT_SST_OBJECT_ID.AS_FOLLOWER", + Duration::from_secs(10), + ) + .await + .unwrap(); follower_tasks.push(follower_task); } // Continue to fill cache. - sync_point::on("MAP_NEXT_SST_ID.SIG_FILL_CACHE").await; + sync_point::on("MAP_NEXT_SST_OBJECT_ID.SIG_FILL_CACHE").await; leader_task.await.unwrap(); for follower_task in follower_tasks { @@ -104,42 +120,52 @@ async fn test_syncpoints_test_failpoints_fetch_ids() { hummock_manager_ref.clone(), worker_node.id, )); - let sstable_id_manager = Arc::new(SstableIdManager::new(hummock_meta_client.clone(), 5)); + let sstable_object_id_manager = + Arc::new(SstableObjectIdManager::new(hummock_meta_client.clone(), 5)); // Block fetching ids. - sync_point::hook("MAP_NEXT_SST_ID.BEFORE_FETCH", || async { - sync_point::wait_timeout("MAP_NEXT_SST_ID.SIG_FETCH", Duration::from_secs(10)) + sync_point::hook("MAP_NEXT_SST_OBJECT_ID.BEFORE_FETCH", || async { + sync_point::wait_timeout("MAP_NEXT_SST_OBJECT_ID.SIG_FETCH", Duration::from_secs(10)) .await .unwrap(); - sync_point::remove_action("MAP_NEXT_SST_ID.BEFORE_FETCH"); + sync_point::remove_action("MAP_NEXT_SST_OBJECT_ID.BEFORE_FETCH"); }); // Start the task that fetches new ids. - let sstable_id_manager_clone = sstable_id_manager.clone(); + let sstable_object_id_manager_clone = sstable_object_id_manager.clone(); let leader_task = tokio::spawn(async move { fail::cfg("get_new_sst_ids_err", "return").unwrap(); - sstable_id_manager_clone.get_new_sst_id().await.unwrap_err(); + sstable_object_id_manager_clone + .get_new_sst_object_id() + .await + .unwrap_err(); fail::remove("get_new_sst_ids_err"); }); - sync_point::wait_timeout("MAP_NEXT_SST_ID.AS_LEADER", Duration::from_secs(10)) + sync_point::wait_timeout("MAP_NEXT_SST_OBJECT_ID.AS_LEADER", Duration::from_secs(10)) .await .unwrap(); // Start tasks that wait to be notified. 
let mut follower_tasks = vec![]; for _ in 0..3 { - let sstable_id_manager_clone = sstable_id_manager.clone(); + let sstable_object_id_manager_clone = sstable_object_id_manager.clone(); let follower_task = tokio::spawn(async move { - sstable_id_manager_clone.get_new_sst_id().await.unwrap(); + sstable_object_id_manager_clone + .get_new_sst_object_id() + .await + .unwrap(); }); - sync_point::wait_timeout("MAP_NEXT_SST_ID.AS_FOLLOWER", Duration::from_secs(10)) - .await - .unwrap(); + sync_point::wait_timeout( + "MAP_NEXT_SST_OBJECT_ID.AS_FOLLOWER", + Duration::from_secs(10), + ) + .await + .unwrap(); follower_tasks.push(follower_task); } // Continue to fetch ids. - sync_point::on("MAP_NEXT_SST_ID.SIG_FETCH").await; + sync_point::on("MAP_NEXT_SST_OBJECT_ID.SIG_FETCH").await; leader_task.await.unwrap(); // Failed leader task doesn't block follower tasks. @@ -252,7 +278,7 @@ async fn test_syncpoints_get_in_delete_range_boundary() { )); let compactor_manager = hummock_manager_ref.compactor_manager_ref_for_test(); - compactor_manager.add_compactor(worker_node.id, u64::MAX); + compactor_manager.add_compactor(worker_node.id, u64::MAX, 16); let mut local = storage .new_local(NewLocalOptions::for_test(existing_table_id.into())) diff --git a/src/storage/hummock_test/src/test_utils.rs b/src/storage/hummock_test/src/test_utils.rs index b47ef1bc5aaf0..da2763ca6cef1 100644 --- a/src/storage/hummock_test/src/test_utils.rs +++ b/src/storage/hummock_test/src/test_utils.rs @@ -29,7 +29,7 @@ use risingwave_meta::hummock::test_utils::{ use risingwave_meta::hummock::{HummockManagerRef, MockHummockMetaClient}; use risingwave_meta::manager::MetaSrvEnv; use risingwave_meta::storage::{MemStore, MetaStore}; -use risingwave_pb::catalog::Table as ProstTable; +use risingwave_pb::catalog::PbTable; use risingwave_pb::common::WorkerNode; use risingwave_pb::hummock::version_update_payload; use risingwave_rpc_client::HummockMetaClient; @@ -40,6 +40,7 @@ use risingwave_storage::hummock::iterator::test_utils::mock_sstable_store; use risingwave_storage::hummock::local_version::pinned_version::PinnedVersion; use risingwave_storage::hummock::observer_manager::HummockObserverNode; use risingwave_storage::hummock::test_utils::default_opts_for_test; +use risingwave_storage::hummock::write_limiter::WriteLimiter; use risingwave_storage::hummock::HummockStorage; use risingwave_storage::storage_value::StorageValue; use risingwave_storage::store::*; @@ -60,12 +61,14 @@ pub async fn prepare_first_valid_version( let notification_client = get_notification_client_for_test(env, hummock_manager_ref.clone(), worker_node.clone()); let backup_manager = BackupReader::unused(); + let write_limiter = WriteLimiter::unused(); let observer_manager = ObserverManager::new( notification_client, HummockObserverNode::new( Arc::new(FilterKeyExtractorManager::default()), backup_manager, tx.clone(), + write_limiter, ), ) .await; @@ -176,7 +179,7 @@ pub async fn register_tables_with_id_for_test( pub async fn register_tables_with_catalog_for_test( filter_key_extractor_manager: &FilterKeyExtractorManagerRef, hummock_manager_ref: &HummockManagerRef, - tables: &[ProstTable], + tables: &[PbTable], ) { update_filter_key_extractor_for_tables(filter_key_extractor_manager, tables); let table_ids = tables.iter().map(|t| t.id).collect_vec(); @@ -204,7 +207,7 @@ impl HummockTestEnv { .await; } - pub async fn register_table(&self, table: ProstTable) { + pub async fn register_table(&self, table: PbTable) { register_tables_with_catalog_for_test( 
self.storage.filter_key_extractor_manager(), &self.manager, diff --git a/src/storage/hummock_test/src/vacuum_tests.rs b/src/storage/hummock_test/src/vacuum_tests.rs index 8dec2541d075b..ed26089cf1d9b 100644 --- a/src/storage/hummock_test/src/vacuum_tests.rs +++ b/src/storage/hummock_test/src/vacuum_tests.rs @@ -32,12 +32,12 @@ use risingwave_storage::hummock::vacuum::Vacuum; async fn test_vacuum() { let sstable_store = mock_sstable_store(); // Put some SSTs to object store - let sst_ids = (1..10).collect_vec(); + let object_ids = (1..10).collect_vec(); let mut sstables = vec![]; - for sstable_id in &sst_ids { + for sstable_object_id in &object_ids { let sstable = gen_default_test_sstable( default_builder_opt_for_test(), - *sstable_id, + *sstable_object_id, sstable_store.clone(), ) .await; @@ -48,7 +48,7 @@ async fn test_vacuum() { // OK. let nonexistent_id = 11u64; let vacuum_task = VacuumTask { - sstable_ids: sst_ids + sstable_object_ids: object_ids .into_iter() .chain(iter::once(nonexistent_id)) .collect_vec(), diff --git a/src/storage/src/hummock/backup_reader.rs b/src/storage/src/hummock/backup_reader.rs index 1bcfe9bc1a72a..1a79a87e9d5ec 100644 --- a/src/storage/src/hummock/backup_reader.rs +++ b/src/storage/src/hummock/backup_reader.rs @@ -18,14 +18,16 @@ use std::pin::Pin; use std::sync::Arc; use std::time::Duration; +use arc_swap::ArcSwap; use futures::future::Shared; use futures::FutureExt; use risingwave_backup::error::BackupError; use risingwave_backup::meta_snapshot::MetaSnapshot; use risingwave_backup::storage::{ - DummyMetaSnapshotStorage, MetaSnapshotStorageRef, ObjectStoreMetaSnapshotStorage, + BoxedMetaSnapshotStorage, DummyMetaSnapshotStorage, ObjectStoreMetaSnapshotStorage, }; use risingwave_backup::MetaSnapshotId; +use risingwave_common::system_param::local_manager::SystemParamsReaderRef; use risingwave_object_store::object::object_metrics::ObjectStoreMetrics; use risingwave_object_store::object::parse_remote_object_store; @@ -40,39 +42,48 @@ type VersionHolder = ( tokio::sync::mpsc::UnboundedReceiver, ); -pub async fn parse_meta_snapshot_storage( - storage_url: &str, - storage_directory: &str, -) -> StorageResult { +async fn create_snapshot_store(config: &StoreConfig) -> StorageResult { let backup_object_store = Arc::new( parse_remote_object_store( - storage_url, + &config.0, Arc::new(ObjectStoreMetrics::unused()), "Meta Backup", ) .await, ); - let store = Arc::new( - ObjectStoreMetaSnapshotStorage::new(storage_directory, backup_object_store).await?, - ); + let store = + Box::new(ObjectStoreMetaSnapshotStorage::new(&config.1, backup_object_store).await?); Ok(store) } type InflightRequest = Shared> + Send>>>; +/// (url, dir) +type StoreConfig = (String, String); /// `BackupReader` helps to access historical hummock versions, /// which are persisted in meta snapshots (aka backups). 
pub struct BackupReader { versions: parking_lot::RwLock>, inflight_request: parking_lot::Mutex>, - store: MetaSnapshotStorageRef, + store: ArcSwap<(BoxedMetaSnapshotStorage, StoreConfig)>, refresh_tx: tokio::sync::mpsc::UnboundedSender, } impl BackupReader { - pub fn new(store: MetaSnapshotStorageRef) -> BackupReaderRef { + pub async fn new(storage_url: &str, storage_directory: &str) -> StorageResult { + let config = (storage_url.to_string(), storage_directory.to_string()); + let store = create_snapshot_store(&config).await?; + tracing::info!( + "backup reader is initialized: url={}, dir={}", + config.0, + config.1 + ); + Ok(Self::with_store((store, config))) + } + + fn with_store(store: (BoxedMetaSnapshotStorage, StoreConfig)) -> BackupReaderRef { let (refresh_tx, refresh_rx) = tokio::sync::mpsc::unbounded_channel(); let instance = Arc::new(Self { - store, + store: ArcSwap::from_pointee(store), versions: Default::default(), inflight_request: Default::default(), refresh_tx, @@ -82,9 +93,24 @@ impl BackupReader { } pub fn unused() -> BackupReaderRef { - Self::new(Arc::new(DummyMetaSnapshotStorage::default())) + Self::with_store(( + Box::::default(), + StoreConfig::default(), + )) + } + + async fn set_store(&self, config: StoreConfig) -> StorageResult<()> { + let new_store = create_snapshot_store(&config).await?; + tracing::info!( + "backup reader is updated: url={}, dir={}", + config.0, + config.1 + ); + self.store.store(Arc::new((new_store, config))); + Ok(()) } + /// Watches the latest manifest id to keep the local manifest up to date. async fn start_manifest_refresher( backup_reader: BackupReaderRef, mut refresh_rx: tokio::sync::mpsc::UnboundedReceiver, @@ -95,11 +121,13 @@ impl BackupReader { break; } let expect_manifest_id = expect_manifest_id.unwrap(); - let previous_id = backup_reader.store.manifest().manifest_id; + // Use the same store throughout one run. + let current_store = backup_reader.store.load_full(); + let previous_id = current_store.0.manifest().manifest_id; if expect_manifest_id <= previous_id { continue; } - if let Err(e) = backup_reader.store.refresh_manifest().await { + if let Err(e) = current_store.0.refresh_manifest().await { // reschedule refresh request tracing::warn!("failed to refresh backup manifest, will retry. {}", e); let backup_reader_clone = backup_reader.clone(); @@ -110,8 +138,8 @@ impl BackupReader { continue; } // purge stale version cache - let manifest: HashSet = backup_reader - .store + let manifest: HashSet = current_store + .0 .manifest() .snapshot_metadata .iter() @@ -138,9 +166,11 @@ impl BackupReader { self: &BackupReaderRef, epoch: u64, ) -> StorageResult> { + // Use the same store throughout the call. + let current_store = self.store.load_full(); // 1. check manifest to locate snapshot, if any. - let snapshot_id = self - .store + let snapshot_id = current_store + .0 .manifest() .snapshot_metadata .iter() @@ -163,7 +193,7 @@ impl BackupReader { } else { let this = self.clone(); let f = async move { - let snapshot = this.store.get(snapshot_id).await.map_err(|e| { + let snapshot = current_store.0.get(snapshot_id).await.map_err(|e| { format!("failed to get meta snapshot {}. 
{}", snapshot_id, e) })?; let version_holder = build_version_holder(snapshot); @@ -184,6 +214,34 @@ impl BackupReader { self.inflight_request.lock().remove(&snapshot_id); result } + + pub async fn watch_config_change( + &self, + mut rx: tokio::sync::watch::Receiver, + ) { + loop { + if rx.changed().await.is_err() { + break; + } + let p = rx.borrow().load(); + let config = ( + p.backup_storage_url().to_string(), + p.backup_storage_directory().to_string(), + ); + if config == self.store.load().1 { + continue; + } + if let Err(e) = self.set_store(config.clone()).await { + // Retry is driven by periodic system params notification. + tracing::warn!( + "failed to update backup reader: url={}, dir={}, {:#?}", + config.0, + config.1, + e + ); + } + } + } } fn build_version_holder(s: MetaSnapshot) -> VersionHolder { diff --git a/src/storage/src/hummock/block_cache.rs b/src/storage/src/hummock/block_cache.rs index 4586ac4d0f329..bf0d2e183b0eb 100644 --- a/src/storage/src/hummock/block_cache.rs +++ b/src/storage/src/hummock/block_cache.rs @@ -19,7 +19,7 @@ use std::sync::Arc; use futures::Future; use risingwave_common::cache::{CacheableEntry, LookupResponse, LruCache, LruCacheEventListener}; -use risingwave_hummock_sdk::HummockSstableId; +use risingwave_hummock_sdk::HummockSstableObjectId; use tokio::sync::oneshot::Receiver; use tokio::task::JoinHandle; @@ -28,7 +28,7 @@ use crate::hummock::HummockError; const MIN_BUFFER_SIZE_PER_SHARD: usize = 32 * 1024 * 1024; -type CachedBlockEntry = CacheableEntry<(HummockSstableId, u64), Box>; +type CachedBlockEntry = CacheableEntry<(HummockSstableObjectId, u64), Box>; enum BlockEntry { Cache(CachedBlockEntry), @@ -38,7 +38,7 @@ enum BlockEntry { pub struct BlockHolder { _handle: BlockEntry, - block: *const Block, + pub block: *const Block, } impl BlockHolder { @@ -66,7 +66,9 @@ impl BlockHolder { } } - pub fn from_tiered_cache(entry: TieredCacheEntry<(HummockSstableId, u64), Box>) -> Self { + pub fn from_tiered_cache( + entry: TieredCacheEntry<(HummockSstableObjectId, u64), Box>, + ) -> Self { match entry { TieredCacheEntry::Cache(entry) => Self::from_cached_block(entry), TieredCacheEntry::Owned(block) => Self::from_owned_block(*block), @@ -86,11 +88,11 @@ unsafe impl Send for BlockHolder {} unsafe impl Sync for BlockHolder {} type BlockCacheEventListener = - Arc>>; + Arc>>; #[derive(Clone)] pub struct BlockCache { - inner: Arc>>, + inner: Arc>>, } pub enum BlockResponse { @@ -162,22 +164,22 @@ impl BlockCache { } } - pub fn get(&self, sst_id: HummockSstableId, block_idx: u64) -> Option { + pub fn get(&self, object_id: HummockSstableObjectId, block_idx: u64) -> Option { self.inner - .lookup(Self::hash(sst_id, block_idx), &(sst_id, block_idx)) + .lookup(Self::hash(object_id, block_idx), &(object_id, block_idx)) .map(BlockHolder::from_cached_block) } pub fn insert( &self, - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, block_idx: u64, block: Box, high_priority: bool, ) -> BlockHolder { BlockHolder::from_cached_block(self.inner.insert( - (sst_id, block_idx), - Self::hash(sst_id, block_idx), + (object_id, block_idx), + Self::hash(object_id, block_idx), block.capacity(), block, high_priority, @@ -186,7 +188,7 @@ impl BlockCache { pub fn get_or_insert_with( &self, - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, block_idx: u64, high_priority: bool, mut fetch_block: F, @@ -195,8 +197,8 @@ impl BlockCache { F: FnMut() -> Fut, Fut: Future>> + Send + 'static, { - let h = Self::hash(sst_id, block_idx); - let key = (sst_id, block_idx); + 
let h = Self::hash(object_id, block_idx); + let key = (object_id, block_idx); match self.inner.lookup_with_request_dedup::<_, HummockError, _>( h, key, @@ -221,9 +223,9 @@ impl BlockCache { } } - fn hash(sst_id: HummockSstableId, block_idx: u64) -> u64 { + fn hash(object_id: HummockSstableObjectId, block_idx: u64) -> u64 { let mut hasher = DefaultHasher::default(); - sst_id.hash(&mut hasher); + object_id.hash(&mut hasher); block_idx.hash(&mut hasher); hasher.finish() } diff --git a/src/storage/src/hummock/compactor/compaction_utils.rs b/src/storage/src/hummock/compactor/compaction_utils.rs index af59776495102..51cd245aa82b4 100644 --- a/src/storage/src/hummock/compactor/compaction_utils.rs +++ b/src/storage/src/hummock/compactor/compaction_utils.rs @@ -36,12 +36,12 @@ use crate::hummock::multi_builder::TableBuilderFactory; use crate::hummock::sstable::DEFAULT_ENTRY_SIZE; use crate::hummock::{ CachePolicy, FilterBuilder, HummockResult, MemoryLimiter, SstableBuilder, - SstableBuilderOptions, SstableIdManagerRef, SstableWriterFactory, SstableWriterOptions, + SstableBuilderOptions, SstableObjectIdManagerRef, SstableWriterFactory, SstableWriterOptions, }; use crate::monitor::StoreLocalStatistic; pub struct RemoteBuilderFactory { - pub sstable_id_manager: SstableIdManagerRef, + pub sstable_object_id_manager: SstableObjectIdManagerRef, pub limiter: Arc, pub options: SstableBuilderOptions, pub policy: CachePolicy, @@ -64,7 +64,10 @@ impl TableBuilderFactory for RemoteBu .require_memory((self.options.capacity + self.options.block_capacity) as u64) .await; let timer = Instant::now(); - let table_id = self.sstable_id_manager.get_new_sst_id().await?; + let table_id = self + .sstable_object_id_manager + .get_new_sst_object_id() + .await?; let cost = (timer.elapsed().as_secs_f64() * 1000000.0).round() as u64; self.remote_rpc_cost.fetch_add(cost, Ordering::Relaxed); let writer_options = SstableWriterOptions { @@ -126,8 +129,9 @@ pub struct TaskConfig { pub split_by_table: bool, } -pub fn estimate_memory_use_for_compaction(task: &CompactTask) -> u64 { +pub fn estimate_state_for_compaction(task: &CompactTask) -> (u64, usize) { let mut total_memory_size = 0; + let mut total_file_count = 0; for level in &task.input_ssts { if level.level_type == LevelType::Nonoverlapping as i32 { if let Some(table) = level.table_infos.first() { @@ -138,8 +142,11 @@ pub fn estimate_memory_use_for_compaction(task: &CompactTask) -> u64 { total_memory_size += table.file_size; } } + + total_file_count += level.table_infos.len(); } - total_memory_size + + (total_memory_size, total_file_count) } pub fn build_multi_compaction_filter(compact_task: &CompactTask) -> MultiCompactionFilter { diff --git a/src/storage/src/hummock/compactor/compactor_runner.rs b/src/storage/src/hummock/compactor/compactor_runner.rs index 692a8647e50b8..cd122fca3d03b 100644 --- a/src/storage/src/hummock/compactor/compactor_runner.rs +++ b/src/storage/src/hummock/compactor/compactor_runner.rs @@ -53,17 +53,6 @@ impl CompactorRunner { .map(|table| table.file_size) .sum::(); - let stats_target_table_ids: HashSet = task - .input_ssts - .iter() - .flat_map(|i| { - i.table_infos - .iter() - .flat_map(|t| t.table_ids.clone()) - .collect_vec() - }) - .collect(); - let mut options: SstableBuilderOptions = context.storage_opts.as_ref().into(); options.capacity = std::cmp::min(task.target_file_size as usize, max_target_file_size); options.compression_algorithm = match task.compression_algorithm { @@ -89,7 +78,7 @@ impl CompactorRunner { cache_policy: 
CachePolicy::NotFill, gc_delete_keys: task.gc_delete_keys, watermark: task.watermark, - stats_target_table_ids: Some(stats_target_table_ids), + stats_target_table_ids: Some(HashSet::from_iter(task.existing_table_ids.clone())), fill_high_priority_cache: false, task_type: task.task_type(), split_by_table: task.split_by_state_table, @@ -182,6 +171,7 @@ impl CompactorRunner { .cloned() .collect_vec(); table_iters.push(ConcatSstableIterator::new( + self.compact_task.existing_table_ids.clone(), tables, self.compactor.task_config.key_range.clone(), self.sstable_store.clone(), @@ -193,6 +183,7 @@ impl CompactorRunner { continue; } table_iters.push(ConcatSstableIterator::new( + self.compact_task.existing_table_ids.clone(), vec![table_info.clone()], self.compactor.task_config.key_range.clone(), self.sstable_store.clone(), diff --git a/src/storage/src/hummock/compactor/context.rs b/src/storage/src/hummock/compactor/context.rs index 41252ea0033b1..4ccb0e24a0cfd 100644 --- a/src/storage/src/hummock/compactor/context.rs +++ b/src/storage/src/hummock/compactor/context.rs @@ -21,7 +21,7 @@ use risingwave_rpc_client::HummockMetaClient; use super::task_progress::TaskProgressManagerRef; use crate::hummock::compactor::CompactionExecutor; use crate::hummock::sstable_store::SstableStoreRef; -use crate::hummock::{MemoryLimiter, SstableIdManagerRef}; +use crate::hummock::{MemoryLimiter, SstableObjectIdManagerRef}; use crate::monitor::CompactorMetrics; use crate::opts::StorageOpts; @@ -49,7 +49,7 @@ pub struct CompactorContext { pub read_memory_limiter: Arc, - pub sstable_id_manager: SstableIdManagerRef, + pub sstable_object_id_manager: SstableObjectIdManagerRef, pub task_progress_manager: TaskProgressManagerRef, @@ -62,7 +62,7 @@ impl CompactorContext { sstable_store: SstableStoreRef, hummock_meta_client: Arc, compactor_metrics: Arc, - sstable_id_manager: SstableIdManagerRef, + sstable_object_id_manager: SstableObjectIdManagerRef, filter_key_extractor_manager: FilterKeyExtractorManagerRef, compactor_runtime_config: CompactorRuntimeConfig, ) -> Self { @@ -85,7 +85,7 @@ impl CompactorContext { compaction_executor, filter_key_extractor_manager, read_memory_limiter: memory_limiter, - sstable_id_manager, + sstable_object_id_manager, task_progress_manager: Default::default(), compactor_runtime_config: Arc::new(tokio::sync::Mutex::new(compactor_runtime_config)), } diff --git a/src/storage/src/hummock/compactor/iterator.rs b/src/storage/src/hummock/compactor/iterator.rs index 79976dc420a67..070546f0a5bef 100644 --- a/src/storage/src/hummock/compactor/iterator.rs +++ b/src/storage/src/hummock/compactor/iterator.rs @@ -13,11 +13,13 @@ // limitations under the License. use std::cmp::Ordering; +use std::collections::HashSet; use std::future::Future; use std::sync::atomic::AtomicU64; use std::sync::{atomic, Arc}; use std::time::Instant; +use risingwave_hummock_sdk::compaction_group::StateTableId; use risingwave_hummock_sdk::key::FullKey; use risingwave_hummock_sdk::key_range::KeyRange; use risingwave_hummock_sdk::KeyComparator; @@ -43,8 +45,9 @@ struct SstableStreamIterator { /// Counts the time used for IO. stats_ptr: Arc, - // For debugging + /// For key sanity check of divided SST and debugging sstable_info: SstableInfo, + existing_table_ids: HashSet, } impl SstableStreamIterator { @@ -64,6 +67,7 @@ impl SstableStreamIterator { /// The iterator reads at most `max_block_count` from the stream. 
pub fn new( sstable_info: &SstableInfo, + existing_table_ids: HashSet, block_stream: BlockStream, max_block_count: usize, stats: &StoreLocalStatistic, @@ -73,15 +77,30 @@ impl SstableStreamIterator { block_iter: None, remaining_blocks: max_block_count, stats_ptr: stats.remote_io_time.clone(), + existing_table_ids, sstable_info: sstable_info.clone(), } } + async fn prune_from_valid_block_iter(&mut self) -> HummockResult<()> { + while let Some(block_iter) = self.block_iter.as_mut() { + if self + .existing_table_ids + .contains(&block_iter.table_id().table_id) + { + return Ok(()); + } else { + self.next_block().await?; + } + } + Ok(()) + } + /// Initialises the iterator by moving it to the first KV-pair in the stream's first block where /// key >= `seek_key`. If that block does not contain such a KV-pair, the iterator continues to /// the first KV-pair of the next block. If `seek_key` is not given, the iterator will move to /// the very first KV-pair of the stream's first block. - pub async fn seek(&mut self, seek_key: Option<&[u8]>) -> HummockResult<()> { + pub async fn seek(&mut self, seek_key: Option>) -> HummockResult<()> { // Load first block. self.next_block().await?; @@ -91,18 +110,15 @@ impl SstableStreamIterator { if let (Some(block_iter), Some(seek_key)) = (self.block_iter.as_mut(), seek_key) { block_iter.seek(seek_key); + if !block_iter.is_valid() { // `seek_key` is larger than everything in the first block. self.next_block().await?; + } else { } } - if self.block_iter.is_none() { - // End of stream. - self.remaining_blocks = 0; - } - - Ok(()) + self.prune_from_valid_block_iter().await } /// Loads a new block, creates a new iterator for it, and stores that iterator in @@ -151,12 +167,13 @@ impl SstableStreamIterator { block_iter.next(); if !block_iter.is_valid() { self.next_block().await?; + self.prune_from_valid_block_iter().await?; } Ok(()) } - fn key(&self) -> &[u8] { + fn key(&self) -> FullKey<&[u8]> { self.block_iter .as_ref() .unwrap_or_else(|| panic!("no block iter sstinfo={}", self.sst_debug_info())) @@ -180,8 +197,11 @@ impl SstableStreamIterator { fn sst_debug_info(&self) -> String { format!( - "sst_id={}, meta_offset={}, table_ids={:?}", - self.sstable_info.id, self.sstable_info.meta_offset, self.sstable_info.table_ids + "object_id={}, sst_id={}, meta_offset={}, table_ids={:?}", + self.sstable_info.get_object_id(), + self.sstable_info.get_sst_id(), + self.sstable_info.meta_offset, + self.sstable_info.table_ids ) } } @@ -198,7 +218,9 @@ pub struct ConcatSstableIterator { cur_idx: usize, /// All non-overlapping tables. - tables: Vec, + sstables: Vec, + + existing_table_ids: HashSet, sstable_store: SstableStoreRef, @@ -210,7 +232,8 @@ impl ConcatSstableIterator { /// arranged in ascending order when it serves as a forward iterator, /// and arranged in descending order when it serves as a backward iterator. pub fn new( - tables: Vec, + existing_table_ids: Vec, + sst_infos: Vec, key_range: KeyRange, sstable_store: SstableStoreRef, ) -> Self { @@ -218,42 +241,56 @@ impl ConcatSstableIterator { key_range, sstable_iter: None, cur_idx: 0, - tables, + sstables: sst_infos, + existing_table_ids: HashSet::from_iter(existing_table_ids), sstable_store, stats: StoreLocalStatistic::default(), } } /// Resets the iterator, loads the specified SST, and seeks in that SST to `seek_key` if given. 
- async fn seek_idx(&mut self, idx: usize, seek_key: Option<&[u8]>) -> HummockResult<()> { + async fn seek_idx( + &mut self, + idx: usize, + seek_key: Option>, + ) -> HummockResult<()> { self.sstable_iter.take(); - let seek_key: Option<&[u8]> = match (seek_key, self.key_range.left.is_empty()) { - (Some(seek_key), false) => { - match KeyComparator::compare_encoded_full_key(seek_key, &self.key_range.left) { - Ordering::Less | Ordering::Equal => Some(&self.key_range.left), - Ordering::Greater => Some(seek_key), - } - } + let mut seek_key: Option> = match (seek_key, self.key_range.left.is_empty()) + { + (Some(seek_key), false) => match seek_key.cmp(&FullKey::decode(&self.key_range.left)) { + Ordering::Less | Ordering::Equal => Some(FullKey::decode(&self.key_range.left)), + Ordering::Greater => Some(seek_key), + }, (Some(seek_key), true) => Some(seek_key), (None, true) => None, - (None, false) => Some(&self.key_range.left), + (None, false) => Some(FullKey::decode(&self.key_range.left)), }; - - if idx < self.tables.len() { - let table_info = &self.tables[idx]; - let table = self + self.cur_idx = idx; + while self.cur_idx < self.sstables.len() { + let table_info = &self.sstables[self.cur_idx]; + let mut found = table_info + .table_ids + .iter() + .any(|table_id| self.existing_table_ids.contains(table_id)); + if !found { + self.cur_idx += 1; + seek_key = None; + continue; + } + let sstable = self .sstable_store .sstable(table_info, &mut self.stats) .await?; - let block_metas = &table.value().meta.block_metas; - let start_index = match seek_key { + let stats_ptr = self.stats.remote_io_time.clone(); + let now = Instant::now(); + let block_metas = &sstable.value().meta.block_metas; + let mut start_index = match seek_key { None => 0, Some(seek_key) => { // start_index points to the greatest block whose smallest_key <= seek_key. block_metas .partition_point(|block| { - KeyComparator::compare_encoded_full_key(&block.smallest_key, seek_key) - != Ordering::Greater + seek_key.cmp(&FullKey::decode(&block.smallest_key)) != Ordering::Less }) .saturating_sub(1) } @@ -268,32 +305,51 @@ impl ConcatSstableIterator { ) != Ordering::Greater }) }; - if end_index <= start_index { + while start_index < end_index { + let start_block_table_id = block_metas[start_index].table_id(); + if self + .existing_table_ids + .contains(&block_metas[start_index].table_id().table_id) + { + break; + } + start_index += &block_metas[(start_index + 1)..] + .partition_point(|block_meta| block_meta.table_id() == start_block_table_id) + + 1; + } + if start_index >= end_index { + found = false; + } else { + let block_stream = self + .sstable_store + .get_stream(sstable.value(), Some(start_index)) + .await?; + + // Determine time needed to open stream. + let add = (now.elapsed().as_secs_f64() * 1000.0).ceil(); + stats_ptr.fetch_add(add as u64, atomic::Ordering::Relaxed); + + let mut sstable_iter = SstableStreamIterator::new( + table_info, + self.existing_table_ids.clone(), + block_stream, + end_index - start_index, + &self.stats, + ); + sstable_iter.seek(seek_key).await?; + + if sstable_iter.is_valid() { + self.sstable_iter = Some(sstable_iter); + } else { + found = false; + } + } + if found { return Ok(()); + } else { + self.cur_idx += 1; + seek_key = None; } - - let stats_ptr = self.stats.remote_io_time.clone(); - let now = Instant::now(); - - let block_stream = self - .sstable_store - .get_stream(table.value(), Some(start_index)) - .await?; - - // Determine time needed to open stream. 
- let add = (now.elapsed().as_secs_f64() * 1000.0).ceil(); - stats_ptr.fetch_add(add as u64, atomic::Ordering::Relaxed); - - let mut sstable_iter = SstableStreamIterator::new( - table_info, - block_stream, - end_index - start_index, - &self.stats, - ); - sstable_iter.seek(seek_key).await?; - - self.sstable_iter = Some(sstable_iter); - self.cur_idx = idx; } Ok(()) } @@ -323,7 +379,7 @@ impl HummockIterator for ConcatSstableIterator { } fn key(&self) -> FullKey<&[u8]> { - FullKey::decode(self.sstable_iter.as_ref().expect("no table iter").key()) + self.sstable_iter.as_ref().expect("no table iter").key() } fn value(&self) -> HummockValue<&[u8]> { @@ -341,17 +397,15 @@ impl HummockIterator for ConcatSstableIterator { /// Resets the iterator and seeks to the first position where the stored key >= `key`. fn seek<'a>(&'a mut self, key: FullKey<&'a [u8]>) -> Self::SeekFuture<'a> { async move { - let encoded_key = key.encode(); - let key_slice = encoded_key.as_slice(); - let seek_key: &[u8] = if self.key_range.left.is_empty() { - key_slice + let seek_key = if self.key_range.left.is_empty() { + key } else { - match KeyComparator::compare_encoded_full_key(key_slice, &self.key_range.left) { - Ordering::Less | Ordering::Equal => &self.key_range.left, - Ordering::Greater => key_slice, + match key.cmp(&FullKey::decode(&self.key_range.left)) { + Ordering::Less | Ordering::Equal => FullKey::decode(&self.key_range.left), + Ordering::Greater => key, } }; - let table_idx = self.tables.partition_point(|table| { + let table_idx = self.sstables.partition_point(|table| { // We use the maximum key of an SST for the search. That way, we guarantee that the // resulting SST contains either that key or the next-larger KV-pair. Subsequently, // we avoid calling `seek_idx()` twice if the determined SST does not contain `key`. @@ -359,10 +413,10 @@ impl HummockIterator for ConcatSstableIterator { // Note that we need to use `<` instead of `<=` to ensure that all keys in an SST // (including its max. key) produce the same search result. 
let max_sst_key = &table.key_range.as_ref().unwrap().right; - KeyComparator::compare_encoded_full_key(max_sst_key, seek_key) == Ordering::Less + FullKey::decode(max_sst_key).cmp(&seek_key) == Ordering::Less }); - self.seek_idx(table_idx, Some(key_slice)).await + self.seek_idx(table_idx, Some(key)).await } } @@ -377,13 +431,13 @@ mod tests { use risingwave_hummock_sdk::key::{next_full_key, prev_full_key, FullKey}; use risingwave_hummock_sdk::key_range::KeyRange; - use risingwave_hummock_sdk::KeyComparator; use crate::hummock::compactor::ConcatSstableIterator; use crate::hummock::iterator::test_utils::mock_sstable_store; use crate::hummock::iterator::HummockIterator; use crate::hummock::test_utils::{ - default_builder_opt_for_test, gen_test_sstable, test_key_of, test_value_of, TEST_KEYS_COUNT, + default_builder_opt_for_test, gen_test_sstable_and_info, test_key_of, test_value_of, + TEST_KEYS_COUNT, }; use crate::hummock::value::HummockValue; @@ -391,18 +445,18 @@ mod tests { async fn test_concat_iterator() { let sstable_store = mock_sstable_store(); let mut table_infos = vec![]; - for sst_id in 0..3 { - let start_index = sst_id * TEST_KEYS_COUNT; - let end_index = (sst_id + 1) * TEST_KEYS_COUNT; - let table = gen_test_sstable( + for object_id in 0..3 { + let start_index = object_id * TEST_KEYS_COUNT; + let end_index = (object_id + 1) * TEST_KEYS_COUNT; + let (_table, table_info) = gen_test_sstable_and_info( default_builder_opt_for_test(), - sst_id as u64, + object_id as u64, (start_index..end_index) .map(|i| (test_key_of(i), HummockValue::put(test_value_of(i)))), sstable_store.clone(), ) .await; - table_infos.push(table.get_sstable_info()); + table_infos.push(table_info); } let start_index = 5000; let end_index = 25000; @@ -411,8 +465,12 @@ mod tests { test_key_of(start_index).encode().into(), test_key_of(end_index).encode().into(), ); - let mut iter = - ConcatSstableIterator::new(table_infos.clone(), kr.clone(), sstable_store.clone()); + let mut iter = ConcatSstableIterator::new( + vec![0], + table_infos.clone(), + kr.clone(), + sstable_store.clone(), + ); iter.seek(FullKey::decode(&kr.left)).await.unwrap(); for idx in start_index..end_index { @@ -431,16 +489,24 @@ mod tests { test_key_of(30000).encode().into(), test_key_of(40000).encode().into(), ); - let mut iter = - ConcatSstableIterator::new(table_infos.clone(), kr.clone(), sstable_store.clone()); + let mut iter = ConcatSstableIterator::new( + vec![0], + table_infos.clone(), + kr.clone(), + sstable_store.clone(), + ); iter.seek(FullKey::decode(&kr.left)).await.unwrap(); assert!(!iter.is_valid()); let kr = KeyRange::new( test_key_of(start_index).encode().into(), test_key_of(40000).encode().into(), ); - let mut iter = - ConcatSstableIterator::new(table_infos.clone(), kr.clone(), sstable_store.clone()); + let mut iter = ConcatSstableIterator::new( + vec![0], + table_infos.clone(), + kr.clone(), + sstable_store.clone(), + ); iter.seek(FullKey::decode(&kr.left)).await.unwrap(); for idx in start_index..30000 { let key = iter.key(); @@ -459,8 +525,12 @@ mod tests { test_key_of(0).encode().into(), test_key_of(40000).encode().into(), ); - let mut iter = - ConcatSstableIterator::new(table_infos.clone(), kr.clone(), sstable_store.clone()); + let mut iter = ConcatSstableIterator::new( + vec![0], + table_infos.clone(), + kr.clone(), + sstable_store.clone(), + ); iter.seek(test_key_of(10000).to_ref()).await.unwrap(); assert!(iter.is_valid() && iter.cur_idx == 1 && iter.key() == test_key_of(10000).to_ref()); 
iter.seek(test_key_of(10001).to_ref()).await.unwrap(); @@ -479,8 +549,12 @@ mod tests { test_key_of(6000).encode().into(), test_key_of(16000).encode().into(), ); - let mut iter = - ConcatSstableIterator::new(table_infos.clone(), kr.clone(), sstable_store.clone()); + let mut iter = ConcatSstableIterator::new( + vec![0], + table_infos.clone(), + kr.clone(), + sstable_store.clone(), + ); iter.seek(test_key_of(17000).to_ref()).await.unwrap(); assert!(!iter.is_valid()); iter.seek(test_key_of(1).to_ref()).await.unwrap(); @@ -491,18 +565,18 @@ mod tests { async fn test_concat_iterator_seek_idx() { let sstable_store = mock_sstable_store(); let mut table_infos = vec![]; - for sst_id in 0..3 { - let start_index = sst_id * TEST_KEYS_COUNT + TEST_KEYS_COUNT / 2; - let end_index = (sst_id + 1) * TEST_KEYS_COUNT; - let table = gen_test_sstable( + for object_id in 0..3 { + let start_index = object_id * TEST_KEYS_COUNT + TEST_KEYS_COUNT / 2; + let end_index = (object_id + 1) * TEST_KEYS_COUNT; + let (_table, table_info) = gen_test_sstable_and_info( default_builder_opt_for_test(), - sst_id as u64, + object_id as u64, (start_index..end_index) .map(|i| (test_key_of(i), HummockValue::put(test_value_of(i)))), sstable_store.clone(), ) .await; - table_infos.push(table.get_sstable_info()); + table_infos.push(table_info); } // Test seek_idx. Result is dominated by given seek key rather than key range. @@ -510,10 +584,14 @@ mod tests { test_key_of(0).encode().into(), test_key_of(40000).encode().into(), ); - let mut iter = - ConcatSstableIterator::new(table_infos.clone(), kr.clone(), sstable_store.clone()); + let mut iter = ConcatSstableIterator::new( + vec![0], + table_infos.clone(), + kr.clone(), + sstable_store.clone(), + ); let sst = sstable_store - .sstable(&iter.tables[0], &mut iter.stats) + .sstable(&iter.sstables[0], &mut iter.stats) .await .unwrap(); let block_metas = &sst.value().meta.block_metas; @@ -521,18 +599,22 @@ mod tests { let block_2_smallest_key = block_metas[2].smallest_key.clone(); // Use block_1_smallest_key as seek key and result in the first KV of block 1. let seek_key = block_1_smallest_key.clone(); - iter.seek_idx(0, Some(seek_key.as_slice())).await.unwrap(); + iter.seek_idx(0, Some(FullKey::decode(&seek_key))) + .await + .unwrap(); assert!(iter.is_valid() && iter.key() == FullKey::decode(block_1_smallest_key.as_slice())); // Use prev_full_key(block_1_smallest_key) as seek key and result in the first KV of block // 1. let seek_key = prev_full_key(block_1_smallest_key.as_slice()); - iter.seek_idx(0, Some(seek_key.as_slice())).await.unwrap(); + iter.seek_idx(0, Some(FullKey::decode(&seek_key))) + .await + .unwrap(); assert!(iter.is_valid() && iter.key() == FullKey::decode(block_1_smallest_key.as_slice())); iter.next().await.unwrap(); let block_1_second_key = iter.key().to_vec(); // Use a big enough seek key and result in invalid iterator. let seek_key = test_key_of(30001); - iter.seek_idx(0, Some(seek_key.encode().as_slice())) + iter.seek_idx(table_infos.len() - 1, Some(seek_key.to_ref())) .await .unwrap(); assert!(!iter.is_valid()); @@ -542,16 +624,22 @@ mod tests { next_full_key(&block_1_smallest_key).into(), prev_full_key(&block_2_smallest_key).into(), ); - let mut iter = - ConcatSstableIterator::new(table_infos.clone(), kr.clone(), sstable_store.clone()); + let mut iter = ConcatSstableIterator::new( + vec![0], + table_infos.clone(), + kr.clone(), + sstable_store.clone(), + ); // Use block_2_smallest_key as seek key and result in invalid iterator. 
- let seek_key = block_2_smallest_key.clone(); - assert!(KeyComparator::compare_encoded_full_key(&seek_key, &kr.right) == Ordering::Greater); - iter.seek_idx(0, Some(seek_key.as_slice())).await.unwrap(); + let seek_key = FullKey::decode(&block_2_smallest_key); + assert!(seek_key.cmp(&FullKey::decode(&kr.right)) == Ordering::Greater); + iter.seek_idx(0, Some(seek_key)).await.unwrap(); assert!(!iter.is_valid()); // Use a small enough seek key and result in the second KV of block 1. let seek_key = test_key_of(0).encode(); - iter.seek_idx(0, Some(seek_key.as_slice())).await.unwrap(); + iter.seek_idx(0, Some(FullKey::decode(&seek_key))) + .await + .unwrap(); assert!(iter.is_valid()); assert_eq!(iter.key(), block_1_second_key.to_ref()); // Use None seek key and result in the second KV of block 1. diff --git a/src/storage/src/hummock/compactor/mod.rs b/src/storage/src/hummock/compactor/mod.rs index c84188dbd588a..95997d8fd51f4 100644 --- a/src/storage/src/hummock/compactor/mod.rs +++ b/src/storage/src/hummock/compactor/mod.rs @@ -23,6 +23,7 @@ pub(super) mod task_progress; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; +use std::ops::Div; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -37,6 +38,7 @@ use futures::future::try_join_all; use futures::{stream, StreamExt}; pub use iterator::ConcatSstableIterator; use itertools::Itertools; +use risingwave_common::util::resource_util; use risingwave_hummock_sdk::compact::compact_task_to_string; use risingwave_hummock_sdk::filter_key_extractor::FilterKeyExtractorImpl; use risingwave_hummock_sdk::key::FullKey; @@ -44,9 +46,12 @@ use risingwave_hummock_sdk::table_stats::{add_table_stats_map, TableStats, Table use risingwave_hummock_sdk::LocalSstableInfo; use risingwave_pb::hummock::compact_task::TaskStatus; use risingwave_pb::hummock::subscribe_compact_tasks_response::Task; -use risingwave_pb::hummock::{CompactTask, CompactTaskProgress, SubscribeCompactTasksResponse}; +use risingwave_pb::hummock::{ + CompactTask, CompactTaskProgress, CompactorWorkload, SubscribeCompactTasksResponse, +}; use risingwave_rpc_client::HummockMetaClient; pub use shared_buffer_compact::compact; +use sysinfo::{CpuRefreshKind, ProcessExt, ProcessRefreshKind, RefreshKind, System, SystemExt}; use tokio::sync::oneshot::{Receiver, Sender}; use tokio::task::JoinHandle; @@ -55,7 +60,7 @@ use self::task_progress::TaskProgress; use super::multi_builder::CapacitySplitTableBuilder; use super::{HummockResult, SstableBuilderOptions, XorFilterBuilder}; use crate::hummock::compactor::compaction_utils::{ - build_multi_compaction_filter, estimate_memory_use_for_compaction, generate_splits, + build_multi_compaction_filter, estimate_state_for_compaction, generate_splits, }; use crate::hummock::compactor::compactor_runner::CompactorRunner; use crate::hummock::compactor::task_progress::TaskProgressGuard; @@ -90,18 +95,22 @@ impl Compactor { let context = compactor_context.clone(); // Set a watermark SST id to prevent full GC from accidentally deleting SSTs for in-progress // write op. The watermark is invalidated when this method exits. - let tracker_id = match context.sstable_id_manager.add_watermark_sst_id(None).await { + let tracker_id = match context + .sstable_object_id_manager + .add_watermark_object_id(None) + .await + { Ok(tracker_id) => tracker_id, Err(err) => { - tracing::warn!("Failed to track pending SST id. 
{:#?}", err); - return TaskStatus::TrackSstIdFailed; + tracing::warn!("Failed to track pending SST object id. {:#?}", err); + return TaskStatus::TrackSstObjectIdFailed; } }; - let sstable_id_manager_clone = context.sstable_id_manager.clone(); + let sstable_object_id_manager_clone = context.sstable_object_id_manager.clone(); let _guard = scopeguard::guard( - (tracker_id, sstable_id_manager_clone), - |(tracker_id, sstable_id_manager)| { - sstable_id_manager.remove_watermark_sst_id(tracker_id); + (tracker_id, sstable_object_id_manager_clone), + |(tracker_id, sstable_object_id_manager)| { + sstable_object_id_manager.remove_watermark_object_id(tracker_id); }, ); let group_label = compact_task.compaction_group_id.to_string(); @@ -153,11 +162,12 @@ impl Compactor { .with_label_values(&[compact_task.input_ssts[0].level_idx.to_string().as_str()]) .start_timer(); - let need_quota = estimate_memory_use_for_compaction(&compact_task); + let (need_quota, file_counts) = estimate_state_for_compaction(&compact_task); tracing::info!( - "Ready to handle compaction task: {} need memory: {} target_level {} compression_algorithm {:?}", + "Ready to handle compaction task: {} need memory: {} input_file_counts {} target_level {} compression_algorithm {:?}", compact_task.task_id, need_quota, + file_counts, compact_task.target_level, compact_task.compression_algorithm, ); @@ -264,7 +274,7 @@ impl Compactor { context.compactor_metrics.compact_task_pending_num.dec(); for level in &compact_task.input_ssts { for table in &level.table_infos { - context.sstable_store.delete_cache(table.id); + context.sstable_store.delete_cache(table.get_object_id()); } } task_status @@ -333,13 +343,21 @@ impl Compactor { ) -> (JoinHandle<()>, Sender<()>) { type CompactionShutdownMap = Arc>>>; let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel(); - let stream_retry_interval = Duration::from_secs(60); + let stream_retry_interval = Duration::from_secs(30); let task_progress = compactor_context.task_progress_manager.clone(); let task_progress_update_interval = Duration::from_millis(1000); + let cpu_core_num = resource_util::cpu::total_cpu_available() as u32; + + let mut system = + System::new_with_specifics(RefreshKind::new().with_cpu(CpuRefreshKind::everything())); + let pid = sysinfo::get_current_pid().unwrap(); + let join_handle = tokio::spawn(async move { let shutdown_map = CompactionShutdownMap::default(); let mut min_interval = tokio::time::interval(stream_retry_interval); let mut task_progress_interval = tokio::time::interval(task_progress_update_interval); + let mut workload_collect_interval = tokio::time::interval(Duration::from_secs(60)); + // This outer loop is to recreate stream. 'start_stream: loop { tokio::select! { @@ -354,7 +372,7 @@ impl Compactor { let config = compactor_context.lock_config().await; let mut stream = match hummock_meta_client - .subscribe_compact_tasks(config.max_concurrent_task_number) + .subscribe_compact_tasks(config.max_concurrent_task_number, cpu_core_num) .await { Ok(stream) => { @@ -372,6 +390,8 @@ impl Compactor { drop(config); let executor = compactor_context.compaction_executor.clone(); + let mut last_workload = CompactorWorkload::default(); + // This inner loop is to consume stream or report task progress. 'consume_stream: loop { let message = tokio::select! 
{ @@ -384,12 +404,34 @@ impl Compactor { num_ssts_uploaded: progress.num_ssts_uploaded.load(Ordering::Relaxed), }); } - if let Err(e) = hummock_meta_client.report_compaction_task_progress(progress_list).await { + + if let Err(e) = hummock_meta_client.compactor_heartbeat(progress_list, last_workload.clone()).await { // ignore any errors while trying to report task progress tracing::warn!("Failed to report task progress. {e:?}"); } continue; } + + _ = workload_collect_interval.tick() => { + let refresh_result = system.refresh_process_specifics(pid, ProcessRefreshKind::new().with_cpu()); + debug_assert!(refresh_result); + let cpu = if let Some(process) = system.process(pid) { + process.cpu_usage().div(cpu_core_num as f32) as u32 + } else { + tracing::warn!("fail to get process pid {:?}", pid); + 0 + }; + + tracing::debug!("compactor cpu usage {cpu}"); + let workload = CompactorWorkload { + cpu, + }; + + last_workload = workload.clone(); + + continue; + } + message = stream.next() => { message }, @@ -408,15 +450,13 @@ impl Compactor { let shutdown = shutdown_map.clone(); let context = compactor_context.clone(); let meta_client = hummock_meta_client.clone(); + executor.spawn(async move { match task { Task::CompactTask(compact_task) => { let (tx, rx) = tokio::sync::oneshot::channel(); let task_id = compact_task.task_id; - shutdown - .lock() - .unwrap() - .insert(task_id, tx); + shutdown.lock().unwrap().insert(task_id, tx); Compactor::compact(context, compact_task, rx).await; shutdown.lock().unwrap().remove(&task_id); } @@ -454,15 +494,15 @@ impl Compactor { "Cancellation of compaction task failed. task_id: {}", cancel_compact_task.task_id ); - } - } else { - tracing::warn!( + } + } else { + tracing::warn!( "Attempting to cancel non-existent compaction task. 
task_id: {}", cancel_compact_task.task_id ); - } } } + } }); } Some(Err(e)) => { @@ -592,7 +632,7 @@ impl Compactor { // Don't allow two SSTs to share same user key sst_builder - .add_full_key(&iter_key, value, is_new_user_key) + .add_full_key(iter_key, value, is_new_user_key) .await?; iter.next().await?; @@ -732,7 +772,7 @@ impl Compactor { task_progress: Option>, ) -> HummockResult<(Vec, CompactionStatistics)> { let builder_factory = RemoteBuilderFactory:: { - sstable_id_manager: self.context.sstable_id_manager.clone(), + sstable_object_id_manager: self.context.sstable_object_id_manager.clone(), limiter: self.context.read_memory_limiter.clone(), options: self.options.clone(), policy: self.task_config.cache_policy, diff --git a/src/storage/src/hummock/error.rs b/src/storage/src/hummock/error.rs index 4690d3e562345..55465d57f797b 100644 --- a/src/storage/src/hummock/error.rs +++ b/src/storage/src/hummock/error.rs @@ -53,8 +53,8 @@ enum HummockErrorInner { CompactionExecutor(String), #[error("TieredCache error {0}.")] TieredCache(String), - #[error("SstIdTracker error {0}.")] - SstIdTrackerError(String), + #[error("SstObjectIdTracker error {0}.")] + SstObjectIdTrackerError(String), #[error("CompactionGroup error {0}.")] CompactionGroupError(String), #[error("SstableUpload error {0}.")] @@ -134,8 +134,8 @@ impl HummockError { HummockErrorInner::CompactionExecutor(error.to_string()).into() } - pub fn sst_id_tracker_error(error: impl ToString) -> HummockError { - HummockErrorInner::SstIdTrackerError(error.to_string()).into() + pub fn sst_object_id_tracker_error(error: impl ToString) -> HummockError { + HummockErrorInner::SstObjectIdTrackerError(error.to_string()).into() } pub fn compaction_group_error(error: impl ToString) -> HummockError { diff --git a/src/storage/src/hummock/event_handler/hummock_event_handler.rs b/src/storage/src/hummock/event_handler/hummock_event_handler.rs index d8dcf30393e8e..35efed6652e2d 100644 --- a/src/storage/src/hummock/event_handler/hummock_event_handler.rs +++ b/src/storage/src/hummock/event_handler/hummock_event_handler.rs @@ -41,7 +41,9 @@ use crate::hummock::store::version::{ HummockReadVersion, StagingData, StagingSstableInfo, VersionUpdate, }; use crate::hummock::utils::validate_table_key_range; -use crate::hummock::{HummockError, HummockResult, MemoryLimiter, SstableIdManagerRef, TrackerId}; +use crate::hummock::{ + HummockError, HummockResult, MemoryLimiter, SstableObjectIdManagerRef, TrackerId, +}; use crate::opts::StorageOpts; use crate::store::SyncResult; @@ -106,7 +108,7 @@ pub struct HummockEventHandler { last_instance_id: LocalInstanceId, - sstable_id_manager: SstableIdManagerRef, + sstable_object_id_manager: SstableObjectIdManagerRef, } async fn flush_imms( @@ -116,8 +118,8 @@ async fn flush_imms( ) -> HummockResult> { for epoch in &task_info.epochs { let _ = compactor_context - .sstable_id_manager - .add_watermark_sst_id(Some(*epoch)) + .sstable_object_id_manager + .add_watermark_object_id(Some(*epoch)) .await .inspect_err(|e| { error!("unable to set watermark sst id. 
epoch: {}, {:?}", epoch, e); @@ -142,7 +144,7 @@ impl HummockEventHandler { let buffer_tracker = BufferTracker::from_storage_opts(&compactor_context.storage_opts); let write_conflict_detector = ConflictDetector::new_from_config(&compactor_context.storage_opts); - let sstable_id_manager = compactor_context.sstable_id_manager.clone(); + let sstable_object_id_manager = compactor_context.sstable_object_id_manager.clone(); let uploader = HummockUploader::new( pinned_version.clone(), Arc::new(move |payload, task_info| { @@ -161,7 +163,7 @@ impl HummockEventHandler { read_version_mapping, uploader, last_instance_id: 0, - sstable_id_manager, + sstable_object_id_manager, } } @@ -359,8 +361,8 @@ impl HummockEventHandler { }); } - self.sstable_id_manager - .remove_watermark_sst_id(TrackerId::Epoch(HummockEpoch::MAX)); + self.sstable_object_id_manager + .remove_watermark_object_id(TrackerId::Epoch(HummockEpoch::MAX)); // Notify completion of the Clear event. let _ = notifier.send(()).inspect_err(|e| { @@ -412,8 +414,8 @@ impl HummockEventHandler { if let Some(conflict_detector) = self.write_conflict_detector.as_ref() { conflict_detector.set_watermark(max_committed_epoch); } - self.sstable_id_manager - .remove_watermark_sst_id(TrackerId::Epoch( + self.sstable_object_id_manager + .remove_watermark_object_id(TrackerId::Epoch( self.pinned_version.load().max_committed_epoch(), )); diff --git a/src/storage/src/hummock/event_handler/uploader.rs b/src/storage/src/hummock/event_handler/uploader.rs index a11ecae8704ae..0b08a214ecc9a 100644 --- a/src/storage/src/hummock/event_handler/uploader.rs +++ b/src/storage/src/hummock/event_handler/uploader.rs @@ -812,9 +812,10 @@ mod tests { ) -> Vec { let start_full_key = FullKey::new(TEST_TABLE_ID, TableKey(dummy_table_key()), start_epoch); let end_full_key = FullKey::new(TEST_TABLE_ID, TableKey(dummy_table_key()), end_epoch); - let gen_sst_id = (start_epoch << 8) + end_epoch; + let gen_sst_object_id = (start_epoch << 8) + end_epoch; vec![LocalSstableInfo::for_test(SstableInfo { - id: gen_sst_id, + object_id: gen_sst_object_id, + sst_id: gen_sst_object_id, key_range: Some(KeyRange { left: start_full_key.encode(), right: end_full_key.encode(), @@ -825,7 +826,6 @@ mod tests { meta_offset: 0, stale_key_count: 0, total_key_count: 0, - divide_version: 0, uncompressed_file_size: 0, min_epoch: 0, max_epoch: 0, diff --git a/src/storage/src/hummock/hummock_meta_client.rs b/src/storage/src/hummock/hummock_meta_client.rs index 2c4e0e8af1849..b91cfc03f246d 100644 --- a/src/storage/src/hummock/hummock_meta_client.rs +++ b/src/storage/src/hummock/hummock_meta_client.rs @@ -17,9 +17,10 @@ use std::sync::Arc; use async_trait::async_trait; use futures::stream::BoxStream; use risingwave_hummock_sdk::table_stats::TableStatsMap; -use risingwave_hummock_sdk::{HummockSstableId, LocalSstableInfo, SstIdRange}; +use risingwave_hummock_sdk::{HummockSstableObjectId, LocalSstableInfo, SstObjectIdRange}; use risingwave_pb::hummock::{ - CompactTask, CompactTaskProgress, HummockSnapshot, HummockVersion, VacuumTask, + CompactTask, CompactTaskProgress, CompactorWorkload, HummockSnapshot, HummockVersion, + VacuumTask, }; use risingwave_rpc_client::error::Result; use risingwave_rpc_client::{CompactTaskItem, HummockMetaClient, MetaClient}; @@ -88,7 +89,7 @@ impl HummockMetaClient for MonitoredHummockMetaClient { unreachable!("Currently CNs should not call this function") } - async fn get_new_sst_ids(&self, number: u32) -> Result { + async fn get_new_sst_ids(&self, number: u32) -> Result { 
self.stats.get_new_sst_ids_counts.inc(); let timer = self.stats.get_new_sst_ids_latency.start_timer(); let res = self.meta_client.get_new_sst_ids(number).await; @@ -122,18 +123,20 @@ impl HummockMetaClient for MonitoredHummockMetaClient { async fn subscribe_compact_tasks( &self, max_concurrent_task_number: u64, + cpu_core_num: u32, ) -> Result> { self.meta_client - .subscribe_compact_tasks(max_concurrent_task_number) + .subscribe_compact_tasks(max_concurrent_task_number, cpu_core_num) .await } - async fn report_compaction_task_progress( + async fn compactor_heartbeat( &self, progress: Vec, + workload: CompactorWorkload, ) -> Result<()> { self.meta_client - .report_compaction_task_progress(progress) + .compactor_heartbeat(progress, workload) .await } @@ -152,8 +155,8 @@ impl HummockMetaClient for MonitoredHummockMetaClient { .await } - async fn report_full_scan_task(&self, sst_ids: Vec) -> Result<()> { - self.meta_client.report_full_scan_task(sst_ids).await + async fn report_full_scan_task(&self, object_ids: Vec) -> Result<()> { + self.meta_client.report_full_scan_task(object_ids).await } async fn trigger_full_gc(&self, sst_retention_time_sec: u64) -> Result<()> { diff --git a/src/storage/src/hummock/iterator/concat_inner.rs b/src/storage/src/hummock/iterator/concat_inner.rs index ed90916485550..79c0bd2fc96ef 100644 --- a/src/storage/src/hummock/iterator/concat_inner.rs +++ b/src/storage/src/hummock/iterator/concat_inner.rs @@ -19,7 +19,6 @@ use std::sync::Arc; use itertools::Itertools; use risingwave_common::must_match; use risingwave_hummock_sdk::key::FullKey; -use risingwave_hummock_sdk::KeyComparator; use risingwave_pb::hummock::SstableInfo; use crate::hummock::iterator::{DirectionEnum, HummockIterator, HummockIteratorDirection}; @@ -195,22 +194,17 @@ impl HummockIterator for ConcatIteratorInner { fn seek<'a>(&'a mut self, key: FullKey<&'a [u8]>) -> Self::SeekFuture<'a> { async move { - let encoded_key = key.encode(); let table_idx = self .tables .partition_point(|table| match Self::Direction::direction() { DirectionEnum::Forward => { - let ord = KeyComparator::compare_encoded_full_key( - table.smallest_key(), - &encoded_key[..], - ); + let ord = FullKey::decode(table.smallest_key()).cmp(&key); + ord == Less || ord == Equal } DirectionEnum::Backward => { - let ord = KeyComparator::compare_encoded_full_key( - table.largest_key(), - &encoded_key[..], - ); + let ord = FullKey::decode(table.largest_key()).cmp(&key); + ord == Greater || ord == Equal } }) diff --git a/src/storage/src/hummock/iterator/test_utils.rs b/src/storage/src/hummock/iterator/test_utils.rs index a462392ce1136..e71ee4d168381 100644 --- a/src/storage/src/hummock/iterator/test_utils.rs +++ b/src/storage/src/hummock/iterator/test_utils.rs @@ -19,7 +19,7 @@ use bytes::Bytes; use itertools::Itertools; use risingwave_common::catalog::TableId; use risingwave_hummock_sdk::key::{FullKey, UserKey}; -use risingwave_hummock_sdk::{HummockEpoch, HummockSstableId}; +use risingwave_hummock_sdk::{HummockEpoch, HummockSstableObjectId}; use risingwave_object_store::object::{ InMemObjectStore, ObjectStore, ObjectStoreImpl, ObjectStoreRef, }; @@ -123,7 +123,7 @@ pub fn iterator_test_value_of(idx: usize) -> Vec { /// correctness of their implementations by comparing the got value and the expected value /// generated by `test_key_of` and `test_value_of`. 
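The hummock_meta_client hunk above makes three related API changes: `get_new_sst_ids` now returns a `SstObjectIdRange`, `subscribe_compact_tasks` reports the compactor's `cpu_core_num` at subscription time, and the dedicated `report_compaction_task_progress` RPC is folded into a combined `compactor_heartbeat` that also carries a `CompactorWorkload` sample. A minimal sketch of the combined call shape, assuming a tokio runtime; the types below are illustrative stand-ins, not the generated protobuf messages:

// Stand-in types for illustration; the real messages are generated
// protobufs with more fields.
struct CompactTaskProgress { task_id: u64 }
struct CompactorWorkload { cpu: u32 }

struct MetaClientStub;

impl MetaClientStub {
    // One periodic RPC now carries both task progress and a load sample,
    // replacing the dedicated report_compaction_task_progress call.
    async fn compactor_heartbeat(
        &self,
        progress: Vec<CompactTaskProgress>,
        workload: CompactorWorkload,
    ) -> Result<(), String> {
        let ids: Vec<u64> = progress.iter().map(|p| p.task_id).collect();
        println!("heartbeat: tasks={ids:?}, cpu={}", workload.cpu);
        Ok(())
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    MetaClientStub
        .compactor_heartbeat(
            vec![CompactTaskProgress { task_id: 1 }],
            CompactorWorkload { cpu: 37 },
        )
        .await
}

Folding the two reports into one heartbeat keeps progress and load sampling on a single periodic path instead of two independent timers.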
pub async fn gen_iterator_test_sstable_base( - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, opts: SstableBuilderOptions, idx_mapping: impl Fn(usize) -> usize, sstable_store: SstableStoreRef, @@ -131,7 +131,7 @@ pub async fn gen_iterator_test_sstable_base( ) -> Sstable { gen_test_sstable( opts, - sst_id, + object_id, (0..total).map(|i| { ( iterator_test_key_of(idx_mapping(i)), @@ -145,13 +145,13 @@ pub async fn gen_iterator_test_sstable_base( // key=[idx, epoch], value pub async fn gen_iterator_test_sstable_from_kv_pair( - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, kv_pairs: Vec<(usize, u64, HummockValue>)>, sstable_store: SstableStoreRef, ) -> Sstable { gen_test_sstable( default_builder_opt_for_test(), - sst_id, + object_id, kv_pairs .into_iter() .map(|kv| (iterator_test_key_of_epoch(kv.0, kv.1), kv.2)), @@ -162,7 +162,7 @@ pub async fn gen_iterator_test_sstable_from_kv_pair( // key=[idx, epoch], value pub async fn gen_iterator_test_sstable_with_range_tombstones( - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, kv_pairs: Vec<(usize, u64, HummockValue>)>, delete_ranges: Vec<(usize, usize, u64)>, sstable_store: SstableStoreRef, @@ -180,7 +180,7 @@ pub async fn gen_iterator_test_sstable_with_range_tombstones( .collect_vec(); gen_test_sstable_with_range_tombstone( default_builder_opt_for_test(), - sst_id, + object_id, kv_pairs .into_iter() .map(|kv| (iterator_test_key_of_epoch(kv.0, kv.1), kv.2)), @@ -199,7 +199,7 @@ pub async fn gen_merge_iterator_interleave_test_sstable_iters( let mut result = vec![]; for i in 0..count { let table = gen_iterator_test_sstable_base( - i as HummockSstableId, + i as HummockSstableObjectId, default_builder_opt_for_test(), |x| x * count + i, sstable_store.clone(), @@ -217,7 +217,7 @@ pub async fn gen_merge_iterator_interleave_test_sstable_iters( } pub async fn gen_iterator_test_sstable_with_incr_epoch( - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, opts: SstableBuilderOptions, idx_mapping: impl Fn(usize) -> usize, sstable_store: SstableStoreRef, @@ -226,7 +226,7 @@ pub async fn gen_iterator_test_sstable_with_incr_epoch( ) -> Sstable { gen_test_sstable( opts, - sst_id, + object_id, (0..total).map(|i| { ( iterator_test_key_of_epoch(idx_mapping(i), epoch_base + i as u64), diff --git a/src/storage/src/hummock/mod.rs b/src/storage/src/hummock/mod.rs index d655e0bf6c8f8..586e06aebe7cf 100644 --- a/src/storage/src/hummock/mod.rs +++ b/src/storage/src/hummock/mod.rs @@ -65,6 +65,7 @@ pub mod store; pub mod vacuum; mod validator; pub mod value; +pub mod write_limiter; pub use error::*; pub use risingwave_common::cache::{CacheableEntry, LookupResult, LruCache}; @@ -79,9 +80,7 @@ use self::event_handler::ReadVersionMappingType; use self::iterator::HummockIterator; pub use self::sstable_store::*; use super::monitor::HummockStateStoreMetrics; -#[cfg(any(test, feature = "test"))] -use crate::hummock::backup_reader::BackupReader; -use crate::hummock::backup_reader::BackupReaderRef; +use crate::hummock::backup_reader::{BackupReader, BackupReaderRef}; use crate::hummock::compactor::CompactorContext; use crate::hummock::event_handler::hummock_event_handler::BufferTracker; use crate::hummock::event_handler::{HummockEvent, HummockEventHandler}; @@ -91,6 +90,7 @@ use crate::hummock::shared_buffer::shared_buffer_batch::SharedBufferBatch; use crate::hummock::sstable::SstableIteratorReadOptions; use crate::hummock::sstable_store::{SstableStoreRef, TableHolder}; use 
crate::hummock::store::version::HummockVersionReader; +use crate::hummock::write_limiter::{WriteLimiter, WriteLimiterRef}; use crate::monitor::{CompactorMetrics, StoreLocalStatistic}; use crate::store::{gen_min_epoch, NewLocalOptions, ReadOptions}; @@ -134,6 +134,8 @@ pub struct HummockStorage { /// current_epoch < min_current_epoch cannot be read. min_current_epoch: Arc, + + write_limiter: WriteLimiterRef, } impl HummockStorage { @@ -142,19 +144,24 @@ impl HummockStorage { pub async fn new( options: Arc, sstable_store: SstableStoreRef, - backup_reader: BackupReaderRef, hummock_meta_client: Arc, notification_client: impl NotificationClient, state_store_metrics: Arc, tracing: Arc, compactor_metrics: Arc, ) -> HummockResult { - let sstable_id_manager = Arc::new(SstableIdManager::new( + let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), options.sstable_id_remote_fetch_number, )); - + let backup_reader = BackupReader::new( + &options.backup_storage_url, + &options.backup_storage_directory, + ) + .await + .map_err(HummockError::read_backup_error)?; let filter_key_extractor_manager = Arc::new(FilterKeyExtractorManager::default()); + let write_limiter = Arc::new(WriteLimiter::default()); let (event_tx, mut event_rx) = unbounded_channel(); let observer_manager = ObserverManager::new( @@ -163,6 +170,7 @@ impl HummockStorage { filter_key_extractor_manager.clone(), backup_reader.clone(), event_tx.clone(), + write_limiter.clone(), ), ) .await; @@ -185,7 +193,7 @@ impl HummockStorage { sstable_store.clone(), hummock_meta_client.clone(), compactor_metrics.clone(), - sstable_id_manager.clone(), + sstable_object_id_manager.clone(), filter_key_extractor_manager.clone(), CompactorRuntimeConfig::default(), )); @@ -217,6 +225,7 @@ impl HummockStorage { tracing, backup_reader, min_current_epoch, + write_limiter, }; tokio::spawn(hummock_event_handler.start_hummock_event_handler_worker()); @@ -241,6 +250,7 @@ impl HummockStorage { self.hummock_event_sender.clone(), self.buffer_tracker.get_memory_limiter().clone(), self.tracing.clone(), + self.write_limiter.clone(), option, ) } @@ -249,8 +259,8 @@ impl HummockStorage { self.context.sstable_store.clone() } - pub fn sstable_id_manager(&self) -> &SstableIdManagerRef { - &self.context.sstable_id_manager + pub fn sstable_object_id_manager(&self) -> &SstableObjectIdManagerRef { + &self.context.sstable_object_id_manager } pub fn filter_key_extractor_manager(&self) -> &FilterKeyExtractorManagerRef { @@ -264,6 +274,10 @@ impl HummockStorage { pub fn get_pinned_version(&self) -> PinnedVersion { self.pinned_version.load().deref().deref().clone() } + + pub fn backup_reader(&self) -> BackupReaderRef { + self.backup_reader.clone() + } } #[cfg(any(test, feature = "test"))] @@ -319,7 +333,6 @@ impl HummockStorage { Self::new( options, sstable_store, - BackupReader::unused(), hummock_meta_client, notification_client, Arc::new(HummockStateStoreMetrics::unused()), diff --git a/src/storage/src/hummock/observer_manager.rs b/src/storage/src/hummock/observer_manager.rs index 8f7c519c55ea3..5065b4f2619b7 100644 --- a/src/storage/src/hummock/observer_manager.rs +++ b/src/storage/src/hummock/observer_manager.rs @@ -21,20 +21,20 @@ use risingwave_hummock_sdk::filter_key_extractor::{ }; use risingwave_pb::catalog::Table; use risingwave_pb::hummock::version_update_payload; +use risingwave_pb::meta::relation::RelationInfo; use risingwave_pb::meta::subscribe_response::{Info, Operation}; use risingwave_pb::meta::SubscribeResponse; use 
tokio::sync::mpsc::UnboundedSender; use crate::hummock::backup_reader::BackupReaderRef; use crate::hummock::event_handler::HummockEvent; +use crate::hummock::write_limiter::WriteLimiterRef; pub struct HummockObserverNode { filter_key_extractor_manager: FilterKeyExtractorManagerRef, - backup_reader: BackupReaderRef, - + write_limiter: WriteLimiterRef, version_update_sender: UnboundedSender, - version: u64, } @@ -47,19 +47,25 @@ impl ObserverState for HummockObserverNode { }; match info.to_owned() { - Info::Table(table_catalog) => { - assert!( - resp.version > self.version, - "resp version={:?}, current version={:?}", - resp.version, - self.version - ); - - self.handle_catalog_notification(resp.operation(), table_catalog); - - self.version = resp.version; + Info::RelationGroup(relation_group) => { + for relation in relation_group.relations { + match relation.relation_info.unwrap() { + RelationInfo::Table(table_catalog) => { + assert!( + resp.version > self.version, + "resp version={:?}, current version={:?}", + resp.version, + self.version + ); + + self.handle_catalog_notification(resp.operation(), table_catalog); + + self.version = resp.version; + } + _ => panic!("error type notification"), + }; + } } - Info::HummockVersionDeltas(hummock_version_deltas) => { let _ = self .version_update_sender @@ -75,6 +81,11 @@ impl ObserverState for HummockObserverNode { self.backup_reader.try_refresh_manifest(id.id); } + Info::HummockWriteLimits(write_limits) => { + self.write_limiter + .update_write_limits(write_limits.write_limits); + } + _ => { panic!("error type notification"); } @@ -93,6 +104,12 @@ impl ObserverState for HummockObserverNode { .expect("should get meta backup manifest id") .id, ); + self.write_limiter.update_write_limits( + snapshot + .hummock_write_limits + .expect("should get hummock_write_limits") + .write_limits, + ); let _ = self .version_update_sender .send(HummockEvent::VersionUpdate( @@ -115,12 +132,14 @@ impl HummockObserverNode { filter_key_extractor_manager: FilterKeyExtractorManagerRef, backup_reader: BackupReaderRef, version_update_sender: UnboundedSender, + write_limiter: WriteLimiterRef, ) -> Self { Self { filter_key_extractor_manager, backup_reader, version_update_sender, version: 0, + write_limiter, } } diff --git a/src/storage/src/hummock/sstable/backward_sstable_iterator.rs b/src/storage/src/hummock/sstable/backward_sstable_iterator.rs index c851a4256b083..b598b5cd68f92 100644 --- a/src/storage/src/hummock/sstable/backward_sstable_iterator.rs +++ b/src/storage/src/hummock/sstable/backward_sstable_iterator.rs @@ -17,7 +17,6 @@ use std::future::Future; use std::sync::Arc; use risingwave_hummock_sdk::key::FullKey; -use risingwave_hummock_sdk::KeyComparator; use crate::hummock::iterator::{Backward, HummockIterator}; use crate::hummock::sstable::SstableIteratorReadOptions; @@ -55,7 +54,11 @@ impl BackwardSstableIterator { } /// Seeks to a block, and then seeks to the key if `seek_key` is given. 
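The backward-iterator hunk that follows (and its forward twin later in this diff) replaces `KeyComparator::compare_encoded_full_key` over encoded byte slices with a decode-then-`cmp` on typed `FullKey`s, so `seek_idx` can take `Option<FullKey<&[u8]>>` and callers no longer pre-encode the seek key. A rough sketch of the pattern with a hypothetical two-field key type; the real `FullKey` also carries a table id, and the inverted-epoch ordering below is an assumption for illustration:

// Hypothetical key type; derive Ord compares user_key first, then epoch_rev.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Key<'a> {
    user_key: &'a [u8],
    epoch_rev: u64, // stored inverted so newer epochs sort first (assumed)
}

fn main() {
    // smallest_key of each block, decoded once and compared as values.
    let smallest_keys = [
        Key { user_key: b"a", epoch_rev: !5u64 },
        Key { user_key: b"c", epoch_rev: !5u64 },
        Key { user_key: b"e", epoch_rev: !5u64 },
    ];
    let target = Key { user_key: b"d", epoch_rev: !5u64 };

    // Same shape as the hunk: keep blocks whose smallest key <= target,
    // then step back one to land on the block that may contain it.
    let idx = smallest_keys
        .partition_point(|k| k <= &target)
        .saturating_sub(1);
    assert_eq!(idx, 1); // the block starting at "c" may contain "d"
}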
- async fn seek_idx(&mut self, idx: isize, seek_key: Option<&[u8]>) -> HummockResult<()> { + async fn seek_idx( + &mut self, + idx: isize, + seek_key: Option>, + ) -> HummockResult<()> { if idx >= self.sst.value().block_count() as isize || idx < 0 { self.block_iter = None; } else { @@ -104,7 +107,7 @@ impl HummockIterator for BackwardSstableIterator { } fn key(&self) -> FullKey<&[u8]> { - FullKey::decode(self.block_iter.as_ref().expect("no block iter").key()) + self.block_iter.as_ref().expect("no block iter").key() } fn value(&self) -> HummockValue<&[u8]> { @@ -128,8 +131,6 @@ impl HummockIterator for BackwardSstableIterator { fn seek<'a>(&'a mut self, key: FullKey<&'a [u8]>) -> Self::SeekFuture<'a> { async move { - let encoded_key = key.encode(); - let encoded_key_slice = encoded_key.as_slice(); let block_idx = self .sst .value() @@ -139,16 +140,13 @@ impl HummockIterator for BackwardSstableIterator { // Compare by version comparator // Note: we are comparing against the `smallest_key` of the `block`, thus the // partition point should be `prev(<=)` instead of `<`. - let ord = KeyComparator::compare_encoded_full_key( - block_meta.smallest_key.as_slice(), - encoded_key_slice, - ); + let ord = FullKey::decode(&block_meta.smallest_key).cmp(&key); ord == Less || ord == Equal }) .saturating_sub(1); // considering the boundary of 0 let block_idx = block_idx as isize; - self.seek_idx(block_idx, Some(encoded_key_slice)).await?; + self.seek_idx(block_idx, Some(key)).await?; if !self.is_valid() { // Seek to prev block self.seek_idx(block_idx - 1, None).await?; diff --git a/src/storage/src/hummock/sstable/block.rs b/src/storage/src/hummock/sstable/block.rs index a4dc167d83a94..cdb36807f5a66 100644 --- a/src/storage/src/hummock/sstable/block.rs +++ b/src/storage/src/hummock/sstable/block.rs @@ -14,10 +14,12 @@ use std::cmp::Ordering; use std::io::{Read, Write}; +use std::mem::size_of; use std::ops::Range; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use risingwave_hummock_sdk::key::MAX_KEY_LEN; +use risingwave_common::catalog::TableId; +use risingwave_hummock_sdk::key::FullKey; use risingwave_hummock_sdk::KeyComparator; use {lz4, zstd}; @@ -29,25 +31,137 @@ pub const DEFAULT_BLOCK_SIZE: usize = 4 * 1024; pub const DEFAULT_RESTART_INTERVAL: usize = 16; pub const DEFAULT_ENTRY_SIZE: usize = 24; // table_id(u64) + primary_key(u64) + epoch(u64) +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, PartialEq, Debug)] +pub enum LenType { + u8 = 1, + u16 = 2, + u32 = 3, +} + +macro_rules! put_fn { + ($name:ident, $($value:ident: $type:ty),*) => { + fn $name(&self, buf: &mut T, $($value: $type),*) { + match *self { + LenType::u8 => { + $(buf.put_u8($value as u8);)* + }, + + LenType::u16 => { + $(buf.put_u16($value as u16);)* + }, + + LenType::u32 => { + $(buf.put_u32($value as u32);)* + }, + } + } + }; +} + +macro_rules! 
get_fn { + ($name:ident, $($type:ty),*) => { + #[allow(unused_parens)] + fn $name(&self, buf: &mut T) -> ($($type), *) { + match *self { + LenType::u8 => { + ($(buf.get_u8() as $type),*) + } + LenType::u16 => { + ($(buf.get_u16() as $type),*) + } + LenType::u32 => { + ($(buf.get_u32() as $type),*) + } + } + } + }; +} + +impl From for LenType { + fn from(value: u8) -> Self { + match value { + 1 => LenType::u8, + 2 => LenType::u16, + 3 => LenType::u32, + _ => { + panic!("unexpected type {}", value) + } + } + } +} + +impl LenType { + put_fn!(put, v1: usize); + + put_fn!(put2, v1: usize, v2: usize); + + get_fn!(get, usize); + + get_fn!(get2, usize, usize); + + fn new(len: usize) -> Self { + const U8_MAX: usize = u8::MAX as usize + 1; + const U16_MAX: usize = u16::MAX as usize + 1; + const U32_MAX: usize = u32::MAX as usize + 1; + + match len { + 0..U8_MAX => LenType::u8, + U8_MAX..U16_MAX => LenType::u16, + U16_MAX..U32_MAX => LenType::u32, + _ => unreachable!("unexpected LenType {}", len), + } + } + + fn len(&self) -> usize { + match *self { + Self::u8 => size_of::(), + Self::u16 => size_of::(), + Self::u32 => size_of::(), + } + } +} + +#[derive(Clone, Copy, Debug)] +pub struct RestartPoint { + pub offset: u32, + pub key_len_type: LenType, + pub value_len_type: LenType, +} + +impl RestartPoint { + fn size_of() -> usize { + // store key_len_type and value_len_type in u8 related to `BlockBuidler::build` + // encoding_value = (key_len_type << 4) | value_len_type + std::mem::size_of::() + std::mem::size_of::() + } +} + #[derive(Clone)] pub struct Block { /// Uncompressed entries data, with restart encoded restart points info. - data: Bytes, + pub data: Bytes, /// Uncompressed entried data length. data_len: usize, + + /// Table id of this block. + table_id: TableId, + /// Restart points. - restart_points: Vec, + restart_points: Vec, } impl Block { pub fn decode(buf: Bytes, uncompressed_capacity: usize) -> HummockResult { // Verify checksum. + let xxhash64_checksum = (&buf[buf.len() - 8..]).get_u64_le(); xxhash64_verify(&buf[..buf.len() - 8], xxhash64_checksum)?; // Decompress. let compression = CompressionAlgorithm::decode(&mut &buf[buf.len() - 9..buf.len() - 8])?; let compressed_data = &buf[..buf.len() - 9]; + let buf = match compression { CompressionAlgorithm::None => buf.slice(0..(buf.len() - 9)), CompressionAlgorithm::Lz4 => { @@ -76,19 +190,57 @@ impl Block { } pub fn decode_from_raw(buf: Bytes) -> Self { + let table_id = (&buf[buf.len() - 4..]).get_u32_le(); + // decode restart_points_type_index + let n_index = ((&buf[buf.len() - 8..buf.len() - 4]).get_u32_le()) as usize; + let index_data_len = size_of::() + n_index * RestartPoint::size_of(); + let data_len = buf.len() - 4 - index_data_len; + let mut restart_points_type_index_buf = &buf[data_len..buf.len() - 8]; + + let mut index_key_vec = Vec::with_capacity(n_index); + for _ in 0..n_index { + let offset = restart_points_type_index_buf.get_u32_le(); + let value = restart_points_type_index_buf.get_u8(); + let key_len_type = LenType::from(value >> 4); + let value_len_type = LenType::from(value & 0x0F); + + index_key_vec.push(RestartPoint { + offset, + key_len_type, + value_len_type, + }); + } + // Decode restart points. 
- let n_restarts = (&buf[buf.len() - 4..]).get_u32_le(); - let data_len = buf.len() - 4 - n_restarts as usize * 4; - let mut restart_points = Vec::with_capacity(n_restarts as usize); - let mut restart_points_buf = &buf[data_len..buf.len() - 4]; + let n_restarts = ((&buf[data_len - 4..]).get_u32_le()) as usize; + let restart_points_len = size_of::() + n_restarts * (size_of::()); + let restarts_end = data_len - 4; + let data_len = data_len - restart_points_len; + let mut restart_points = Vec::with_capacity(n_restarts); + let mut restart_points_buf = &buf[data_len..restarts_end]; + + let mut type_index: usize = 0; + for _ in 0..n_restarts { - restart_points.push(restart_points_buf.get_u32_le()); + let offset = restart_points_buf.get_u32_le(); + if type_index < index_key_vec.len() - 1 + && offset >= index_key_vec[type_index + 1].offset + { + type_index += 1; + } + + restart_points.push(RestartPoint { + offset, + key_len_type: index_key_vec[type_index].key_len_type, + value_len_type: index_key_vec[type_index].value_len_type, + }); } Block { data: buf, data_len, restart_points, + table_id: TableId::new(table_id), } } @@ -100,11 +252,17 @@ impl Block { } pub fn capacity(&self) -> usize { - self.data.len() + self.restart_points.capacity() * std::mem::size_of::() + self.data.len() + + self.restart_points.capacity() * std::mem::size_of::() + + std::mem::size_of::() + } + + pub fn table_id(&self) -> TableId { + self.table_id } /// Gets restart point by index. - pub fn restart_point(&self, index: usize) -> u32 { + pub fn restart_point(&self, index: usize) -> RestartPoint { self.restart_points[index] } @@ -113,18 +271,10 @@ impl Block { self.restart_points.len() } - /// Searches the index of the restart point that the given `offset` belongs to. - pub fn search_restart_point(&self, offset: usize) -> usize { - // Find the largest restart point that equals or less than the given offset. - self.restart_points - .partition_point(|&position| position <= offset as u32) - .saturating_sub(1) // Prevent from underflowing when given is smaller than the first. - } - /// Searches the index of the restart point by partition point. pub fn search_restart_partition_point
<P>
(&self, pred: P) -> usize where - P: FnMut(&u32) -> bool, + P: FnMut(&RestartPoint) -> bool, { self.restart_points.partition_point(pred) } @@ -146,42 +296,53 @@ pub struct KeyPrefix { value: usize, /// Used for calculating range, won't be encoded. offset: usize, + + len: usize, } impl KeyPrefix { - pub fn encode(&self, buf: &mut impl BufMut) { - buf.put_u16(self.overlap as u16); - if self.diff >= MAX_KEY_LEN { - buf.put_u16(MAX_KEY_LEN as u16); - buf.put_u32(self.diff as u32); - } else { - buf.put_u16(self.diff as u16); + // This function is used in BlockBuilder::add to provide a wrapper for encode since the + // KeyPrefix len field is only useful in the decode phase + pub fn new_without_len(overlap: usize, diff: usize, value: usize, offset: usize) -> Self { + KeyPrefix { + overlap, + diff, + value, + offset, + len: 0, // not used when encode } - buf.put_u32(self.value as u32); } +} + +impl KeyPrefix { + pub fn encode(&self, buf: &mut impl BufMut, key_len_type: LenType, value_len_type: LenType) { + key_len_type.put2(buf, self.overlap, self.diff); + value_len_type.put(buf, self.value); + } + + pub fn decode( + buf: &mut impl Buf, + offset: usize, + key_len_type: LenType, + value_len_type: LenType, + ) -> Self { + let (overlap, diff) = key_len_type.get2(buf); + let value = value_len_type.get(buf); + + let len = key_len_type.len() * 2 + value_len_type.len(); - pub fn decode(buf: &mut impl Buf, offset: usize) -> Self { - let overlap = buf.get_u16() as usize; - let mut diff = buf.get_u16() as usize; - if diff == MAX_KEY_LEN { - diff = buf.get_u32() as usize; - } - let value = buf.get_u32() as usize; Self { overlap, diff, value, offset, + len, } } /// Encoded length. fn len(&self) -> usize { - if self.diff >= MAX_KEY_LEN { - 12 // 2 + 2 + 4 + 4 - } else { - 8 // 2 + 2 + 4 - } + self.len } /// Gets overlap len. @@ -238,6 +399,11 @@ pub struct BlockBuilder { entry_count: usize, /// Compression algorithm. compression_algorithm: CompressionAlgorithm, + + table_id: Option, + // restart_points_type_index stores only the restart_point corresponding to each type change, + // as an index, in order to reduce space usage + restart_points_type_index: Vec, } impl BlockBuilder { @@ -252,6 +418,8 @@ impl BlockBuilder { last_key: vec![], entry_count: 0, compression_algorithm: options.compression_algorithm, + table_id: None, + restart_points_type_index: Vec::default(), } } @@ -262,44 +430,76 @@ impl BlockBuilder { /// # Format /// /// ```plain - /// For diff len < MAX_KEY_LEN (65536) - /// entry (kv pair): | overlap len (2B) | diff len (2B) | value len(4B) | diff key | value | - /// For diff len >= MAX_KEY_LEN (65536) - /// entry (kv pair): | overlap len (2B) | MAX_KEY_LEN (2B) | diff len (4B) | value len(4B) | diff key | value | + /// entry (kv pair): | overlap len (len_type) | diff len (len_type) | value len(len_type) | diff key | value | /// ``` /// /// # Panics /// /// Panic if key is not added in ASCEND order. 
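`KeyPrefix::decode` must read the overlap, diff, and value lengths with exactly the widths that `encode` used, which is why the chosen `LenType` pair is persisted per restart-point run rather than guessed per entry, and why the decoded `len` is now remembered on the struct instead of being recomputed from `diff`. A round-trip sketch with both widths fixed to one byte (the hunk threads the real `LenType` pair through instead), assuming the `bytes` crate:

use bytes::{Buf, BufMut, BytesMut};

fn main() {
    let (overlap, diff, value) = (3u8, 12u8, 40u8);

    // Encode: | overlap | diff | value |, each one byte wide in this run.
    let mut buf = BytesMut::new();
    buf.put_u8(overlap);
    buf.put_u8(diff);
    buf.put_u8(value);

    // Decode with the same widths; KeyPrefix::len() now just records the
    // 3 bytes consumed here.
    let mut r = &buf[..];
    let (o, d, v) = (r.get_u8(), r.get_u8(), r.get_u8());
    assert_eq!((o, d, v), (overlap, diff, value));
}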
- pub fn add(&mut self, key: &[u8], value: &[u8]) { + pub fn add(&mut self, full_key: FullKey<&[u8]>, value: &[u8]) { + let input_table_id = full_key.user_key.table_id.table_id(); + match self.table_id { + Some(current_table_id) => debug_assert_eq!(current_table_id, input_table_id), + None => self.table_id = Some(input_table_id), + } + #[cfg(debug_assertions)] + self.debug_valid(); + + let mut key: BytesMut = Default::default(); + full_key.encode_into_without_table_id(&mut key); if self.entry_count > 0 { debug_assert!(!key.is_empty()); debug_assert_eq!( - KeyComparator::compare_encoded_full_key(&self.last_key[..], key), + KeyComparator::compare_encoded_full_key(&self.last_key[..], &key[..]), Ordering::Less ); } // Update restart point if needed and calculate diff key. - let diff_key = if self.entry_count % self.restart_count == 0 { - self.restart_points.push(self.buf.len() as u32); - key + let k_type = LenType::new(key.len()); + let v_type = LenType::new(value.len()); + + let type_mismatch = if let Some(RestartPoint { + offset: _, + key_len_type: last_key_len_type, + value_len_type: last_value_len_type, + }) = self.restart_points_type_index.last() + { + k_type != *last_key_len_type || v_type != *last_value_len_type } else { - bytes_diff_below_max_key_length(&self.last_key, key) + true }; - let prefix = KeyPrefix { - overlap: key.len() - diff_key.len(), - diff: diff_key.len(), - value: value.len(), - offset: self.buf.len(), + let diff_key = if self.entry_count % self.restart_count == 0 || type_mismatch { + let offset = self.buf.len() as u32; + + self.restart_points.push(offset); + + if type_mismatch { + self.restart_points_type_index.push(RestartPoint { + offset, + key_len_type: k_type, + value_len_type: v_type, + }); + } + + key.as_ref() + } else { + bytes_diff_below_max_key_length(&self.last_key, &key[..]) }; - prefix.encode(&mut self.buf); + let prefix = KeyPrefix::new_without_len( + key.len() - diff_key.len(), + diff_key.len(), + value.len(), + self.buf.len(), + ); + + prefix.encode(&mut self.buf, k_type, v_type); self.buf.put_slice(diff_key); self.buf.put_slice(value); self.last_key.clear(); - self.last_key.extend_from_slice(key); + self.last_key.extend_from_slice(&key); self.entry_count += 1; } @@ -314,13 +514,20 @@ impl BlockBuilder { pub fn clear(&mut self) { self.buf.clear(); self.restart_points.clear(); + self.table_id = None; + self.restart_points_type_index.clear(); self.last_key.clear(); self.entry_count = 0; } /// Calculate block size without compression. pub fn uncompressed_block_size(&mut self) -> usize { - self.buf.len() + (self.restart_points.len() + 1) * std::mem::size_of::() + self.buf.len() + + (self.restart_points.len() + 1) * std::mem::size_of::() + + (RestartPoint::size_of()) // (offset + len_type(u8)) * len + * self.restart_points_type_index.len() + + std::mem::size_of::() // restart_points_type_index len + + std::mem::size_of::() // table_id len } /// Finishes building block. @@ -328,7 +535,7 @@ impl BlockBuilder { /// # Format /// /// ```plain - /// compressed: | entries | restart point 0 (4B) | ... | restart point N-1 (4B) | N (4B) | + /// compressed: | entries | restart point 0 (4B) | ... | restart point N-1 (4B) | N (4B) | restart point index 0 (5B)| ... | restart point index N-1 (5B) | N (4B) /// uncompressed: | compression method (1B) | crc32sum (4B) | /// ``` /// @@ -337,10 +544,32 @@ impl BlockBuilder { /// Panic if there is compression error. 
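In `add` above, a new restart point is forced whenever the `(key_len_type, value_len_type)` pair changes, and only those change points are appended to `restart_points_type_index`, so the block tail stores one index entry per run of identical widths rather than per restart point; `decode_from_raw` then walks the index with a single moving cursor. The run-length idea in miniature, with illustrative field names:

// `Types` stands in for the (key_len_type, value_len_type) pair.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Types { key_w: u8, value_w: u8 }

fn main() {
    // (restart point offset, length-type pair) as the builder sees them.
    let restart_points = [
        (0u32, Types { key_w: 1, value_w: 1 }),
        (40,   Types { key_w: 1, value_w: 1 }),
        (90,   Types { key_w: 2, value_w: 1 }), // key width grows here
        (160,  Types { key_w: 2, value_w: 1 }),
    ];

    // Record an index entry only when the pair changes, as `add` does.
    let mut type_index: Vec<(u32, Types)> = Vec::new();
    for (offset, t) in restart_points {
        if type_index.last().map(|(_, last)| *last != t).unwrap_or(true) {
            type_index.push((offset, t));
        }
    }
    assert_eq!(type_index.len(), 2); // entries at offsets 0 and 90 only
    assert_eq!(type_index[1].0, 90);
}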
pub fn build(&mut self) -> &[u8] { assert!(self.entry_count > 0); + for restart_point in &self.restart_points { self.buf.put_u32_le(*restart_point); } + self.buf.put_u32_le(self.restart_points.len() as u32); + for RestartPoint { + offset, + key_len_type, + value_len_type, + } in &self.restart_points_type_index + { + self.buf.put_u32_le(*offset); + + let mut value: u8 = 0; + value |= *key_len_type as u8; + value <<= 4; + value |= *value_len_type as u8; + + self.buf.put_u8(value); + } + + self.buf + .put_u32_le(self.restart_points_type_index.len() as u32); + + self.buf.put_u32_le(self.table_id.unwrap()); match self.compression_algorithm { CompressionAlgorithm::None => (), CompressionAlgorithm::Lz4 => { @@ -373,22 +602,42 @@ impl BlockBuilder { self.buf = writer.into_inner(); } }; + self.compression_algorithm.encode(&mut self.buf); let checksum = xxhash64_checksum(&self.buf); self.buf.put_u64_le(checksum); + self.buf.as_ref() } /// Approximate block len (uncompressed). pub fn approximate_len(&self) -> usize { - // block + restart_points + restart_points.len + compression_algorithm + checksum - self.buf.len() + 4 * self.restart_points.len() + 4 + 1 + 8 + // block + restart_points + restart_points.len + restart_points_type_indices + + // restart_points_type_indics.len compression_algorithm + checksum + self.buf.len() + + std::mem::size_of::() * self.restart_points.len() // restart_points + + std::mem::size_of::() // restart_points.len + + RestartPoint::size_of() * self.restart_points_type_index.len() // restart_points_type_indics + + std::mem::size_of::() // restart_points_type_indics.len + + std::mem::size_of::() // compression_algorithm + + std::mem::size_of::() // checksum + + std::mem::size_of::() // table_id + } + + pub fn debug_valid(&self) { + if self.entry_count == 0 { + debug_assert!(self.buf.is_empty()); + debug_assert!(self.restart_points.is_empty()); + debug_assert!(self.restart_points_type_index.is_empty()); + debug_assert!(self.last_key.is_empty()); + } } } #[cfg(test)] mod tests { - use bytes::Bytes; + use risingwave_common::catalog::TableId; + use risingwave_hummock_sdk::key::{FullKey, MAX_KEY_LEN}; use super::*; use crate::hummock::{BlockHolder, BlockIterator}; @@ -397,33 +646,34 @@ mod tests { fn test_block_enc_dec() { let options = BlockBuilderOptions::default(); let mut builder = BlockBuilder::new(options); - builder.add(&full_key(b"k1", 1), b"v01"); - builder.add(&full_key(b"k2", 2), b"v02"); - builder.add(&full_key(b"k3", 3), b"v03"); - builder.add(&full_key(b"k4", 4), b"v04"); + builder.add(construct_full_key_struct(0, b"k1", 1), b"v01"); + builder.add(construct_full_key_struct(0, b"k2", 2), b"v02"); + builder.add(construct_full_key_struct(0, b"k3", 3), b"v03"); + builder.add(construct_full_key_struct(0, b"k4", 4), b"v04"); let capacity = builder.uncompressed_block_size(); + assert_eq!(capacity, builder.approximate_len() - 9); let buf = builder.build().to_vec(); let block = Box::new(Block::decode(buf.into(), capacity).unwrap()); let mut bi = BlockIterator::new(BlockHolder::from_owned_block(block)); bi.seek_to_first(); assert!(bi.is_valid()); - assert_eq!(&full_key(b"k1", 1)[..], bi.key()); + assert_eq!(construct_full_key_struct(0, b"k1", 1), bi.key()); assert_eq!(b"v01", bi.value()); bi.next(); assert!(bi.is_valid()); - assert_eq!(&full_key(b"k2", 2)[..], bi.key()); + assert_eq!(construct_full_key_struct(0, b"k2", 2), bi.key()); assert_eq!(b"v02", bi.value()); bi.next(); assert!(bi.is_valid()); - assert_eq!(&full_key(b"k3", 3)[..], bi.key()); + 
assert_eq!(construct_full_key_struct(0, b"k3", 3), bi.key()); assert_eq!(b"v03", bi.value()); bi.next(); assert!(bi.is_valid()); - assert_eq!(&full_key(b"k4", 4)[..], bi.key()); + assert_eq!(construct_full_key_struct(0, b"k4", 4), bi.key()); assert_eq!(b"v04", bi.value()); bi.next(); @@ -442,44 +692,46 @@ mod tests { ..Default::default() }; let mut builder = BlockBuilder::new(options); - builder.add(&full_key(b"k1", 1), b"v01"); - builder.add(&full_key(b"k2", 2), b"v02"); - builder.add(&full_key(b"k3", 3), b"v03"); - builder.add(&full_key(b"k4", 4), b"v04"); - let capcitiy = builder.uncompressed_block_size(); + builder.add(construct_full_key_struct(0, b"k1", 1), b"v01"); + builder.add(construct_full_key_struct(0, b"k2", 2), b"v02"); + builder.add(construct_full_key_struct(0, b"k3", 3), b"v03"); + builder.add(construct_full_key_struct(0, b"k4", 4), b"v04"); + let capacity = builder.uncompressed_block_size(); + assert_eq!(capacity, builder.approximate_len() - 9); let buf = builder.build().to_vec(); - let block = Box::new(Block::decode(buf.into(), capcitiy).unwrap()); + let block = Box::new(Block::decode(buf.into(), capacity).unwrap()); let mut bi = BlockIterator::new(BlockHolder::from_owned_block(block)); bi.seek_to_first(); assert!(bi.is_valid()); - assert_eq!(&full_key(b"k1", 1)[..], bi.key()); + assert_eq!(construct_full_key_struct(0, b"k1", 1), bi.key()); assert_eq!(b"v01", bi.value()); bi.next(); assert!(bi.is_valid()); - assert_eq!(&full_key(b"k2", 2)[..], bi.key()); + assert_eq!(construct_full_key_struct(0, b"k2", 2), bi.key()); assert_eq!(b"v02", bi.value()); bi.next(); assert!(bi.is_valid()); - assert_eq!(&full_key(b"k3", 3)[..], bi.key()); + assert_eq!(construct_full_key_struct(0, b"k3", 3), bi.key()); assert_eq!(b"v03", bi.value()); bi.next(); assert!(bi.is_valid()); - assert_eq!(&full_key(b"k4", 4)[..], bi.key()); + assert_eq!(construct_full_key_struct(0, b"k4", 4), bi.key()); assert_eq!(b"v04", bi.value()); bi.next(); assert!(!bi.is_valid()); } - pub fn full_key(user_key: &[u8], epoch: u64) -> Bytes { - let mut buf = BytesMut::with_capacity(user_key.len() + 8); - buf.put_slice(user_key); - buf.put_u64(!epoch); - buf.freeze() + pub fn construct_full_key_struct( + table_id: u32, + table_key: &[u8], + epoch: u64, + ) -> FullKey<&[u8]> { + FullKey::for_test(TableId::new(table_id), table_key, epoch) } #[test] @@ -490,30 +742,86 @@ mod tests { let large_key = vec![b'b'; MAX_KEY_LEN]; let xlarge_key = vec![b'c'; MAX_KEY_LEN + 500]; - builder.add(&full_key(&medium_key, 1), b"v1"); - builder.add(&full_key(&large_key, 2), b"v2"); - builder.add(&full_key(&xlarge_key, 3), b"v3"); + builder.add(construct_full_key_struct(0, &medium_key, 1), b"v1"); + builder.add(construct_full_key_struct(0, &large_key, 2), b"v2"); + builder.add(construct_full_key_struct(0, &xlarge_key, 3), b"v3"); let capacity = builder.uncompressed_block_size(); + assert_eq!(capacity, builder.approximate_len() - 9); let buf = builder.build().to_vec(); let block = Box::new(Block::decode(buf.into(), capacity).unwrap()); let mut bi = BlockIterator::new(BlockHolder::from_owned_block(block)); bi.seek_to_first(); assert!(bi.is_valid()); - assert_eq!(&full_key(&medium_key, 1)[..], bi.key()); + assert_eq!(construct_full_key_struct(0, &medium_key, 1), bi.key()); assert_eq!(b"v1", bi.value()); bi.next(); assert!(bi.is_valid()); - assert_eq!(&full_key(&large_key, 2)[..], bi.key()); + assert_eq!(construct_full_key_struct(0, &large_key, 2), bi.key()); assert_eq!(b"v2", bi.value()); bi.next(); assert!(bi.is_valid()); - 
assert_eq!(&full_key(&xlarge_key, 3)[..], bi.key()); + assert_eq!(construct_full_key_struct(0, &xlarge_key, 3), bi.key()); assert_eq!(b"v3", bi.value()); bi.next(); assert!(!bi.is_valid()); } + + #[test] + fn test_block_restart_point() { + let options = BlockBuilderOptions::default(); + let mut builder = BlockBuilder::new(options); + + const KEY_COUNT: u8 = 100; + const BUILDER_COUNT: u8 = 5; + + for _ in 0..BUILDER_COUNT { + for index in 0..KEY_COUNT { + if index < 50 { + let mut medium_key = vec![b'A'; MAX_KEY_LEN - 500]; + medium_key.push(index); + builder.add(construct_full_key_struct(0, &medium_key, 1), b"v1"); + } else if index < 80 { + let mut large_key = vec![b'B'; MAX_KEY_LEN]; + large_key.push(index); + builder.add(construct_full_key_struct(0, &large_key, 2), b"v2"); + } else { + let mut xlarge_key = vec![b'C'; MAX_KEY_LEN + 500]; + xlarge_key.push(index); + builder.add(construct_full_key_struct(0, &xlarge_key, 3), b"v3"); + } + } + + let capacity = builder.uncompressed_block_size(); + assert_eq!(capacity, builder.approximate_len() - 9); + let buf = builder.build().to_vec(); + let block = Box::new(Block::decode(buf.into(), capacity).unwrap()); + let mut bi = BlockIterator::new(BlockHolder::from_owned_block(block)); + bi.seek_to_first(); + assert!(bi.is_valid()); + + for index in 0..KEY_COUNT { + if index < 50 { + let mut medium_key = vec![b'A'; MAX_KEY_LEN - 500]; + medium_key.push(index); + assert_eq!(construct_full_key_struct(0, &medium_key, 1), bi.key()); + } else if index < 80 { + let mut large_key = vec![b'B'; MAX_KEY_LEN]; + large_key.push(index); + assert_eq!(construct_full_key_struct(0, &large_key, 2), bi.key()); + } else { + let mut xlarge_key = vec![b'C'; MAX_KEY_LEN + 500]; + xlarge_key.push(index); + assert_eq!(construct_full_key_struct(0, &xlarge_key, 3), bi.key()); + } + bi.next(); + } + + assert!(!bi.is_valid()); + builder.clear(); + } + } } diff --git a/src/storage/src/hummock/sstable/block_iterator.rs b/src/storage/src/hummock/sstable/block_iterator.rs index 26550a39648f2..25395d515a9d6 100644 --- a/src/storage/src/hummock/sstable/block_iterator.rs +++ b/src/storage/src/hummock/sstable/block_iterator.rs @@ -16,9 +16,10 @@ use std::cmp::Ordering; use std::ops::Range; use bytes::BytesMut; -use risingwave_hummock_sdk::KeyComparator; +use risingwave_common::catalog::TableId; +use risingwave_hummock_sdk::key::FullKey; -use super::KeyPrefix; +use super::{KeyPrefix, LenType, RestartPoint}; use crate::hummock::BlockHolder; /// [`BlockIterator`] is used to read kv pairs in a block. @@ -35,6 +36,9 @@ pub struct BlockIterator { value_range: Range, /// Current entry len. entry_len: usize, + + last_key_len_type: LenType, + last_value_len_type: LenType, } impl BlockIterator { @@ -46,6 +50,8 @@ impl BlockIterator { key: BytesMut::default(), value_range: 0..0, entry_len: 0, + last_key_len_type: LenType::u8, + last_value_len_type: LenType::u8, } } @@ -69,9 +75,14 @@ impl BlockIterator { self.try_prev_inner() } - pub fn key(&self) -> &[u8] { + pub fn table_id(&self) -> TableId { + self.block.table_id() + } + + pub fn key(&self) -> FullKey<&[u8]> { assert!(self.is_valid()); - &self.key[..] 
+ + FullKey::from_slice_without_table_id(self.table_id(), &self.key[..]) } pub fn value(&self) -> &[u8] { @@ -92,13 +103,15 @@ impl BlockIterator { self.next_until_prev_offset(self.block.len()); } - pub fn seek(&mut self, key: &[u8]) { + pub fn seek(&mut self, key: FullKey<&[u8]>) { self.seek_restart_point_by_key(key); + self.next_until_key(key); } - pub fn seek_le(&mut self, key: &[u8]) { + pub fn seek_le(&mut self, key: FullKey<&[u8]>) { self.seek_restart_point_by_key(key); + self.next_until_key(key); if !self.is_valid() { self.seek_to_last(); @@ -136,35 +149,42 @@ impl BlockIterator { if offset >= self.block.len() { return false; } - let prefix = self.decode_prefix_at(offset); + + // after seek, offset meet a new restart point we need to update it + if self.restart_point_index + 1 < self.block.restart_point_len() + && offset + >= self + .block + .restart_point(self.restart_point_index + 1) + .offset as usize + { + let new_restart_point_index = self.restart_point_index + 1; + self.update_restart_point(new_restart_point_index); + } + + let prefix = + self.decode_prefix_at(offset, self.last_key_len_type, self.last_value_len_type); self.key.truncate(prefix.overlap_len()); self.key .extend_from_slice(&self.block.data()[prefix.diff_key_range()]); + self.value_range = prefix.value_range(); self.offset = offset; self.entry_len = prefix.entry_len(); - if self.restart_point_index + 1 < self.block.restart_point_len() - && self.offset >= self.block.restart_point(self.restart_point_index + 1) as usize - { - self.restart_point_index += 1; - } + true } /// Moves forward until reaching the first that equals or larger than the given `key`. - fn next_until_key(&mut self, key: &[u8]) { - while self.is_valid() - && KeyComparator::compare_encoded_full_key(&self.key[..], key) == Ordering::Less - { + fn next_until_key(&mut self, key: FullKey<&[u8]>) { + while self.is_valid() && self.key().cmp(&key) == Ordering::Less { self.next_inner(); } } /// Moves backward until reaching the first key that equals or smaller than the given `key`. - fn prev_until_key(&mut self, key: &[u8]) { - while self.is_valid() - && KeyComparator::compare_encoded_full_key(&self.key[..], key) == Ordering::Greater - { + fn prev_until_key(&mut self, key: FullKey<&[u8]>) { + while self.is_valid() && self.key().cmp(&key) == Ordering::Greater { self.prev_inner(); } } @@ -195,7 +215,8 @@ impl BlockIterator { if self.offset == 0 { return false; } - if self.block.restart_point(self.restart_point_index) as usize == self.offset { + + if self.block.restart_point(self.restart_point_index).offset as usize == self.offset { self.restart_point_index -= 1; } let origin_offset = self.offset; @@ -205,46 +226,79 @@ impl BlockIterator { } /// Decodes [`KeyPrefix`] at given offset. - fn decode_prefix_at(&self, offset: usize) -> KeyPrefix { - KeyPrefix::decode(&mut &self.block.data()[offset..], offset) + fn decode_prefix_at( + &self, + offset: usize, + key_len_type: LenType, + value_len_type: LenType, + ) -> KeyPrefix { + KeyPrefix::decode( + &mut &self.block.data()[offset..], + offset, + key_len_type, + value_len_type, + ) } /// Searches the restart point index that the given `key` belongs to. - fn search_restart_point_index_by_key(&self, key: &[u8]) -> usize { + fn search_restart_point_index_by_key(&self, key: FullKey<&[u8]>) -> usize { // Find the largest restart point that restart key equals or less than the given key. 
self.block - .search_restart_partition_point(|&probe| { - let prefix = self.decode_prefix_at(probe as usize); - let probe_key = &self.block.data()[prefix.diff_key_range()]; - match KeyComparator::compare_encoded_full_key(probe_key, key) { - Ordering::Less | Ordering::Equal => true, - Ordering::Greater => false, - } - }) + .search_restart_partition_point( + |&RestartPoint { + offset: probe, + key_len_type, + value_len_type, + }| { + let prefix = + self.decode_prefix_at(probe as usize, key_len_type, value_len_type); + let probe_key = &self.block.data()[prefix.diff_key_range()]; + let full_probe_key = + FullKey::from_slice_without_table_id(self.block.table_id(), probe_key); + match full_probe_key.cmp(&key) { + Ordering::Less | Ordering::Equal => true, + Ordering::Greater => false, + } + }, + ) .saturating_sub(1) // Prevent from underflowing when given is smaller than the first. } /// Seeks to the restart point that the given `key` belongs to. - fn seek_restart_point_by_key(&mut self, key: &[u8]) { + fn seek_restart_point_by_key(&mut self, key: FullKey<&[u8]>) { let index = self.search_restart_point_index_by_key(key); self.seek_restart_point_by_index(index) } /// Seeks to the restart point by given restart point index. fn seek_restart_point_by_index(&mut self, index: usize) { - let offset = self.block.restart_point(index) as usize; - let prefix = self.decode_prefix_at(offset); + let restart_point = self.block.restart_point(index); + let offset = restart_point.offset as usize; + let prefix = self.decode_prefix_at( + offset, + restart_point.key_len_type, + restart_point.value_len_type, + ); + self.key = BytesMut::from(&self.block.data()[prefix.diff_key_range()]); self.value_range = prefix.value_range(); self.offset = offset; self.entry_len = prefix.entry_len(); + self.update_restart_point(index); + } + + fn update_restart_point(&mut self, index: usize) { self.restart_point_index = index; + let restart_point = self.block.restart_point(index); + + self.last_key_len_type = restart_point.key_len_type; + self.last_value_len_type = restart_point.value_len_type; } } #[cfg(test)] mod tests { - use bytes::{BufMut, Bytes}; + use risingwave_common::catalog::TableId; use super::*; use crate::hummock::{Block, BlockBuilder, BlockBuilderOptions}; @@ -252,10 +306,10 @@ mod tests { fn build_iterator_for_test() -> BlockIterator { let options = BlockBuilderOptions::default(); let mut builder = BlockBuilder::new(options); - builder.add(&full_key(b"k01", 1), b"v01"); - builder.add(&full_key(b"k02", 2), b"v02"); - builder.add(&full_key(b"k04", 4), b"v04"); - builder.add(&full_key(b"k05", 5), b"v05"); + builder.add(construct_full_key_struct(0, b"k01", 1), b"v01"); + builder.add(construct_full_key_struct(0, b"k02", 2), b"v02"); + builder.add(construct_full_key_struct(0, b"k04", 4), b"v04"); + builder.add(construct_full_key_struct(0, b"k05", 5), b"v05"); let capacity = builder.uncompressed_block_size(); let buf = builder.build().to_vec(); BlockIterator::new(BlockHolder::from_owned_block(Box::new( @@ -268,7 +322,7 @@ mod tests { let mut it = build_iterator_for_test(); it.seek_to_first(); assert!(it.is_valid()); - assert_eq!(&full_key(b"k01", 1)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k01", 1), it.key()); assert_eq!(b"v01", it.value()); } @@ -277,45 +331,51 @@ mod tests { let mut it = build_iterator_for_test(); it.seek_to_last(); assert!(it.is_valid()); - assert_eq!(&full_key(b"k05", 5)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k05", 5), it.key()); assert_eq!(b"v05", it.value()); } 
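The `search_restart_partition_point` call above follows the standard binary-search idiom for block seeks: keep every restart key that is less than or equal to the target, then step back one slot, with `saturating_sub(1)` covering the case where even the first restart key is already greater. The same pattern on plain byte slices:

fn main() {
    // Smallest key of each restart run, already in ascending order.
    let restart_keys: [&[u8]; 3] = [b"b", b"f", b"m"];

    // Largest restart key <= target; clamp to 0 when the target sorts
    // before everything, exactly like the saturating_sub(1) above.
    let find = |target: &[u8]| -> usize {
        restart_keys
            .partition_point(|k| *k <= target)
            .saturating_sub(1)
    };

    assert_eq!(find(&b"g"[..]), 1); // scan forward inside the "f" run
    assert_eq!(find(&b"a"[..]), 0); // before the first run: clamp to 0
}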
#[test] fn test_seek_none_front() { let mut it = build_iterator_for_test(); - it.seek(&full_key(b"k00", 0)[..]); + it.seek(construct_full_key_struct(0, b"k00", 0)); assert!(it.is_valid()); - assert_eq!(&full_key(b"k01", 1)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k01", 1), it.key()); assert_eq!(b"v01", it.value()); let mut it = build_iterator_for_test(); - it.seek_le(&full_key(b"k00", 0)[..]); + it.seek_le(construct_full_key_struct(0, b"k00", 0)); assert!(!it.is_valid()); } #[test] fn test_seek_none_back() { let mut it = build_iterator_for_test(); - it.seek(&full_key(b"k06", 6)[..]); + it.seek(construct_full_key_struct(0, b"k06", 6)); assert!(!it.is_valid()); let mut it = build_iterator_for_test(); - it.seek_le(&full_key(b"k06", 6)[..]); + it.seek_le(construct_full_key_struct(0, b"k06", 6)); assert!(it.is_valid()); - assert_eq!(&full_key(b"k05", 5)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k05", 5), it.key()); assert_eq!(b"v05", it.value()); } #[test] fn bi_direction_seek() { let mut it = build_iterator_for_test(); - it.seek(&full_key(b"k03", 3)[..]); - assert_eq!(&full_key(format!("k{:02}", 4).as_bytes(), 4)[..], it.key()); + it.seek(construct_full_key_struct(0, b"k03", 3)); + assert_eq!( + construct_full_key_struct(0, format!("k{:02}", 4).as_bytes(), 4), + it.key() + ); - it.seek_le(&full_key(b"k03", 3)[..]); - assert_eq!(&full_key(format!("k{:02}", 2).as_bytes(), 2)[..], it.key()); + it.seek_le(construct_full_key_struct(0, b"k03", 3)); + assert_eq!( + construct_full_key_struct(0, format!("k{:02}", 2).as_bytes(), 2), + it.key() + ); } #[test] @@ -324,22 +384,22 @@ mod tests { it.seek_to_first(); assert!(it.is_valid()); - assert_eq!(&full_key(b"k01", 1)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k01", 1), it.key()); assert_eq!(b"v01", it.value()); it.next(); assert!(it.is_valid()); - assert_eq!(&full_key(b"k02", 2)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k02", 2), it.key()); assert_eq!(b"v02", it.value()); it.next(); assert!(it.is_valid()); - assert_eq!(&full_key(b"k04", 4)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k04", 4), it.key()); assert_eq!(b"v04", it.value()); it.next(); assert!(it.is_valid()); - assert_eq!(&full_key(b"k05", 5)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k05", 5), it.key()); assert_eq!(b"v05", it.value()); it.next(); @@ -352,22 +412,22 @@ mod tests { it.seek_to_last(); assert!(it.is_valid()); - assert_eq!(&full_key(b"k05", 5)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k05", 5), it.key()); assert_eq!(b"v05", it.value()); it.prev(); assert!(it.is_valid()); - assert_eq!(&full_key(b"k04", 4)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k04", 4), it.key()); assert_eq!(b"v04", it.value()); it.prev(); assert!(it.is_valid()); - assert_eq!(&full_key(b"k02", 2)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k02", 2), it.key()); assert_eq!(b"v02", it.value()); it.prev(); assert!(it.is_valid()); - assert_eq!(&full_key(b"k01", 1)[..], it.key()); + assert_eq!(construct_full_key_struct(0, b"k01", 1), it.key()); assert_eq!(b"v01", it.value()); it.prev(); @@ -378,20 +438,30 @@ mod tests { fn test_seek_forward_backward_iterate() { let mut it = build_iterator_for_test(); - it.seek(&full_key(b"k03", 3)[..]); - assert_eq!(&full_key(format!("k{:02}", 4).as_bytes(), 4)[..], it.key()); + it.seek(construct_full_key_struct(0, b"k03", 3)); + assert_eq!( + construct_full_key_struct(0, format!("k{:02}", 4).as_bytes(), 4), + it.key() + ); 
it.prev(); - assert_eq!(&full_key(format!("k{:02}", 2).as_bytes(), 2)[..], it.key()); + assert_eq!( + construct_full_key_struct(0, format!("k{:02}", 2).as_bytes(), 2), + it.key() + ); it.next(); - assert_eq!(&full_key(format!("k{:02}", 4).as_bytes(), 4)[..], it.key()); - } - - pub fn full_key(user_key: &[u8], epoch: u64) -> Bytes { - let mut buf = BytesMut::with_capacity(user_key.len() + 8); - buf.put_slice(user_key); - buf.put_u64(!epoch); - buf.freeze() + assert_eq!( + construct_full_key_struct(0, format!("k{:02}", 4).as_bytes(), 4), + it.key() + ); + } + + pub fn construct_full_key_struct( + table_id: u32, + table_key: &[u8], + epoch: u64, + ) -> FullKey<&[u8]> { + FullKey::for_test(TableId::new(table_id), table_key, epoch) } } diff --git a/src/storage/src/hummock/sstable/builder.rs b/src/storage/src/hummock/sstable/builder.rs index 22075a6423185..81871e92890e1 100644 --- a/src/storage/src/hummock/sstable/builder.rs +++ b/src/storage/src/hummock/sstable/builder.rs @@ -83,6 +83,7 @@ pub struct SstableBuilderOutput { pub writer_output: WO, pub avg_key_size: usize, pub avg_value_size: usize, + pub epoch_count: usize, } pub struct SstableBuilder { @@ -119,8 +120,7 @@ pub struct SstableBuilder { filter_builder: F, - min_epoch: u64, - max_epoch: u64, + epoch_set: BTreeSet, } impl SstableBuilder { @@ -168,8 +168,7 @@ impl SstableBuilder { total_key_count: 0, table_stats: Default::default(), last_table_stats: Default::default(), - min_epoch: u64::MAX, - max_epoch: u64::MIN, + epoch_set: BTreeSet::default(), } } @@ -189,7 +188,7 @@ impl SstableBuilder { /// Add kv pair to sstable. pub async fn add( &mut self, - full_key: &FullKey>, + full_key: FullKey<&[u8]>, value: HummockValue<&[u8]>, is_new_user_key: bool, ) -> HummockResult<()> { @@ -238,6 +237,8 @@ impl SstableBuilder { self.total_key_count += 1; self.last_table_stats.total_key_count += 1; + self.epoch_set.insert(full_key.epoch); + if is_new_table && !self.block_builder.is_empty() { self.build_block().await?; } @@ -252,8 +253,7 @@ impl SstableBuilder { }) } - self.block_builder - .add(self.raw_key.as_ref(), self.raw_value.as_ref()); + self.block_builder.add(full_key, self.raw_value.as_ref()); self.last_table_stats.total_key_size += full_key.encoded_len() as i64; self.last_table_stats.total_value_size += value.encoded_len() as i64; @@ -263,9 +263,6 @@ impl SstableBuilder { self.raw_key.clear(); self.raw_value.clear(); - self.min_epoch = cmp::min(self.min_epoch, full_key.epoch); - self.max_epoch = cmp::max(self.max_epoch, full_key.epoch); - if self.block_builder.approximate_len() >= self.options.block_capacity { self.build_block().await?; } @@ -363,8 +360,51 @@ impl SstableBuilder { (tombstone_min_epoch, tombstone_max_epoch) }; + let (avg_key_size, avg_value_size) = if self.table_stats.is_empty() { + (0, 0) + } else { + let total_key_count: usize = self + .table_stats + .values() + .map(|s| s.total_key_count as usize) + .sum(); + + if total_key_count == 0 { + (0, 0) + } else { + let total_key_size: usize = self + .table_stats + .values() + .map(|s| s.total_key_size as usize) + .sum(); + + let total_value_size: usize = self + .table_stats + .values() + .map(|s| s.total_value_size as usize) + .sum(); + + ( + total_key_size / total_key_count, + total_value_size / total_key_count, + ) + } + }; + + let (min_epoch, max_epoch) = { + if self.epoch_set.is_empty() { + (u64::MAX, u64::MIN) + } else { + ( + *self.epoch_set.first().unwrap(), + *self.epoch_set.last().unwrap(), + ) + } + }; + let sst_info = SstableInfo { - id: self.sstable_id, + object_id: 
self.sstable_id, + sst_id: self.sstable_id, key_range: Some(risingwave_pb::hummock::KeyRange { left: meta.smallest_key.clone(), right: meta.largest_key.clone(), @@ -375,38 +415,22 @@ impl SstableBuilder { meta_offset: meta.meta_offset, stale_key_count: self.stale_key_count, total_key_count: self.total_key_count, - divide_version: 0, uncompressed_file_size: uncompressed_file_size + meta.encoded_size() as u64, - min_epoch: cmp::min(self.min_epoch, tombstone_min_epoch), - max_epoch: cmp::max(self.max_epoch, tombstone_max_epoch), + min_epoch: cmp::min(min_epoch, tombstone_min_epoch), + max_epoch: cmp::max(max_epoch, tombstone_max_epoch), }; tracing::trace!( - "meta_size {} bloom_filter_size {} add_key_counts {} stale_key_count {} min_epoch {} max_epoch {}", + "meta_size {} bloom_filter_size {} add_key_counts {} stale_key_count {} min_epoch {} max_epoch {} epoch_count {}", meta.encoded_size(), meta.bloom_filter.len(), self.total_key_count, self.stale_key_count, - self.min_epoch, - self.max_epoch, + min_epoch, + max_epoch, + self.epoch_set.len() ); let bloom_filter_size = meta.bloom_filter.len(); - let (avg_key_size, avg_value_size) = if self.table_stats.is_empty() { - (0, 0) - } else { - let avg_key_size = self - .table_stats - .values() - .map(|s| s.total_key_size as usize) - .sum::() - / self.table_stats.len(); - let avg_value_size = self - .table_stats - .values() - .map(|s| s.total_value_size as usize) - .sum::() - / self.table_stats.len(); - (avg_key_size, avg_value_size) - }; + let writer_output = self.writer.finish(meta).await?; Ok(SstableBuilderOutput:: { sst_info: LocalSstableInfo::with_stats(sst_info, self.table_stats), @@ -414,6 +438,7 @@ impl SstableBuilder { writer_output, avg_key_size, avg_value_size, + epoch_count: self.epoch_set.len(), }) } @@ -526,9 +551,13 @@ pub(super) mod tests { let mut b = SstableBuilder::for_test(0, mock_sst_writer(&opt), opt); for i in 0..TEST_KEYS_COUNT { - b.add(&test_key_of(i), HummockValue::put(&test_value_of(i)), true) - .await - .unwrap(); + b.add( + test_key_of(i).to_ref(), + HummockValue::put(&test_value_of(i)), + true, + ) + .await + .unwrap(); } let output = b.finish().await.unwrap(); diff --git a/src/storage/src/hummock/sstable/forward_sstable_iterator.rs b/src/storage/src/hummock/sstable/forward_sstable_iterator.rs index b9fa6f5e9fdb4..16cbae6f5d1bc 100644 --- a/src/storage/src/hummock/sstable/forward_sstable_iterator.rs +++ b/src/storage/src/hummock/sstable/forward_sstable_iterator.rs @@ -19,7 +19,6 @@ use std::ops::Bound::*; use std::sync::Arc; use risingwave_hummock_sdk::key::FullKey; -use risingwave_hummock_sdk::KeyComparator; use super::super::{HummockResult, HummockValue}; use super::Sstable; @@ -101,7 +100,7 @@ impl PrefetchContext { if *prefetched_idx == idx { false } else { - tracing::warn!(target: "events::storage::sstable::block_seek", "prefetch mismatch: sstable_id = {}, block_id = {}, prefetched_block_id = {}", sst.id, idx, *prefetched_idx); + tracing::warn!(target: "events::storage::sstable::block_seek", "prefetch mismatch: sstable_object_id = {}, block_id = {}, prefetched_block_id = {}", sst.id, idx, *prefetched_idx); self.prefetched_blocks.clear(); true } @@ -224,10 +223,14 @@ impl SstableIterator { } /// Seeks to a block, and then seeks to the key if `seek_key` is given. 
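The builder hunks above drop the running `min_epoch`/`max_epoch` pair in favour of a `BTreeSet` of every epoch written into the SST: the extremes fall out of `first()`/`last()`, the empty-set case keeps the old `u64::MAX`/`u64::MIN` sentinels, and the set's cardinality feeds the new `sstable_distinct_epoch_count` metric via `SstableBuilderOutput::epoch_count`. In miniature:

use std::collections::BTreeSet;

fn main() {
    let mut epoch_set = BTreeSet::new();
    for epoch in [7u64, 3, 7, 9, 3] {
        epoch_set.insert(epoch);
    }

    // first()/last() replace the old running cmp::min/cmp::max pair; the
    // empty case keeps the (u64::MAX, u64::MIN) sentinels from the hunk.
    let (min_epoch, max_epoch) = match (epoch_set.first(), epoch_set.last()) {
        (Some(min), Some(max)) => (*min, *max),
        _ => (u64::MAX, u64::MIN),
    };
    assert_eq!((min_epoch, max_epoch), (3, 9));

    // The cardinality is the distinct-epoch count observed by the metric.
    assert_eq!(epoch_set.len(), 3);
}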
- async fn seek_idx(&mut self, idx: usize, seek_key: Option<&[u8]>) -> HummockResult<()> { + async fn seek_idx( + &mut self, + idx: usize, + seek_key: Option>, + ) -> HummockResult<()> { tracing::trace!( target: "events::storage::sstable::block_seek", - "table iterator seek: sstable_id = {}, block_id = {}", + "table iterator seek: sstable_object_id = {}, block_id = {}", self.sst.value().id, idx, ); @@ -280,7 +283,7 @@ impl HummockIterator for SstableIterator { } fn key(&self) -> FullKey<&[u8]> { - FullKey::decode(self.block_iter.as_ref().expect("no block iter").key()) + self.block_iter.as_ref().expect("no block iter").key() } fn value(&self) -> HummockValue<&[u8]> { @@ -302,7 +305,6 @@ impl HummockIterator for SstableIterator { fn seek<'a>(&'a mut self, key: FullKey<&'a [u8]>) -> Self::SeekFuture<'a> { async move { - let encoded_key = key.encode(); let block_idx = self .sst .value() @@ -312,17 +314,13 @@ impl HummockIterator for SstableIterator { // compare by version comparator // Note: we are comparing against the `smallest_key` of the `block`, thus the // partition point should be `prev(<=)` instead of `<`. - let ord = KeyComparator::compare_encoded_full_key( - block_meta.smallest_key.as_slice(), - encoded_key.as_slice(), - ); + let ord = FullKey::decode(&block_meta.smallest_key).cmp(&key); ord == Less || ord == Equal }) .saturating_sub(1); // considering the boundary of 0 self.init_block_fetcher(block_idx); - self.seek_idx(block_idx, Some(encoded_key.as_slice())) - .await?; + self.seek_idx(block_idx, Some(key)).await?; if !self.is_valid() { // seek to next block self.seek_idx(block_idx + 1, None).await?; diff --git a/src/storage/src/hummock/sstable/mod.rs b/src/storage/src/hummock/sstable/mod.rs index a9f5dbd5cbc01..a82d1205ece51 100644 --- a/src/storage/src/hummock/sstable/mod.rs +++ b/src/storage/src/hummock/sstable/mod.rs @@ -40,14 +40,14 @@ use bytes::{Buf, BufMut}; pub use forward_sstable_iterator::*; mod backward_sstable_iterator; pub use backward_sstable_iterator::*; -use risingwave_hummock_sdk::key::{KeyPayloadType, TableKey, UserKey}; -use risingwave_hummock_sdk::{HummockEpoch, HummockSstableId}; +use risingwave_hummock_sdk::key::{FullKey, KeyPayloadType, TableKey, UserKey}; +use risingwave_hummock_sdk::{HummockEpoch, HummockSstableObjectId}; #[cfg(test)] use risingwave_pb::hummock::{KeyRange, SstableInfo}; mod delete_range_aggregator; mod filter; -mod sstable_id_manager; +mod sstable_object_id_manager; mod utils; pub use delete_range_aggregator::{ @@ -55,7 +55,7 @@ pub use delete_range_aggregator::{ RangeTombstonesCollector, SstableDeleteRangeIterator, }; pub use filter::FilterBuilder; -pub use sstable_id_manager::*; +pub use sstable_object_id_manager::*; pub use utils::CompressionAlgorithm; use utils::{get_length_prefixed_slice, put_length_prefixed_slice}; use xxhash_rust::{xxh32, xxh64}; @@ -125,7 +125,7 @@ impl DeleteRangeTombstone { /// [`Sstable`] is a handle for accessing SST. 
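Across these hunks `SstableInfo` now carries both an `object_id` and an `sst_id`, and `divide_version` is gone. At build time the two ids receive the same value, as in `SstableBuilder::finish` above; keeping them as separate fields presumably leaves room for several logical SSTs to reference one physical object, though this patch itself always sets them equal. A stand-in sketch (the real `SstableInfo` is a protobuf message with many more fields):

// Stand-in for the two-id scheme.
#[derive(Debug)]
struct SstInfoSketch {
    object_id: u64, // names the physical file in object storage
    sst_id: u64,    // names the logical SST in the LSM metadata
}

fn main() {
    // At build time both ids receive the same freshly fetched value.
    let new_id = 42u64;
    let info = SstInfoSketch { object_id: new_id, sst_id: new_id };
    assert_eq!(info.object_id, info.sst_id);
    println!("{info:?}");
}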
#[derive(Clone)] pub struct Sstable { - pub id: HummockSstableId, + pub id: HummockSstableObjectId, pub meta: SstableMeta, pub filter_reader: XorFilterReader, } @@ -140,7 +140,7 @@ impl Debug for Sstable { } impl Sstable { - pub fn new(id: HummockSstableId, mut meta: SstableMeta) -> Self { + pub fn new(id: HummockSstableObjectId, mut meta: SstableMeta) -> Self { let filter_data = std::mem::take(&mut meta.bloom_filter); let filter_reader = XorFilterReader::new(filter_data); Self { @@ -196,7 +196,8 @@ impl Sstable { #[cfg(test)] pub fn get_sstable_info(&self) -> SstableInfo { SstableInfo { - id: self.id, + object_id: self.id, + sst_id: self.id, key_range: Some(KeyRange { left: self.meta.smallest_key.clone(), right: self.meta.largest_key.clone(), @@ -207,7 +208,6 @@ impl Sstable { meta_offset: self.meta.meta_offset, stale_key_count: 0, total_key_count: self.meta.key_count as u64, - divide_version: 0, uncompressed_file_size: self.meta.estimated_size as u64, min_epoch: 0, max_epoch: 0, @@ -253,6 +253,10 @@ impl BlockMeta { pub fn encoded_size(&self) -> usize { 16 /* offset + len + key len + uncompressed size */ + self.smallest_key.len() } + + pub fn table_id(&self) -> TableId { + FullKey::decode(&self.smallest_key).user_key.table_id + } } #[derive(Clone, PartialEq, Eq, Debug)] @@ -279,7 +283,9 @@ impl SstableMeta { /// | estimated size (4B) | key count (4B) | /// | smallest key len (4B) | smallest key | /// | largest key len (4B) | largest key | + /// | M (4B) | /// | range-tombstone 0 | ... | range-tombstone M-1 | + /// | file offset of this meta block (8B) | /// | checksum (8B) | version (4B) | magic (4B) | /// ``` pub fn encode_to_bytes(&self) -> Vec { @@ -299,11 +305,11 @@ impl SstableMeta { buf.put_u32_le(self.key_count); put_length_prefixed_slice(buf, &self.smallest_key); put_length_prefixed_slice(buf, &self.largest_key); - buf.put_u64_le(self.meta_offset); buf.put_u32_le(self.range_tombstone_list.len() as u32); for tombstone in &self.range_tombstone_list { tombstone.encode(buf); } + buf.put_u64_le(self.meta_offset); let checksum = xxhash64_checksum(&buf[start_offset..]); buf.put_u64_le(checksum); buf.put_u32_le(VERSION); @@ -340,13 +346,13 @@ impl SstableMeta { let key_count = buf.get_u32_le(); let smallest_key = get_length_prefixed_slice(buf); let largest_key = get_length_prefixed_slice(buf); - let meta_offset = buf.get_u64_le(); let range_del_count = buf.get_u32_le() as usize; let mut range_tombstone_list = Vec::with_capacity(range_del_count); for _ in 0..range_del_count { let tombstone = DeleteRangeTombstone::decode(buf); range_tombstone_list.push(tombstone); } + let meta_offset = buf.get_u64_le(); Ok(Self { block_metas, diff --git a/src/storage/src/hummock/sstable/multi_builder.rs b/src/storage/src/hummock/sstable/multi_builder.rs index 667ee8b834ba8..93eceb6c9f256 100644 --- a/src/storage/src/hummock/sstable/multi_builder.rs +++ b/src/storage/src/hummock/sstable/multi_builder.rs @@ -141,7 +141,7 @@ where /// allowed, where `allow_split` should be `false`. 
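// Hedged sketch of the reordered `SstableMeta` tail documented above: the
// range-tombstone count and tombstones now precede the meta-block file
// offset, which sits directly in front of the checksum/version/magic footer.
// Tombstone bodies are elided; only the framing is shown, using the `bytes`
// crate the file already imports.
use bytes::{Buf, BufMut};

fn encode_tail(buf: &mut Vec<u8>, tombstone_count: u32, meta_offset: u64) {
    buf.put_u32_le(tombstone_count); // | M (4B) |
    // ... range-tombstone 0 ..= M-1 would be encoded here ...
    buf.put_u64_le(meta_offset); // | file offset of this meta block (8B) |
}

fn decode_tail(mut buf: &[u8]) -> (u32, u64) {
    let tombstone_count = buf.get_u32_le();
    // ... tombstones would be decoded here, mirroring the encode order ...
    let meta_offset = buf.get_u64_le();
    (tombstone_count, meta_offset)
}

fn main() {
    let mut buf = Vec::new();
    encode_tail(&mut buf, 0, 4096);
    assert_eq!(decode_tail(&buf), (0, 4096));
}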
pub async fn add_full_key( &mut self, - full_key: &FullKey<&[u8]>, + full_key: FullKey<&[u8]>, value: HummockValue<&[u8]>, is_new_user_key: bool, ) -> HummockResult<()> { @@ -210,6 +210,12 @@ where .sstable_avg_value_size .observe(builder_output.avg_value_size as _); } + + if builder_output.epoch_count != 0 { + self.compactor_metrics + .sstable_distinct_epoch_count + .observe(builder_output.epoch_count as _); + } } self.sst_outputs.push(SplitTableOutput { upload_join_handle: builder_output.writer_output, @@ -334,7 +340,7 @@ mod tests { for i in 0..table_capacity { builder .add_full_key( - &FullKey::from_user_key( + FullKey::from_user_key( test_user_key_of(i).as_ref(), (table_capacity - i) as u64, ), @@ -364,7 +370,7 @@ mod tests { epoch -= 1; builder .add_full_key( - &FullKey::from_user_key(test_user_key_of(1).as_ref(), epoch), + FullKey::from_user_key(test_user_key_of(1).as_ref(), epoch), HummockValue::put(b"v"), true, ) @@ -402,7 +408,7 @@ mod tests { opts, )); builder - .add_full_key(&test_key_of(0).to_ref(), HummockValue::put(b"v"), false) + .add_full_key(test_key_of(0).to_ref(), HummockValue::put(b"v"), false) .await .unwrap(); } @@ -426,7 +432,7 @@ mod tests { ); builder .add_full_key( - &FullKey::for_test(table_id, b"k", 233), + FullKey::for_test(table_id, b"k", 233), HummockValue::put(b"v"), false, ) diff --git a/src/storage/src/hummock/sstable/sstable_id_manager.rs b/src/storage/src/hummock/sstable/sstable_id_manager.rs deleted file mode 100644 index b2a371a55951d..0000000000000 --- a/src/storage/src/hummock/sstable/sstable_id_manager.rs +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::cmp; -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::ops::DerefMut; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::Arc; - -use itertools::Itertools; -use parking_lot::Mutex; -use risingwave_hummock_sdk::{HummockEpoch, HummockSstableId, SstIdRange}; -use risingwave_pb::meta::heartbeat_request::extra_info::Info; -use risingwave_rpc_client::{ExtraInfoSource, HummockMetaClient}; -use sync_point::sync_point; -use tokio::sync::oneshot; - -use crate::hummock::{HummockError, HummockResult}; - -pub type SstableIdManagerRef = Arc; - -/// 1. Caches SST ids fetched from meta. -/// 2. Maintains GC watermark SST id. -/// -/// During full GC, SST in object store with id >= watermark SST id will be excluded from orphan SST -/// candidate and thus won't be deleted. -pub struct SstableIdManager { - // Lock order: `wait_queue` before `available_sst_ids`. 
- wait_queue: Mutex>>>, - available_sst_ids: Mutex, - remote_fetch_number: u32, - hummock_meta_client: Arc, - sst_id_tracker: SstIdTracker, -} - -impl SstableIdManager { - pub fn new(hummock_meta_client: Arc, remote_fetch_number: u32) -> Self { - Self { - wait_queue: Default::default(), - available_sst_ids: Mutex::new(SstIdRange::new( - HummockSstableId::MIN, - HummockSstableId::MIN, - )), - remote_fetch_number, - hummock_meta_client, - sst_id_tracker: SstIdTracker::new(), - } - } - - /// Returns a new SST id. - /// The id is guaranteed to be monotonic increasing. - pub async fn get_new_sst_id(self: &Arc) -> HummockResult { - self.map_next_sst_id(|available_sst_ids| available_sst_ids.get_next_sst_id()) - .await - } - - /// Executes `f` with next SST id. - /// May fetch new SST ids via RPC. - async fn map_next_sst_id(self: &Arc, f: F) -> HummockResult - where - F: Fn(&mut SstIdRange) -> Option, - { - loop { - // 1. Try to get - if let Some(new_id) = f(self.available_sst_ids.lock().deref_mut()) { - return Ok(new_id); - } - // 2. Otherwise either fetch new ids, or wait for previous fetch if any. - let waiter = { - let mut guard = self.wait_queue.lock(); - if let Some(new_id) = f(self.available_sst_ids.lock().deref_mut()) { - return Ok(new_id); - } - let wait_queue = guard.deref_mut(); - if let Some(wait_queue) = wait_queue { - let (tx, rx) = oneshot::channel(); - wait_queue.push(tx); - Some(rx) - } else { - *wait_queue = Some(vec![]); - None - } - }; - if let Some(waiter) = waiter { - // Wait for previous fetch - sync_point!("MAP_NEXT_SST_ID.AS_FOLLOWER"); - let _ = waiter.await; - continue; - } - // Fetch new ids. - sync_point!("MAP_NEXT_SST_ID.AS_LEADER"); - sync_point!("MAP_NEXT_SST_ID.BEFORE_FETCH"); - let this = self.clone(); - tokio::spawn(async move { - let new_sst_ids = match this - .hummock_meta_client - .get_new_sst_ids(this.remote_fetch_number) - .await - .map_err(HummockError::meta_error) - { - Ok(new_sst_ids) => new_sst_ids, - Err(err) => { - this.notify_waiters(false); - return Err(err); - } - }; - sync_point!("MAP_NEXT_SST_ID.AFTER_FETCH"); - sync_point!("MAP_NEXT_SST_ID.BEFORE_FILL_CACHE"); - // Update local cache. - let result = { - let mut guard = this.available_sst_ids.lock(); - let available_sst_ids = guard.deref_mut(); - if new_sst_ids.start_id < available_sst_ids.end_id { - Err(HummockError::meta_error(format!( - "SST id moves backwards. new {} < old {}", - new_sst_ids.start_id, available_sst_ids.end_id - ))) - } else { - *available_sst_ids = new_sst_ids; - Ok(()) - } - }; - this.notify_waiters(result.is_ok()); - result - }) - .await - .unwrap()?; - } - } - - /// Adds a new watermark SST id using the next unused SST id. - /// Returns a tracker id. It's used to remove the watermark later. - /// - Uses given 'epoch' as tracker id if provided. - /// - Uses a generated tracker id otherwise. - pub async fn add_watermark_sst_id( - self: &Arc, - epoch: Option, - ) -> HummockResult { - let tracker_id = match epoch { - None => self.sst_id_tracker.get_next_auto_tracker_id(), - Some(epoch) => TrackerId::Epoch(epoch), - }; - let next_sst_id = self - .map_next_sst_id(|available_sst_ids| available_sst_ids.peek_next_sst_id()) - .await?; - self.sst_id_tracker.add_tracker(tracker_id, next_sst_id); - Ok(tracker_id) - } - - pub fn remove_watermark_sst_id(&self, tracker_id: TrackerId) { - self.sst_id_tracker.remove_tracker(tracker_id); - } - - /// Returns GC watermark. It equals - /// - min(effective watermarks), if number of effective watermarks > 0. 
- /// - `HummockSstableId::MAX`, if no effective watermark. - pub fn global_watermark_sst_id(&self) -> HummockSstableId { - self.sst_id_tracker - .tracking_sst_ids() - .into_iter() - .min() - .unwrap_or(HummockSstableId::MAX) - } - - fn notify_waiters(&self, success: bool) { - let mut guard = self.wait_queue.lock(); - let wait_queue = guard.deref_mut().take().unwrap(); - for notify in wait_queue { - let _ = notify.send(success); - } - } -} - -#[async_trait::async_trait] -impl ExtraInfoSource for SstableIdManager { - async fn get_extra_info(&self) -> Option { - Some(Info::HummockGcWatermark(self.global_watermark_sst_id())) - } -} - -type AutoTrackerId = u64; - -#[derive(Eq, Hash, PartialEq, Copy, Clone, Debug)] -pub enum TrackerId { - Auto(AutoTrackerId), - Epoch(HummockEpoch), -} - -/// `SstIdTracker` tracks a min(SST id) for various caller, identified by a `TrackerId`. -pub struct SstIdTracker { - auto_id: AtomicU64, - inner: parking_lot::RwLock, -} - -impl SstIdTracker { - fn new() -> Self { - Self { - auto_id: Default::default(), - inner: parking_lot::RwLock::new(SstIdTrackerInner::new()), - } - } - - /// Adds a tracker to track `sst_id`. If a tracker with `tracker_id` already exists, it will - /// track the smallest `sst_id` ever given. - fn add_tracker(&self, tracker_id: TrackerId, sst_id: HummockSstableId) { - self.inner.write().add_tracker(tracker_id, sst_id); - } - - /// Removes given `tracker_id`. - fn remove_tracker(&self, tracker_id: TrackerId) { - self.inner.write().remove_tracker(tracker_id); - } - - fn get_next_auto_tracker_id(&self) -> TrackerId { - TrackerId::Auto(self.auto_id.fetch_add(1, Ordering::Relaxed) + 1) - } - - fn tracking_sst_ids(&self) -> Vec { - self.inner.read().tracking_sst_ids() - } -} - -struct SstIdTrackerInner { - tracking_sst_ids: HashMap, -} - -impl SstIdTrackerInner { - fn new() -> Self { - Self { - tracking_sst_ids: Default::default(), - } - } - - fn add_tracker(&mut self, tracker_id: TrackerId, sst_id: HummockSstableId) { - match self.tracking_sst_ids.entry(tracker_id) { - Entry::Occupied(mut o) => { - *o.get_mut() = cmp::min(*o.get_mut(), sst_id); - } - Entry::Vacant(v) => { - v.insert(sst_id); - } - } - } - - fn remove_tracker(&mut self, tracker_id: TrackerId) { - match &tracker_id { - TrackerId::Auto(_) => { - self.tracking_sst_ids.remove(&tracker_id); - } - TrackerId::Epoch(max_epoch) => self.tracking_sst_ids.retain(|id, _| match id { - TrackerId::Auto(_) => true, - TrackerId::Epoch(epoch) => *epoch > *max_epoch, - }), - } - } - - fn tracking_sst_ids(&self) -> Vec { - self.tracking_sst_ids.values().cloned().collect_vec() - } -} - -#[cfg(test)] -mod test { - - use risingwave_common::try_match_expand; - - use crate::hummock::sstable::sstable_id_manager::AutoTrackerId; - use crate::hummock::{SstIdTracker, TrackerId}; - - #[tokio::test] - async fn test_sst_id_tracker_basic() { - let sst_id_tacker = SstIdTracker::new(); - assert!(sst_id_tacker.tracking_sst_ids().is_empty()); - let auto_id = - try_match_expand!(sst_id_tacker.get_next_auto_tracker_id(), TrackerId::Auto).unwrap(); - assert_eq!(auto_id, AutoTrackerId::MIN + 1); - - let auto_id_1 = sst_id_tacker.get_next_auto_tracker_id(); - let auto_id_2 = sst_id_tacker.get_next_auto_tracker_id(); - let auto_id_3 = sst_id_tacker.get_next_auto_tracker_id(); - - sst_id_tacker.add_tracker(auto_id_1, 10); - assert_eq!( - sst_id_tacker.tracking_sst_ids().into_iter().min().unwrap(), - 10 - ); - - // OK to move SST id backwards. 
- sst_id_tacker.add_tracker(auto_id_1, 9); - sst_id_tacker.add_tracker(auto_id_2, 9); - - // OK to add same id to the same tracker - sst_id_tacker.add_tracker(auto_id_1, 10); - // OK to add same id to another tracker - sst_id_tacker.add_tracker(auto_id_2, 10); - - sst_id_tacker.add_tracker(auto_id_3, 20); - sst_id_tacker.add_tracker(auto_id_2, 30); - // Tracker 1 and 2 both hold id 9. - assert_eq!( - sst_id_tacker.tracking_sst_ids().into_iter().min().unwrap(), - 9 - ); - - sst_id_tacker.remove_tracker(auto_id_1); - // Tracker 2 still holds 9. - assert_eq!( - sst_id_tacker.tracking_sst_ids().into_iter().min().unwrap(), - 9 - ); - - sst_id_tacker.remove_tracker(auto_id_2); - assert_eq!( - sst_id_tacker.tracking_sst_ids().into_iter().min().unwrap(), - 20 - ); - - sst_id_tacker.remove_tracker(auto_id_3); - assert!(sst_id_tacker.tracking_sst_ids().is_empty()); - } -} diff --git a/src/storage/src/hummock/sstable/sstable_object_id_manager.rs b/src/storage/src/hummock/sstable/sstable_object_id_manager.rs new file mode 100644 index 0000000000000..d37cceb3f8bc9 --- /dev/null +++ b/src/storage/src/hummock/sstable/sstable_object_id_manager.rs @@ -0,0 +1,359 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::cmp; +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::ops::DerefMut; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; + +use itertools::Itertools; +use parking_lot::Mutex; +use risingwave_hummock_sdk::{HummockEpoch, HummockSstableObjectId, SstObjectIdRange}; +use risingwave_pb::meta::heartbeat_request::extra_info::Info; +use risingwave_rpc_client::{ExtraInfoSource, HummockMetaClient}; +use sync_point::sync_point; +use tokio::sync::oneshot; + +use crate::hummock::{HummockError, HummockResult}; + +pub type SstableObjectIdManagerRef = Arc; + +/// 1. Caches SST object ids fetched from meta. +/// 2. Maintains GC watermark SST object id. +/// +/// During full GC, SST in object store with object id >= watermark SST object id will be excluded +/// from orphan SST object candidate and thus won't be deleted. +pub struct SstableObjectIdManager { + // Lock order: `wait_queue` before `available_sst_object_ids`. + wait_queue: Mutex>>>, + available_sst_object_ids: Mutex, + remote_fetch_number: u32, + hummock_meta_client: Arc, + object_id_tracker: SstObjectIdTracker, +} + +impl SstableObjectIdManager { + pub fn new(hummock_meta_client: Arc, remote_fetch_number: u32) -> Self { + Self { + wait_queue: Default::default(), + available_sst_object_ids: Mutex::new(SstObjectIdRange::new( + HummockSstableObjectId::MIN, + HummockSstableObjectId::MIN, + )), + remote_fetch_number, + hummock_meta_client, + object_id_tracker: SstObjectIdTracker::new(), + } + } + + /// Returns a new SST id. + /// The id is guaranteed to be monotonic increasing. 
+ pub async fn get_new_sst_object_id(self: &Arc) -> HummockResult { + self.map_next_sst_object_id(|available_sst_object_ids| { + available_sst_object_ids.get_next_sst_object_id() + }) + .await + } + + /// Executes `f` with next SST id. + /// May fetch new SST ids via RPC. + async fn map_next_sst_object_id( + self: &Arc, + f: F, + ) -> HummockResult + where + F: Fn(&mut SstObjectIdRange) -> Option, + { + loop { + // 1. Try to get + if let Some(new_id) = f(self.available_sst_object_ids.lock().deref_mut()) { + return Ok(new_id); + } + // 2. Otherwise either fetch new ids, or wait for previous fetch if any. + let waiter = { + let mut guard = self.wait_queue.lock(); + if let Some(new_id) = f(self.available_sst_object_ids.lock().deref_mut()) { + return Ok(new_id); + } + let wait_queue = guard.deref_mut(); + if let Some(wait_queue) = wait_queue { + let (tx, rx) = oneshot::channel(); + wait_queue.push(tx); + Some(rx) + } else { + *wait_queue = Some(vec![]); + None + } + }; + if let Some(waiter) = waiter { + // Wait for previous fetch + sync_point!("MAP_NEXT_SST_OBJECT_ID.AS_FOLLOWER"); + let _ = waiter.await; + continue; + } + // Fetch new ids. + sync_point!("MAP_NEXT_SST_OBJECT_ID.AS_LEADER"); + sync_point!("MAP_NEXT_SST_OBJECT_ID.BEFORE_FETCH"); + let this = self.clone(); + tokio::spawn(async move { + let new_sst_ids = match this + .hummock_meta_client + .get_new_sst_ids(this.remote_fetch_number) + .await + .map_err(HummockError::meta_error) + { + Ok(new_sst_ids) => new_sst_ids, + Err(err) => { + this.notify_waiters(false); + return Err(err); + } + }; + sync_point!("MAP_NEXT_SST_OBJECT_ID.AFTER_FETCH"); + sync_point!("MAP_NEXT_SST_OBJECT_ID.BEFORE_FILL_CACHE"); + // Update local cache. + let result = { + let mut guard = this.available_sst_object_ids.lock(); + let available_sst_object_ids = guard.deref_mut(); + if new_sst_ids.start_id < available_sst_object_ids.end_id { + Err(HummockError::meta_error(format!( + "SST id moves backwards. new {} < old {}", + new_sst_ids.start_id, available_sst_object_ids.end_id + ))) + } else { + *available_sst_object_ids = new_sst_ids; + Ok(()) + } + }; + this.notify_waiters(result.is_ok()); + result + }) + .await + .unwrap()?; + } + } + + /// Adds a new watermark SST id using the next unused SST id. + /// Returns a tracker id. It's used to remove the watermark later. + /// - Uses given 'epoch' as tracker id if provided. + /// - Uses a generated tracker id otherwise. + pub async fn add_watermark_object_id( + self: &Arc, + epoch: Option, + ) -> HummockResult { + let tracker_id = match epoch { + None => self.object_id_tracker.get_next_auto_tracker_id(), + Some(epoch) => TrackerId::Epoch(epoch), + }; + let next_sst_object_id = self + .map_next_sst_object_id(|available_sst_object_ids| { + available_sst_object_ids.peek_next_sst_object_id() + }) + .await?; + self.object_id_tracker + .add_tracker(tracker_id, next_sst_object_id); + Ok(tracker_id) + } + + pub fn remove_watermark_object_id(&self, tracker_id: TrackerId) { + self.object_id_tracker.remove_tracker(tracker_id); + } + + /// Returns GC watermark. It equals + /// - min(effective watermarks), if number of effective watermarks > 0. + /// - `HummockSstableObjectId::MAX`, if no effective watermark. 
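// Hedged, self-contained sketch (simplified from `map_next_sst_object_id`
// above): the first task to find the wait queue unset becomes the leader and
// performs the remote fetch; concurrent tasks park a oneshot sender in the
// queue and receive the leader's result. The real code additionally
// re-checks the id cache and loops; the mock fetch below always succeeds.
use std::sync::Arc;

use parking_lot::Mutex;
use tokio::sync::oneshot;

type WaitQueue = Mutex<Option<Vec<oneshot::Sender<bool>>>>;

async fn fetch_or_wait(queue: Arc<WaitQueue>) -> bool {
    let waiter = {
        let mut guard = queue.lock();
        match guard.as_mut() {
            // A leader is already fetching: enqueue and wait for its result.
            Some(waiters) => {
                let (tx, rx) = oneshot::channel();
                waiters.push(tx);
                Some(rx)
            }
            // Become the leader.
            None => {
                *guard = Some(vec![]);
                None
            }
        }
    };
    if let Some(rx) = waiter {
        return rx.await.unwrap_or(false);
    }
    let success = true; // stand-in for the `get_new_sst_ids` RPC
    let waiters = queue.lock().take().unwrap();
    for tx in waiters {
        let _ = tx.send(success);
    }
    success
}

#[tokio::main]
async fn main() {
    let queue: Arc<WaitQueue> = Arc::new(Mutex::new(None));
    let a = tokio::spawn(fetch_or_wait(queue.clone()));
    let b = tokio::spawn(fetch_or_wait(queue.clone()));
    assert!(a.await.unwrap() && b.await.unwrap());
}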
+ pub fn global_watermark_object_id(&self) -> HummockSstableObjectId { + self.object_id_tracker + .tracking_object_ids() + .into_iter() + .min() + .unwrap_or(HummockSstableObjectId::MAX) + } + + fn notify_waiters(&self, success: bool) { + let mut guard = self.wait_queue.lock(); + let wait_queue = guard.deref_mut().take().unwrap(); + for notify in wait_queue { + let _ = notify.send(success); + } + } +} + +#[async_trait::async_trait] +impl ExtraInfoSource for SstableObjectIdManager { + async fn get_extra_info(&self) -> Option { + Some(Info::HummockGcWatermark(self.global_watermark_object_id())) + } +} + +type AutoTrackerId = u64; + +#[derive(Eq, Hash, PartialEq, Copy, Clone, Debug)] +pub enum TrackerId { + Auto(AutoTrackerId), + Epoch(HummockEpoch), +} + +/// `SstObjectIdTracker` tracks a min(SST object id) for various caller, identified by a +/// `TrackerId`. +pub struct SstObjectIdTracker { + auto_id: AtomicU64, + inner: parking_lot::RwLock, +} + +impl SstObjectIdTracker { + fn new() -> Self { + Self { + auto_id: Default::default(), + inner: parking_lot::RwLock::new(SstObjectIdTrackerInner::new()), + } + } + + /// Adds a tracker to track `object_id`. If a tracker with `tracker_id` already exists, it will + /// track the smallest `object_id` ever given. + fn add_tracker(&self, tracker_id: TrackerId, object_id: HummockSstableObjectId) { + self.inner.write().add_tracker(tracker_id, object_id); + } + + /// Removes given `tracker_id`. + fn remove_tracker(&self, tracker_id: TrackerId) { + self.inner.write().remove_tracker(tracker_id); + } + + fn get_next_auto_tracker_id(&self) -> TrackerId { + TrackerId::Auto(self.auto_id.fetch_add(1, Ordering::Relaxed) + 1) + } + + fn tracking_object_ids(&self) -> Vec { + self.inner.read().tracking_object_ids() + } +} + +struct SstObjectIdTrackerInner { + tracking_object_ids: HashMap, +} + +impl SstObjectIdTrackerInner { + fn new() -> Self { + Self { + tracking_object_ids: Default::default(), + } + } + + fn add_tracker(&mut self, tracker_id: TrackerId, object_id: HummockSstableObjectId) { + match self.tracking_object_ids.entry(tracker_id) { + Entry::Occupied(mut o) => { + *o.get_mut() = cmp::min(*o.get_mut(), object_id); + } + Entry::Vacant(v) => { + v.insert(object_id); + } + } + } + + fn remove_tracker(&mut self, tracker_id: TrackerId) { + match &tracker_id { + TrackerId::Auto(_) => { + self.tracking_object_ids.remove(&tracker_id); + } + TrackerId::Epoch(max_epoch) => self.tracking_object_ids.retain(|id, _| match id { + TrackerId::Auto(_) => true, + TrackerId::Epoch(epoch) => *epoch > *max_epoch, + }), + } + } + + fn tracking_object_ids(&self) -> Vec { + self.tracking_object_ids.values().cloned().collect_vec() + } +} + +#[cfg(test)] +mod test { + + use risingwave_common::try_match_expand; + + use crate::hummock::sstable::sstable_object_id_manager::AutoTrackerId; + use crate::hummock::{SstObjectIdTracker, TrackerId}; + + #[tokio::test] + async fn test_object_id_tracker_basic() { + let object_id_tacker = SstObjectIdTracker::new(); + assert!(object_id_tacker.tracking_object_ids().is_empty()); + let auto_id = + try_match_expand!(object_id_tacker.get_next_auto_tracker_id(), TrackerId::Auto) + .unwrap(); + assert_eq!(auto_id, AutoTrackerId::MIN + 1); + + let auto_id_1 = object_id_tacker.get_next_auto_tracker_id(); + let auto_id_2 = object_id_tacker.get_next_auto_tracker_id(); + let auto_id_3 = object_id_tacker.get_next_auto_tracker_id(); + + object_id_tacker.add_tracker(auto_id_1, 10); + assert_eq!( + object_id_tacker + .tracking_object_ids() + .into_iter() + 
.min() + .unwrap(), + 10 + ); + + // OK to move SST id backwards. + object_id_tacker.add_tracker(auto_id_1, 9); + object_id_tacker.add_tracker(auto_id_2, 9); + + // OK to add same id to the same tracker + object_id_tacker.add_tracker(auto_id_1, 10); + // OK to add same id to another tracker + object_id_tacker.add_tracker(auto_id_2, 10); + + object_id_tacker.add_tracker(auto_id_3, 20); + object_id_tacker.add_tracker(auto_id_2, 30); + // Tracker 1 and 2 both hold id 9. + assert_eq!( + object_id_tacker + .tracking_object_ids() + .into_iter() + .min() + .unwrap(), + 9 + ); + + object_id_tacker.remove_tracker(auto_id_1); + // Tracker 2 still holds 9. + assert_eq!( + object_id_tacker + .tracking_object_ids() + .into_iter() + .min() + .unwrap(), + 9 + ); + + object_id_tacker.remove_tracker(auto_id_2); + assert_eq!( + object_id_tacker + .tracking_object_ids() + .into_iter() + .min() + .unwrap(), + 20 + ); + + object_id_tacker.remove_tracker(auto_id_3); + assert!(object_id_tacker.tracking_object_ids().is_empty()); + } +} diff --git a/src/storage/src/hummock/sstable_store.rs b/src/storage/src/hummock/sstable_store.rs index 22a14708dea01..887fa8d27cd9b 100644 --- a/src/storage/src/hummock/sstable_store.rs +++ b/src/storage/src/hummock/sstable_store.rs @@ -20,7 +20,7 @@ use bytes::{Buf, BufMut, Bytes}; use fail::fail_point; use itertools::Itertools; use risingwave_common::cache::LruCacheEventListener; -use risingwave_hummock_sdk::HummockSstableId; +use risingwave_hummock_sdk::HummockSstableObjectId; use risingwave_object_store::object::{ BlockLocation, MonitoredStreamingReader, ObjectError, ObjectMetadata, ObjectStoreRef, ObjectStreamingUploader, @@ -44,11 +44,11 @@ const MAX_META_CACHE_SHARD_BITS: usize = 2; const MAX_CACHE_SHARD_BITS: usize = 6; // It means that there will be 64 shards lru-cache to avoid lock conflict. const MIN_BUFFER_SIZE_PER_SHARD: usize = 256 * 1024 * 1024; // 256MB -pub type TableHolder = CacheableEntry>; +pub type TableHolder = CacheableEntry>; // BEGIN section for tiered cache -impl TieredCacheKey for (HummockSstableId, u64) { +impl TieredCacheKey for (HummockSstableObjectId, u64) { fn encoded_len() -> usize { 16 } @@ -59,9 +59,9 @@ impl TieredCacheKey for (HummockSstableId, u64) { } fn decode(mut buf: &[u8]) -> Self { - let sst_id = buf.get_u64(); + let object_id = buf.get_u64(); let block_idx = buf.get_u64(); - (sst_id, block_idx) + (object_id, block_idx) } } @@ -84,11 +84,11 @@ impl TieredCacheValue for Box { } pub struct BlockCacheEventListener { - tiered_cache: TieredCache<(HummockSstableId, u64), Box>, + tiered_cache: TieredCache<(HummockSstableObjectId, u64), Box>, } impl LruCacheEventListener for BlockCacheEventListener { - type K = (HummockSstableId, u64); + type K = (HummockSstableObjectId, u64); type T = Box; fn on_release(&self, key: Self::K, value: Self::T) { @@ -115,8 +115,8 @@ pub struct SstableStore { path: String, store: ObjectStoreRef, block_cache: BlockCache, - meta_cache: Arc>>, - tiered_cache: TieredCache<(HummockSstableId, u64), Box>, + meta_cache: Arc>>, + tiered_cache: TieredCache<(HummockSstableObjectId, u64), Box>, } impl SstableStore { @@ -126,7 +126,7 @@ impl SstableStore { block_cache_capacity: usize, meta_cache_capacity: usize, high_priority_ratio: usize, - tiered_cache: TieredCache<(HummockSstableId, u64), Box>, + tiered_cache: TieredCache<(HummockSstableObjectId, u64), Box>, ) -> Self { // TODO: We should validate path early. Otherwise object store won't report invalid path // error until first write attempt. 
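// Hedged round-trip sketch of the 16-byte tiered-cache key shown above: an
// (object id, block index) pair packed as two big-endian u64s via the
// `bytes` crate.
use bytes::{Buf, BufMut};

fn encode_key((object_id, block_idx): (u64, u64), buf: &mut Vec<u8>) {
    buf.put_u64(object_id);
    buf.put_u64(block_idx);
}

fn decode_key(mut buf: &[u8]) -> (u64, u64) {
    let object_id = buf.get_u64();
    let block_idx = buf.get_u64();
    (object_id, block_idx)
}

fn main() {
    let mut buf = Vec::new();
    encode_key((42, 7), &mut buf);
    assert_eq!(buf.len(), 16); // matches `encoded_len()`
    assert_eq!(decode_key(&buf), (42, 7));
}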
@@ -172,39 +172,46 @@ impl SstableStore { } } - pub async fn delete(&self, sst_id: HummockSstableId) -> HummockResult<()> { + pub async fn delete(&self, object_id: HummockSstableObjectId) -> HummockResult<()> { // Data self.store - .delete(self.get_sst_data_path(sst_id).as_str()) + .delete(self.get_sst_data_path(object_id).as_str()) .await?; - self.meta_cache.erase(sst_id, &sst_id); + self.meta_cache.erase(object_id, &object_id); Ok(()) } /// Deletes all SSTs specified in the given list of IDs from storage and cache. - pub async fn delete_list(&self, sst_id_list: &[HummockSstableId]) -> HummockResult<()> { - let mut paths = Vec::with_capacity(sst_id_list.len() * 2); + pub async fn delete_list( + &self, + object_id_list: &[HummockSstableObjectId], + ) -> HummockResult<()> { + let mut paths = Vec::with_capacity(object_id_list.len() * 2); - for &sst_id in sst_id_list { - paths.push(self.get_sst_data_path(sst_id)); + for &object_id in object_id_list { + paths.push(self.get_sst_data_path(object_id)); } // Delete from storage. self.store.delete_objects(&paths).await?; // Delete from cache. - for &sst_id in sst_id_list { - self.meta_cache.erase(sst_id, &sst_id); + for &object_id in object_id_list { + self.meta_cache.erase(object_id, &object_id); } Ok(()) } - pub fn delete_cache(&self, sst_id: HummockSstableId) { - self.meta_cache.erase(sst_id, &sst_id); + pub fn delete_cache(&self, object_id: HummockSstableObjectId) { + self.meta_cache.erase(object_id, &object_id); } - async fn put_sst_data(&self, sst_id: HummockSstableId, data: Bytes) -> HummockResult<()> { - let data_path = self.get_sst_data_path(sst_id); + async fn put_sst_data( + &self, + object_id: HummockSstableObjectId, + data: Bytes, + ) -> HummockResult<()> { + let data_path = self.get_sst_data_path(object_id); self.store .upload(&data_path, data) .await @@ -218,20 +225,20 @@ impl SstableStore { policy: CachePolicy, stats: &mut StoreLocalStatistic, ) -> HummockResult { - let sst_id = sst.id; + let object_id = sst.id; let (block_loc, uncompressed_capacity) = sst.calculate_block_info(block_index); stats.cache_data_block_total += 1; let mut fetch_block = || { let tiered_cache = self.tiered_cache.clone(); stats.cache_data_block_miss += 1; - let data_path = self.get_sst_data_path(sst_id); + let data_path = self.get_sst_data_path(object_id); let store = self.store.clone(); let use_tiered_cache = !matches!(policy, CachePolicy::Disable); async move { if use_tiered_cache && let Some(holder) = tiered_cache - .get(&(sst_id, block_index as u64)) + .get(&(object_id, block_index as u64)) .await .map_err(HummockError::tiered_cache)? { @@ -258,16 +265,16 @@ impl SstableStore { match policy { CachePolicy::Fill(high_priority) => Ok(self.block_cache.get_or_insert_with( - sst_id, + object_id, block_index as u64, high_priority, fetch_block, )), - CachePolicy::NotFill => match self.block_cache.get(sst_id, block_index as u64) { + CachePolicy::NotFill => match self.block_cache.get(object_id, block_index as u64) { Some(block) => Ok(BlockResponse::Block(block)), None => match self .tiered_cache - .get(&(sst_id, block_index as u64)) + .get(&(object_id, block_index as u64)) .await .map_err(HummockError::tiered_cache)? 
{ @@ -303,17 +310,17 @@ impl SstableStore { } } - pub fn get_sst_data_path(&self, sst_id: HummockSstableId) -> String { - let obj_prefix = self.store.get_object_prefix(sst_id, true); - format!("{}/{}{}.data", self.path, obj_prefix, sst_id) + pub fn get_sst_data_path(&self, object_id: HummockSstableObjectId) -> String { + let obj_prefix = self.store.get_object_prefix(object_id, true); + format!("{}/{}{}.data", self.path, obj_prefix, object_id) } - pub fn get_sst_id_from_path(&self, path: &str) -> HummockSstableId { + pub fn get_object_id_from_path(&self, path: &str) -> HummockSstableObjectId { let split = path.split(&['/', '.']).collect_vec(); debug_assert!(split.len() > 2); debug_assert!(split[split.len() - 1] == "meta" || split[split.len() - 1] == "data"); split[split.len() - 2] - .parse::() + .parse::() .expect("valid sst id") } @@ -321,7 +328,7 @@ impl SstableStore { self.store.clone() } - pub fn get_meta_cache(&self) -> Arc>> { + pub fn get_meta_cache(&self) -> Arc>> { self.meta_cache.clone() } @@ -345,12 +352,12 @@ impl SstableStore { stats: &StoreLocalStatistic, ) -> HummockResult<(TableHolder, u64)> { let mut local_cache_meta_block_miss = 0; - let sst_id = sst.id; + let object_id = sst.get_object_id(); let result = self .meta_cache - .lookup_with_request_dedup::<_, HummockError, _>(sst_id, sst_id, false, || { + .lookup_with_request_dedup::<_, HummockError, _>(object_id, object_id, false, || { let store = self.store.clone(); - let meta_path = self.get_sst_data_path(sst_id); + let meta_path = self.get_sst_data_path(object_id); local_cache_meta_block_miss += 1; let stats_ptr = stats.remote_io_time.clone(); let loc = BlockLocation { @@ -364,7 +371,7 @@ impl SstableStore { .await .map_err(HummockError::object_io_error)?; let meta = SstableMeta::decode(&mut &buf[..])?; - let sst = Sstable::new(sst_id, meta); + let sst = Sstable::new(object_id, meta); let charge = sst.estimate_size(); let add = (now.elapsed().as_secs_f64() * 1000.0).ceil(); stats_ptr.fetch_add(add as u64, Ordering::Relaxed); @@ -398,17 +405,26 @@ impl SstableStore { pub fn create_sst_writer( self: Arc, - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, options: SstableWriterOptions, ) -> BatchUploadWriter { - BatchUploadWriter::new(sst_id, self, options) + BatchUploadWriter::new(object_id, self, options) } - pub fn insert_meta_cache(&self, sst_id: HummockSstableId, meta: SstableMeta) { - let sst = Sstable::new(sst_id, meta); + pub fn insert_meta_cache(&self, object_id: HummockSstableObjectId, meta: SstableMeta) { + let sst = Sstable::new(object_id, meta); let charge = sst.estimate_size(); self.meta_cache - .insert(sst_id, sst_id, charge, Box::new(sst), false); + .insert(object_id, object_id, charge, Box::new(sst), true); + } + + pub fn insert_block_cache( + &self, + object_id: HummockSstableObjectId, + block_index: u64, + block: Box, + ) { + self.block_cache.insert(object_id, block_index, block, true); } pub fn get_meta_memory_usage(&self) -> u64 { @@ -489,7 +505,7 @@ pub trait SstableWriterFactory: Send + Sync { fn create_sst_writer( &self, - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, options: SstableWriterOptions, ) -> HummockResult; } @@ -509,11 +525,11 @@ impl SstableWriterFactory for BatchSstableWriterFactory { fn create_sst_writer( &self, - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, options: SstableWriterOptions, ) -> HummockResult { Ok(BatchUploadWriter::new( - sst_id, + object_id, self.sstable_store.clone(), options, )) @@ -523,7 +539,7 @@ impl 
SstableWriterFactory for BatchSstableWriterFactory { /// Buffer SST data and upload it as a whole on `finish`. /// The upload is finished when the returned `JoinHandle` is joined. pub struct BatchUploadWriter { - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, sstable_store: SstableStoreRef, policy: CachePolicy, buf: Vec, @@ -533,12 +549,12 @@ pub struct BatchUploadWriter { impl BatchUploadWriter { pub fn new( - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, sstable_store: Arc, options: SstableWriterOptions, ) -> Self { Self { - sst_id, + object_id, sstable_store, policy: options.policy, buf: Vec::with_capacity(options.capacity_hint.unwrap_or(0)), @@ -570,8 +586,8 @@ impl SstableWriter for BatchUploadWriter { let data = Bytes::from(self.buf); let _tracker = self.tracker.map(|mut t| { if !t.try_increase_memory(data.capacity() as u64) { - tracing::debug!("failed to allocate increase memory for data file, sst id: {}, file size: {}", - self.sst_id, data.capacity()); + tracing::debug!("failed to allocate increase memory for data file, sst object id: {}, file size: {}", + self.object_id, data.capacity()); } t }); @@ -579,9 +595,9 @@ impl SstableWriter for BatchUploadWriter { // Upload data to object store. self.sstable_store .clone() - .put_sst_data(self.sst_id, data) + .put_sst_data(self.object_id, data) .await?; - self.sstable_store.insert_meta_cache(self.sst_id, meta); + self.sstable_store.insert_meta_cache(self.object_id, meta); // Add block cache. if let CachePolicy::Fill(fill_cache_priority) = self.policy { @@ -589,7 +605,7 @@ impl SstableWriter for BatchUploadWriter { // store them in meta-block. for (block_idx, block) in self.block_info.into_iter().enumerate() { self.sstable_store.block_cache.insert( - self.sst_id, + self.object_id, block_idx as u64, Box::new(block), fill_cache_priority, @@ -607,7 +623,7 @@ impl SstableWriter for BatchUploadWriter { } pub struct StreamingUploadWriter { - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, sstable_store: SstableStoreRef, policy: CachePolicy, /// Data are uploaded block by block, except for the size footer. @@ -620,13 +636,13 @@ pub struct StreamingUploadWriter { impl StreamingUploadWriter { pub fn new( - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, sstable_store: SstableStoreRef, object_uploader: ObjectStreamingUploader, options: SstableWriterOptions, ) -> Self { Self { - sst_id, + object_id, sstable_store, policy: options.policy, object_uploader, @@ -665,8 +681,8 @@ impl SstableWriter for StreamingUploadWriter { let uploader_memory_usage = self.object_uploader.get_memory_usage(); let _tracker = self.tracker.map(|mut t| { if !t.try_increase_memory(uploader_memory_usage) { - tracing::debug!("failed to allocate increase memory for data file, sst id: {}, file size: {}", - self.sst_id, uploader_memory_usage); + tracing::debug!("failed to allocate increase memory for data file, sst object id: {}, file size: {}", + self.object_id, uploader_memory_usage); } t }); @@ -676,14 +692,14 @@ impl SstableWriter for StreamingUploadWriter { .finish() .await .map_err(HummockError::object_io_error)?; - self.sstable_store.insert_meta_cache(self.sst_id, meta); + self.sstable_store.insert_meta_cache(self.object_id, meta); // Add block cache. 
if let CachePolicy::Fill(fill_high_priority_cache) = self.policy { debug_assert!(!self.blocks.is_empty()); for (block_idx, block) in self.blocks.into_iter().enumerate() { self.sstable_store.block_cache.insert( - self.sst_id, + self.object_id, block_idx as u64, Box::new(block), fill_high_priority_cache, @@ -715,13 +731,13 @@ impl SstableWriterFactory for StreamingSstableWriterFactory { fn create_sst_writer( &self, - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, options: SstableWriterOptions, ) -> HummockResult { - let path = self.sstable_store.get_sst_data_path(sst_id); + let path = self.sstable_store.get_sst_data_path(object_id); let uploader = self.sstable_store.store.streaming_upload(&path)?; Ok(StreamingUploadWriter::new( - sst_id, + object_id, self.sstable_store.clone(), uploader, options, @@ -855,7 +871,7 @@ mod tests { use std::ops::Range; use std::sync::Arc; - use risingwave_hummock_sdk::HummockSstableId; + use risingwave_hummock_sdk::HummockSstableObjectId; use risingwave_pb::hummock::SstableInfo; use super::{SstableStoreRef, SstableWriterOptions}; @@ -869,7 +885,7 @@ mod tests { use crate::hummock::{CachePolicy, SstableIterator, SstableMeta}; use crate::monitor::StoreLocalStatistic; - const SST_ID: HummockSstableId = 1; + const SST_ID: HummockSstableObjectId = 1; fn get_hummock_value(x: usize) -> HummockValue> { HummockValue::put(format!("overlapped_new_{}", x).as_bytes().to_vec()) @@ -964,9 +980,9 @@ mod tests { #[test] fn test_basic() { let sstable_store = mock_sstable_store(); - let sst_id = 123; - let data_path = sstable_store.get_sst_data_path(sst_id); + let object_id = 123; + let data_path = sstable_store.get_sst_data_path(object_id); assert_eq!(data_path, "test/123.data"); - assert_eq!(sstable_store.get_sst_id_from_path(&data_path), sst_id); + assert_eq!(sstable_store.get_object_id_from_path(&data_path), object_id); } } diff --git a/src/storage/src/hummock/store/state_store.rs b/src/storage/src/hummock/store/state_store.rs index d7952beb8b683..deaebb8d289ba 100644 --- a/src/storage/src/hummock/store/state_store.rs +++ b/src/storage/src/hummock/store/state_store.rs @@ -41,6 +41,7 @@ use crate::hummock::utils::{ do_delete_sanity_check, do_insert_sanity_check, do_update_sanity_check, filter_with_delete_range, ENABLE_SANITY_CHECK, }; +use crate::hummock::write_limiter::WriteLimiterRef; use crate::hummock::{MemoryLimiter, SstableIterator}; use crate::mem_table::{merge_stream, KeyOp, MemTable}; use crate::monitor::{HummockStateStoreMetrics, IterLocalMetricsGuard, StoreLocalStatistic}; @@ -75,6 +76,8 @@ pub struct LocalHummockStorage { tracing: Arc, stats: Arc, + + write_limiter: WriteLimiterRef, } impl LocalHummockStorage { @@ -359,6 +362,7 @@ impl LocalHummockStorage { let sorted_items = SharedBufferBatch::build_shared_buffer_item_batches(kv_pairs); let size = SharedBufferBatch::measure_batch_size(&sorted_items); + self.write_limiter.wait_permission(self.table_id).await; let limiter = self.memory_limiter.as_ref(); let tracker = if let Some(tracker) = limiter.try_require_memory(size as u64) { tracker @@ -410,6 +414,7 @@ impl LocalHummockStorage { } impl LocalHummockStorage { + #[allow(clippy::too_many_arguments)] pub fn new( instance_guard: LocalInstanceGuard, read_version: Arc>, @@ -417,6 +422,7 @@ impl LocalHummockStorage { event_sender: mpsc::UnboundedSender, memory_limiter: Arc, tracing: Arc, + write_limiter: WriteLimiterRef, option: NewLocalOptions, ) -> Self { let stats = hummock_version_reader.stats().clone(); @@ -433,6 +439,7 @@ impl LocalHummockStorage { 
hummock_version_reader, tracing, stats, + write_limiter, } } diff --git a/src/storage/src/hummock/store/version.rs b/src/storage/src/hummock/store/version.rs index 929c6715703fc..93ed6683272c6 100644 --- a/src/storage/src/hummock/store/version.rs +++ b/src/storage/src/hummock/store/version.rs @@ -676,7 +676,7 @@ impl HummockVersionReader { for sstable_info in fetch_meta_req { let (sstable, local_cache_meta_block_miss) = flatten_resps.pop().unwrap().unwrap(); - assert_eq!(sstable_info.id, sstable.value().id); + assert_eq!(sstable_info.get_object_id(), sstable.value().id); local_stats.apply_meta_fetch(local_cache_meta_block_miss); if !sstable.value().meta.range_tombstone_list.is_empty() && !read_options.ignore_range_tombstone @@ -702,7 +702,7 @@ impl HummockVersionReader { for sstable_info in fetch_meta_req { let (sstable, local_cache_meta_block_miss) = flatten_resps.pop().unwrap().unwrap(); - assert_eq!(sstable_info.id, sstable.value().id); + assert_eq!(sstable_info.get_object_id(), sstable.value().id); local_stats.apply_meta_fetch(local_cache_meta_block_miss); if !sstable.value().meta.range_tombstone_list.is_empty() && !read_options.ignore_range_tombstone diff --git a/src/storage/src/hummock/test_utils.rs b/src/storage/src/hummock/test_utils.rs index 048f8a512d3bc..13a6523fabbb8 100644 --- a/src/storage/src/hummock/test_utils.rs +++ b/src/storage/src/hummock/test_utils.rs @@ -19,7 +19,7 @@ use futures::{Stream, TryStreamExt}; use itertools::Itertools; use risingwave_common::catalog::TableId; use risingwave_hummock_sdk::key::{FullKey, UserKey}; -use risingwave_hummock_sdk::{HummockEpoch, HummockSstableId}; +use risingwave_hummock_sdk::{HummockEpoch, HummockSstableObjectId}; use risingwave_pb::hummock::{KeyRange, SstableInfo}; use super::iterator::test_utils::iterator_test_table_key_of; @@ -81,7 +81,7 @@ pub fn gen_dummy_batch_several_keys(n: usize) -> Vec<(Bytes, StorageValue)> { } pub fn gen_dummy_sst_info( - id: HummockSstableId, + id: HummockSstableObjectId, batches: Vec, table_id: TableId, epoch: HummockEpoch, @@ -99,7 +99,8 @@ pub fn gen_dummy_sst_info( file_size += batch.size() as u64; } SstableInfo { - id, + object_id: id, + sst_id: id, key_range: Some(KeyRange { left: FullKey::for_test(table_id, min_table_key, epoch).encode(), right: FullKey::for_test(table_id, max_table_key, epoch).encode(), @@ -110,7 +111,6 @@ pub fn gen_dummy_sst_info( meta_offset: 0, stale_key_count: 0, total_key_count: 0, - divide_version: 0, uncompressed_file_size: file_size, min_epoch: 0, max_epoch: 0, @@ -149,7 +149,7 @@ pub async fn gen_test_sstable_data( ) -> (Bytes, SstableMeta) { let mut b = SstableBuilder::for_test(0, mock_sst_writer(&opts), opts); for (key, value) in kv_iter { - b.add(&key.to_ref(), value.as_slice(), true).await.unwrap(); + b.add(key.to_ref(), value.as_slice(), true).await.unwrap(); } let output = b.finish().await.unwrap(); output.writer_output @@ -157,14 +157,16 @@ pub async fn gen_test_sstable_data( /// Write the data and meta to `sstable_store`. 
pub async fn put_sst( - sst_id: HummockSstableId, + sst_object_id: HummockSstableObjectId, data: Bytes, mut meta: SstableMeta, sstable_store: SstableStoreRef, mut options: SstableWriterOptions, ) -> HummockResult { options.policy = CachePolicy::NotFill; - let mut writer = sstable_store.clone().create_sst_writer(sst_id, options); + let mut writer = sstable_store + .clone() + .create_sst_writer(sst_object_id, options); for block_meta in &meta.block_metas { let offset = block_meta.offset as usize; let end_offset = offset + block_meta.len as usize; @@ -174,7 +176,8 @@ pub async fn put_sst( } meta.meta_offset = writer.data_len() as u64; let sst = SstableInfo { - id: sst_id, + object_id: sst_object_id, + sst_id: sst_object_id, key_range: Some(KeyRange { left: meta.smallest_key.clone(), right: meta.largest_key.clone(), @@ -185,7 +188,6 @@ pub async fn put_sst( meta_offset: meta.meta_offset, stale_key_count: 0, total_key_count: 0, - divide_version: 0, uncompressed_file_size: meta.estimated_size as u64, min_epoch: 0, max_epoch: 0, @@ -198,21 +200,23 @@ pub async fn put_sst( /// Generates a test table from the given `kv_iter` and put the kv value to `sstable_store` pub async fn gen_test_sstable_inner>( opts: SstableBuilderOptions, - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, kv_iter: impl Iterator, HummockValue)>, range_tombstones: Vec, sstable_store: SstableStoreRef, policy: CachePolicy, -) -> Sstable { +) -> (Sstable, SstableInfo) { let writer_opts = SstableWriterOptions { capacity_hint: None, tracker: None, policy, }; - let writer = sstable_store.clone().create_sst_writer(sst_id, writer_opts); - let mut b = SstableBuilder::for_test(sst_id, writer, opts); + let writer = sstable_store + .clone() + .create_sst_writer(object_id, writer_opts); + let mut b = SstableBuilder::for_test(object_id, writer, opts); for (key, value) in kv_iter { - b.add(&key.to_ref(), value.as_slice(), true).await.unwrap(); + b.add(key.to_ref(), value.as_slice(), true).await.unwrap(); } b.add_delete_range(range_tombstones); let output = b.finish().await.unwrap(); @@ -224,19 +228,38 @@ pub async fn gen_test_sstable_inner>( ) .await .unwrap(); - table.value().as_ref().clone() + (table.value().as_ref().clone(), output.sst_info.sst_info) } /// Generate a test table from the given `kv_iter` and put the kv value to `sstable_store` pub async fn gen_test_sstable>( opts: SstableBuilderOptions, - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, kv_iter: impl Iterator, HummockValue)>, sstable_store: SstableStoreRef, ) -> Sstable { gen_test_sstable_inner( opts, - sst_id, + object_id, + kv_iter, + vec![], + sstable_store, + CachePolicy::NotFill, + ) + .await + .0 +} + +/// Generate a test table from the given `kv_iter` and put the kv value to `sstable_store` +pub async fn gen_test_sstable_and_info>( + opts: SstableBuilderOptions, + object_id: HummockSstableObjectId, + kv_iter: impl Iterator, HummockValue)>, + sstable_store: SstableStoreRef, +) -> (Sstable, SstableInfo) { + gen_test_sstable_inner( + opts, + object_id, kv_iter, vec![], sstable_store, @@ -248,20 +271,21 @@ pub async fn gen_test_sstable>( /// Generate a test table from the given `kv_iter` and put the kv value to `sstable_store` pub async fn gen_test_sstable_with_range_tombstone( opts: SstableBuilderOptions, - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, kv_iter: impl Iterator>, HummockValue>)>, range_tombstones: Vec, sstable_store: SstableStoreRef, ) -> Sstable { gen_test_sstable_inner( opts, - sst_id, + object_id, kv_iter, 
range_tombstones, sstable_store, CachePolicy::NotFill, ) .await + .0 } /// Generates a user key with table id 0 and the given `table_key` @@ -299,12 +323,12 @@ pub fn test_value_of(idx: usize) -> Vec { /// generated by `test_key_of` and `test_value_of`. pub async fn gen_default_test_sstable( opts: SstableBuilderOptions, - sst_id: HummockSstableId, + object_id: HummockSstableObjectId, sstable_store: SstableStoreRef, ) -> Sstable { gen_test_sstable( opts, - sst_id, + object_id, (0..TEST_KEYS_COUNT).map(|i| (test_key_of(i), HummockValue::put(test_value_of(i)))), sstable_store, ) @@ -320,6 +344,6 @@ pub async fn count_stream(s: impl Stream> + Send) -> c } -pub fn create_small_table_cache() -> Arc>> { +pub fn create_small_table_cache() -> Arc>> { Arc::new(LruCache::new(1, 4, 0)) } diff --git a/src/storage/src/hummock/utils.rs b/src/storage/src/hummock/utils.rs index f99c37a305101..cb2a0fb29fe58 100644 --- a/src/storage/src/hummock/utils.rs +++ b/src/storage/src/hummock/utils.rs @@ -84,7 +84,7 @@ pub fn validate_table_key_range(version: &HummockVersion) { assert!( t.key_range.is_some(), "key_range in table [{}] is none", - t.id + t.get_object_id() ); } } diff --git a/src/storage/src/hummock/vacuum.rs b/src/storage/src/hummock/vacuum.rs index 832502d24f139..befeeb6c77aeb 100644 --- a/src/storage/src/hummock/vacuum.rs +++ b/src/storage/src/hummock/vacuum.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use itertools::Itertools; -use risingwave_hummock_sdk::HummockSstableId; +use risingwave_hummock_sdk::HummockSstableObjectId; use risingwave_object_store::object::ObjectMetadata; use risingwave_pb::hummock::{FullScanTask, VacuumTask}; use risingwave_rpc_client::HummockMetaClient; @@ -35,7 +35,7 @@ impl Vacuum { sstable_store: SstableStoreRef, hummock_meta_client: Arc, ) -> bool { - tracing::info!("Try to vacuum SSTs {:?}", vacuum_task.sstable_ids); + tracing::info!("Try to vacuum SSTs {:?}", vacuum_task.sstable_object_ids); match Vacuum::vacuum_inner( vacuum_task, sstable_store.clone(), @@ -59,11 +59,11 @@ impl Vacuum { sstable_store: SstableStoreRef, hummock_meta_client: Arc, ) -> HummockResult<()> { - let sst_ids = vacuum_task.sstable_ids; - sstable_store.delete_list(&sst_ids).await?; + let object_ids = vacuum_task.sstable_object_ids; + sstable_store.delete_list(&object_ids).await?; hummock_meta_client .report_vacuum_task(VacuumTask { - sstable_ids: sst_ids, + sstable_object_ids: object_ids, }) .await .map_err(|e| { @@ -93,9 +93,9 @@ impl Vacuum { } }; - let sst_ids = + let object_ids = Vacuum::full_scan_inner(full_scan_task, object_metadata, sstable_store.clone()); - match hummock_meta_client.report_full_scan_task(sst_ids).await { + match hummock_meta_client.report_full_scan_task(object_ids).await { Ok(_) => { tracing::info!("Finished full scan SSTs"); } @@ -111,7 +111,7 @@ impl Vacuum { full_scan_task: FullScanTask, object_metadata: Vec, sstable_store: SstableStoreRef, - ) -> Vec { + ) -> Vec { let timestamp_watermark = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() @@ -120,7 +120,7 @@ impl Vacuum { object_metadata .into_iter() .filter(|o| o.last_modified < timestamp_watermark) - .map(|o| sstable_store.get_sst_id_from_path(&o.key)) + .map(|o| sstable_store.get_object_id_from_path(&o.key)) .dedup() .collect_vec() } diff --git a/src/storage/src/hummock/validator.rs b/src/storage/src/hummock/validator.rs index 5012750222c4f..de14532414ce2 100644 --- a/src/storage/src/hummock/validator.rs +++ b/src/storage/src/hummock/validator.rs @@ -36,11 +36,11 
@@ pub async fn validate_ssts(task: ValidationTask, sstable_store: SstableStoreRef) let mut key_counts = 0; let worker_id = *task .sst_id_to_worker_id - .get(&sst.id) + .get(&sst.object_id) .expect("valid worker_id"); tracing::debug!( "Validating SST {} from worker {}, epoch {}", - sst.id, + sst.get_object_id(), worker_id, task.epoch ); @@ -48,7 +48,7 @@ pub async fn validate_ssts(task: ValidationTask, sstable_store: SstableStoreRef) Ok(holder) => holder, Err(err) => { // One reasonable cause is the SST has been vacuumed. - tracing::warn!("Skip sanity check for SST {}. {}", sst.id, err); + tracing::warn!("Skip sanity check for SST {}. {}", sst.get_object_id(), err); continue; } }; @@ -62,43 +62,50 @@ pub async fn validate_ssts(task: ValidationTask, sstable_store: SstableStoreRef) ); let mut previous_key: Option>> = None; if let Err(err) = iter.rewind().await { - tracing::warn!("Skip sanity check for SST {}. {}", sst.id, err); + tracing::warn!("Skip sanity check for SST {}. {}", sst.get_object_id(), err); } while iter.is_valid() { key_counts += 1; let current_key = iter.key().to_vec(); // Locally unique and Globally unique - if let Some((duplicate_sst_id, duplicate_worker_id)) = + if let Some((duplicate_sst_object_id, duplicate_worker_id)) = visited_keys.get(¤t_key).cloned() { - panic!("SST sanity check failed: Duplicate key {:x?} in SST {} from worker {} and SST {} from worker {}", + panic!("SST sanity check failed: Duplicate key {:x?} in SST object {} from worker {} and SST object {} from worker {}", current_key, - sst.id, + sst.get_object_id(), worker_id, - duplicate_sst_id, + duplicate_sst_object_id, duplicate_worker_id) } - visited_keys.insert(current_key.to_owned(), (sst.id, worker_id)); + visited_keys.insert(current_key.to_owned(), (sst.get_object_id(), worker_id)); // Ordered and Locally unique if let Some(previous_key) = previous_key.take() { let cmp = previous_key.cmp(¤t_key); if cmp != cmp::Ordering::Less { panic!( "SST sanity check failed: For SST {}, expect {:x?} < {:x?}, got {:#?}", - sst.id, previous_key, current_key, cmp + sst.get_object_id(), + previous_key, + current_key, + cmp ) } } previous_key = Some(current_key); if let Err(err) = iter.next().await { - tracing::warn!("Skip remaining sanity check for SST {}. {}", sst.id, err); + tracing::warn!( + "Skip remaining sanity check for SST {}. {}", + sst.get_object_id(), + err + ); break; } } tracing::debug!( "Validated {} keys for SST {}, epoch {}", key_counts, - sst.id, + sst.get_object_id(), task.epoch ); iter.collect_local_statistic(&mut unused); diff --git a/src/storage/src/hummock/value.rs b/src/storage/src/hummock/value.rs index 70f32bba0e0c8..d5eae5ad0deac 100644 --- a/src/storage/src/hummock/value.rs +++ b/src/storage/src/hummock/value.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::fmt::Debug; + use bytes::{Buf, BufMut, Bytes}; use super::{HummockError, HummockResult}; @@ -24,12 +26,23 @@ pub const VALUE_PUT: u8 = 0; /// /// Its encoding is a 1-byte flag + storage value. For `Put`, storage value contains both value meta /// and user value. For `Delete`, storage value contains only value meta. 
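// Hedged standalone sketch of the manual `Debug` impl added below: `Put`
// payloads render as hex so arbitrary bytes stay printable. The local enum
// here only mirrors the real `HummockValue` for illustration.
#[derive(Clone)]
enum Value<T> {
    Put(T),
    Delete,
}

impl<T: AsRef<[u8]>> std::fmt::Debug for Value<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Value::Put(v) => write!(f, "HummockValue {{ PUT, {} }}", hex::encode(v.as_ref())),
            Value::Delete => write!(f, "HummockValue {{ DELETE }}"),
        }
    }
}

fn main() {
    assert_eq!(
        format!("{:?}", Value::Put(b"ab".to_vec())),
        "HummockValue { PUT, 6162 }" // 0x61 = 'a', 0x62 = 'b'
    );
    assert_eq!(format!("{:?}", Value::<Vec<u8>>::Delete), "HummockValue { DELETE }");
}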
-#[derive(Debug, Clone)] +#[derive(Clone)] pub enum HummockValue { Put(T), Delete, } +impl> Debug for HummockValue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match &self { + HummockValue::Put(v) => { + write!(f, "HummockValue {{ PUT, {} }}", hex::encode(v.as_ref())) + } + HummockValue::Delete => write!(f, "HummockValue {{ DELETE }}"), + } + } +} + impl Copy for HummockValue where T: Copy {} impl PartialEq for HummockValue { diff --git a/src/storage/src/hummock/write_limiter.rs b/src/storage/src/hummock/write_limiter.rs new file mode 100644 index 0000000000000..4f832b8fe8333 --- /dev/null +++ b/src/storage/src/hummock/write_limiter.rs @@ -0,0 +1,104 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashMap; +use std::sync::Arc; + +use arc_swap::ArcSwap; +use risingwave_common::catalog::TableId; +use risingwave_hummock_sdk::CompactionGroupId; +use risingwave_pb::hummock::write_limits::WriteLimit; + +pub type WriteLimiterRef = Arc; + +#[derive(Default)] +pub struct WriteLimiter { + limits: ArcSwap<( + HashMap, + HashMap, + )>, + notify: tokio::sync::Notify, +} + +impl WriteLimiter { + pub fn unused() -> Arc { + Arc::new(WriteLimiter::default()) + } + + pub fn update_write_limits(&self, limits: HashMap) { + let mut index: HashMap = HashMap::new(); + for (group_id, limit) in &limits { + for table_id in &limit.table_ids { + index.insert(table_id.into(), *group_id); + } + } + self.limits.store(Arc::new((limits, index))); + self.notify.notify_waiters(); + } + + /// Returns the reason if write for `table_id` is blocked. + fn try_find(&self, table_id: &TableId) -> Option { + let limits = self.limits.load(); + let group_id = match limits.1.get(table_id) { + None => { + return None; + } + Some(group_id) => *group_id, + }; + let reason = limits + .0 + .get(&group_id) + .as_ref() + .expect("table to group index should be accurate") + .reason + .clone(); + Some(reason) + } + + /// Waits until write is permitted for `table_id`. + pub async fn wait_permission(&self, table_id: TableId) { + // Fast path. + if self.try_find(&table_id).is_none() { + return; + } + let mut first_block_msg = true; + // Slow path. 
+ loop { + let notified = self.notify.notified(); + match self.try_find(&table_id) { + Some(reason) => { + if first_block_msg { + first_block_msg = false; + tracing::warn!( + "write to table {} is blocked: {}", + table_id.table_id, + reason, + ); + } else { + tracing::warn!( + "write limiter is updated, but write to table {} is still blocked: {}", + table_id.table_id, + reason, + ); + } + } + None => { + break; + } + } + notified.await; + } + tracing::info!("write to table {} is unblocked", table_id.table_id,); + } +} diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs index 2edca6aef5268..93adf69ba9343 100644 --- a/src/storage/src/lib.rs +++ b/src/storage/src/lib.rs @@ -43,6 +43,7 @@ #![feature(let_chains)] #![feature(associated_type_bounds)] #![feature(local_key_cell_methods)] +#![feature(exclusive_range_pattern)] pub mod hummock; pub mod memory; diff --git a/src/storage/src/monitor/compactor_metrics.rs b/src/storage/src/monitor/compactor_metrics.rs index 9aafd7b39e8df..bcd82a6b67e98 100644 --- a/src/storage/src/monitor/compactor_metrics.rs +++ b/src/storage/src/monitor/compactor_metrics.rs @@ -42,6 +42,7 @@ pub struct CompactorMetrics { pub sstable_avg_value_size: Histogram, pub iter_scan_key_counts: GenericCounterVec, pub write_build_l0_bytes: GenericCounter, + pub sstable_distinct_epoch_count: Histogram, } impl CompactorMetrics { @@ -171,7 +172,7 @@ impl CompactorMetrics { let opts = histogram_opts!( "compactor_sstable_avg_value_size", "Total bytes gotten from sstable_avg_value_size, for observing sstable_avg_value_size", - exponential_buckets(1.0, 2.0, 25).unwrap() // max 16MB + exponential_buckets(1.0, 2.0, 26).unwrap() // max 32MB ); let sstable_avg_value_size = register_histogram_with_registry!(opts, registry).unwrap(); @@ -198,6 +199,15 @@ impl CompactorMetrics { registry ).unwrap(); + let opts = histogram_opts!( + "compactor_sstable_distinct_epoch_count", + "Total number gotten from sstable_distinct_epoch_count, for observing sstable_distinct_epoch_count", + exponential_buckets(1.0, 2.0, 17).unwrap() + ); + + let sstable_distinct_epoch_count = + register_histogram_with_registry!(opts, registry).unwrap(); + Self { compaction_upload_sst_counts, compact_write_bytes, @@ -219,6 +229,7 @@ impl CompactorMetrics { sstable_avg_value_size, iter_scan_key_counts, write_build_l0_bytes, + sstable_distinct_epoch_count, } } diff --git a/src/storage/src/monitor/monitored_store.rs b/src/storage/src/monitor/monitored_store.rs index d4b8ee67c950a..e95eb94432ecf 100644 --- a/src/storage/src/monitor/monitored_store.rs +++ b/src/storage/src/monitor/monitored_store.rs @@ -25,7 +25,7 @@ use tracing::error; use super::MonitoredStorageMetrics; use crate::error::{StorageError, StorageResult}; use crate::hummock::sstable_store::SstableStoreRef; -use crate::hummock::{HummockStorage, SstableIdManagerRef}; +use crate::hummock::{HummockStorage, SstableObjectIdManagerRef}; use crate::store::*; use crate::{ define_local_state_store_associated_type, define_state_store_associated_type, @@ -319,8 +319,8 @@ impl MonitoredStateStore { self.inner.sstable_store() } - pub fn sstable_id_manager(&self) -> SstableIdManagerRef { - self.inner.sstable_id_manager().clone() + pub fn sstable_object_id_manager(&self) -> SstableObjectIdManagerRef { + self.inner.sstable_object_id_manager().clone() } } diff --git a/src/storage/src/opts.rs b/src/storage/src/opts.rs index e0993e678af3f..33fdb7339b087 100644 --- a/src/storage/src/opts.rs +++ b/src/storage/src/opts.rs @@ -13,8 +13,8 @@ // limitations under the License. 
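// Hedged sketch of the fast-path/slow-path wait in
// `WriteLimiter::wait_permission` above: the `Notified` future is created
// *before* the condition is re-checked, so an update landing between the
// check and the await is not lost. `Gate` is a stand-in for the limiter.
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;

use tokio::sync::Notify;

struct Gate {
    blocked: AtomicBool,
    notify: Notify,
}

impl Gate {
    async fn wait_permission(&self) {
        // Fast path.
        if !self.blocked.load(Ordering::Acquire) {
            return;
        }
        // Slow path.
        loop {
            let notified = self.notify.notified();
            if !self.blocked.load(Ordering::Acquire) {
                break;
            }
            notified.await;
        }
    }

    fn unblock(&self) {
        self.blocked.store(false, Ordering::Release);
        self.notify.notify_waiters();
    }
}

#[tokio::main]
async fn main() {
    let gate = Arc::new(Gate {
        blocked: AtomicBool::new(true),
        notify: Notify::new(),
    });
    let waiter = {
        let gate = gate.clone();
        tokio::spawn(async move { gate.wait_permission().await })
    };
    tokio::time::sleep(Duration::from_millis(10)).await;
    gate.unblock();
    waiter.await.unwrap();
}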
use risingwave_common::config::RwConfig; +use risingwave_common::system_param::default_system_params; use risingwave_common::system_param::reader::SystemParamsReader; -use risingwave_pb::meta::SystemParams; #[derive(Clone, Debug)] pub struct StorageOpts { @@ -74,17 +74,7 @@ pub struct StorageOpts { impl Default for StorageOpts { fn default() -> Self { let c = RwConfig::default(); - let p = SystemParams { - sstable_size_mb: Some(c.storage.sstable_size_mb), - block_size_kb: Some(c.storage.block_size_kb), - bloom_false_positive: Some(c.storage.bloom_false_positive), - data_directory: Some(c.storage.data_directory.clone()), - backup_storage_url: Some(c.backup.storage_url.clone()), - backup_storage_directory: Some(c.backup.storage_directory.clone()), - barrier_interval_ms: None, - checkpoint_frequency: None, - state_store: None, - }; + let p = default_system_params(); Self::from((&c, &p.into())) } } diff --git a/src/storage/src/store_impl.rs b/src/storage/src/store_impl.rs index bc8e827333e0e..ffcb410835674 100644 --- a/src/storage/src/store_impl.rs +++ b/src/storage/src/store_impl.rs @@ -23,11 +23,11 @@ use risingwave_object_store::object::{ }; use crate::error::StorageResult; -use crate::hummock::backup_reader::{parse_meta_snapshot_storage, BackupReader}; +use crate::hummock::backup_reader::BackupReaderRef; use crate::hummock::hummock_meta_client::MonitoredHummockMetaClient; use crate::hummock::sstable_store::SstableStoreRef; use crate::hummock::{ - HummockStorage, MemoryLimiter, SstableIdManagerRef, SstableStore, TieredCache, + HummockStorage, MemoryLimiter, SstableObjectIdManagerRef, SstableStore, TieredCache, TieredCacheMetricsBuilder, }; use crate::memory::sled::SledStateStore; @@ -600,17 +600,9 @@ impl StateStoreImpl { )); let notification_client = RpcNotificationClient::new(hummock_meta_client.get_inner().clone()); - - let backup_store = parse_meta_snapshot_storage( - &opts.backup_storage_url, - &opts.backup_storage_directory, - ) - .await?; - let backup_reader = BackupReader::new(backup_store); let inner = HummockStorage::new( opts.clone(), sstable_store, - backup_reader, hummock_meta_client.clone(), notification_client, state_store_metrics.clone(), @@ -642,16 +634,17 @@ impl StateStoreImpl { /// This trait is for aligning some common methods of `state_store_impl` for external use pub trait HummockTrait { - fn sstable_id_manager(&self) -> &SstableIdManagerRef; + fn sstable_object_id_manager(&self) -> &SstableObjectIdManagerRef; fn sstable_store(&self) -> SstableStoreRef; fn filter_key_extractor_manager(&self) -> &FilterKeyExtractorManagerRef; fn get_memory_limiter(&self) -> Arc; + fn backup_reader(&self) -> BackupReaderRef; fn as_hummock(&self) -> Option<&HummockStorage>; } impl HummockTrait for HummockStorage { - fn sstable_id_manager(&self) -> &SstableIdManagerRef { - self.sstable_id_manager() + fn sstable_object_id_manager(&self) -> &SstableObjectIdManagerRef { + self.sstable_object_id_manager() } fn sstable_store(&self) -> SstableStoreRef { @@ -666,6 +659,10 @@ impl HummockTrait for HummockStorage { self.get_memory_limiter() } + fn backup_reader(&self) -> BackupReaderRef { + self.backup_reader() + } + fn as_hummock(&self) -> Option<&HummockStorage> { Some(self) } diff --git a/src/storage/src/table/batch_table/storage_table.rs b/src/storage/src/table/batch_table/storage_table.rs index 663e5f9295489..3c37728968b7c 100644 --- a/src/storage/src/table/batch_table/storage_table.rs +++ b/src/storage/src/table/batch_table/storage_table.rs @@ -25,9 +25,7 @@ use futures::{Stream, StreamExt}; 
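The `pk_in_output_indices` accessor added to `StorageTableInner` further down in this file leans on a small trick: collecting an iterator of `Option<usize>` into `Option<Vec<usize>>` short-circuits to `None` as soon as any primary-key column is missing from the output columns. A self-contained sketch with hypothetical indices:

    fn main() {
        // Hypothetical layout: pk is (col2, col0); output columns are (col0, col1, col2).
        let pk_indices = [2usize, 0];
        let output_indices = [0usize, 1, 2];
        let pk_in_output: Option<Vec<usize>> = pk_indices
            .iter()
            .map(|&i| output_indices.iter().position(|&j| i == j))
            .collect();
        assert_eq!(pk_in_output, Some(vec![2, 0]));
    }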
use futures_async_stream::try_stream; use itertools::{Either, Itertools}; use risingwave_common::buffer::Bitmap; -use risingwave_common::catalog::{ - get_dist_key_in_pk_indices, ColumnDesc, ColumnId, Schema, TableId, TableOption, -}; +use risingwave_common::catalog::{ColumnDesc, ColumnId, Schema, TableId, TableOption}; use risingwave_common::hash::{VirtualNode, VnodeBitmapExt}; use risingwave_common::row::{self, OwnedRow, Row, RowExt}; use risingwave_common::util::ordered::*; @@ -89,11 +87,6 @@ pub struct StorageTableInner { // FIXME: revisit constructions and usages. pk_indices: Vec, - /// Indices of distribution key for computing vnode. - /// Note that the index is based on the all columns of the table, instead of the output ones. - // FIXME: revisit constructions and usages. - dist_key_indices: Vec, - /// Indices of distribution key for computing vnode. /// Note that the index is based on the primary key columns by `pk_indices`. dist_key_in_pk_indices: Vec, @@ -188,7 +181,7 @@ impl StorageTableInner { order_types: Vec, pk_indices: Vec, Distribution { - dist_key_indices, + dist_key_in_pk_indices, vnodes, }: Distribution, table_option: TableOption, @@ -249,7 +242,6 @@ impl StorageTableInner { } }; - let dist_key_in_pk_indices = get_dist_key_in_pk_indices(&dist_key_indices, &pk_indices); let key_output_indices = match key_output_indices.is_empty() { true => None, false => Some(key_output_indices), @@ -266,7 +258,6 @@ impl StorageTableInner { mapping: Arc::new(mapping), row_serde: Arc::new(row_serde), pk_indices, - dist_key_indices, dist_key_in_pk_indices, vnodes, table_option, @@ -287,6 +278,20 @@ impl StorageTableInner { pub fn pk_indices(&self) -> &[usize] { &self.pk_indices } + + pub fn output_indices(&self) -> &[usize] { + &self.output_indices + } + + /// Get the indices of the primary key columns in the output columns. + /// + /// Returns `None` if any of the primary key columns is not in the output columns. + pub fn pk_in_output_indices(&self) -> Option> { + self.pk_indices + .iter() + .map(|&i| self.output_indices.iter().position(|&j| i == j)) + .collect() + } } /// Point get @@ -374,6 +379,13 @@ impl StorageTableInner { Ok(None) } } + + /// Update the vnode bitmap of the storage table, returns the previous vnode bitmap. 
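/// `new_vnodes` must have the same length as the current bitmap; this is
/// asserted at runtime.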
+ #[must_use = "the executor should decide whether to manipulate the cache based on the previous vnode bitmap"] + pub fn update_vnode_bitmap(&mut self, new_vnodes: Arc) -> Arc { + assert_eq!(self.vnodes.len(), new_vnodes.len()); + std::mem::replace(&mut self.vnodes, new_vnodes) + } } pub trait PkAndRowStream = Stream, OwnedRow)>> + Send; @@ -474,7 +486,7 @@ impl StorageTableInner { })) .await?; - #[auto_enum(futures::Stream)] + #[auto_enum(futures03::Stream)] let iter = match iterators.len() { 0 => unreachable!(), 1 => iterators.into_iter().next().unwrap(), @@ -578,23 +590,21 @@ impl StorageTableInner { Some(Bytes::from(encoded_prefix[..prefix_len].to_vec())) } else { trace!( - "iter_with_pk_bounds dist_key_indices table_id {} not match prefix pk_prefix {:?} dist_key_indices {:?} pk_prefix_indices {:?}", + "iter_with_pk_bounds dist_key_indices table_id {} not match prefix pk_prefix {:?} pk_prefix_indices {:?}", self.table_id, pk_prefix, - self.dist_key_indices, pk_prefix_indices ); None }; trace!( - "iter_with_pk_bounds table_id {} prefix_hint {:?} start_key: {:?}, end_key: {:?} pk_prefix {:?} dist_key_indices {:?} pk_prefix_indices {:?}" , + "iter_with_pk_bounds table_id {} prefix_hint {:?} start_key: {:?}, end_key: {:?} pk_prefix {:?} pk_prefix_indices {:?}" , self.table_id, prefix_hint, start_key, end_key, pk_prefix, - self.dist_key_indices, pk_prefix_indices ); diff --git a/src/storage/src/table/mod.rs b/src/storage/src/table/mod.rs index 5a7ac8627104c..1bb9731eb7682 100644 --- a/src/storage/src/table/mod.rs +++ b/src/storage/src/table/mod.rs @@ -16,15 +16,16 @@ pub mod batch_table; use std::sync::{Arc, LazyLock}; +use itertools::Itertools; use risingwave_common::array::DataChunk; use risingwave_common::buffer::{Bitmap, BitmapBuilder}; use risingwave_common::catalog::Schema; use risingwave_common::hash::VirtualNode; -use risingwave_common::row::{OwnedRow, Row, RowExt}; -use risingwave_common::util::hash_util::Crc32FastBuilder; +use risingwave_common::row::{OwnedRow, Row}; use risingwave_common::util::iter_util::ZipEqFast; use crate::error::StorageResult; + /// For tables without distribution (singleton), the `DEFAULT_VNODE` is encoded. pub const DEFAULT_VNODE: VirtualNode = VirtualNode::ZERO; @@ -32,7 +33,7 @@ pub const DEFAULT_VNODE: VirtualNode = VirtualNode::ZERO; #[derive(Debug)] pub struct Distribution { /// Indices of distribution key for computing vnode, based on the all columns of the table. - pub dist_key_indices: Vec, + pub dist_key_in_pk_indices: Vec, /// Virtual nodes that the table is partitioned into. pub vnodes: Arc, @@ -48,18 +49,29 @@ impl Distribution { vnodes.finish().into() }); Self { - dist_key_indices: vec![], + dist_key_in_pk_indices: vec![], vnodes: FALLBACK_VNODES.clone(), } } + pub fn fallback_vnodes() -> Arc { + /// A bitmap that only the default vnode is set. + static FALLBACK_VNODES: LazyLock> = LazyLock::new(|| { + let mut vnodes = BitmapBuilder::zeroed(VirtualNode::COUNT); + vnodes.set(DEFAULT_VNODE.to_index(), true); + vnodes.finish().into() + }); + + FALLBACK_VNODES.clone() + } + /// Distribution that accesses all vnodes, mainly used for tests. - pub fn all_vnodes(dist_key_indices: Vec) -> Self { + pub fn all_vnodes(dist_key_in_pk_indices: Vec) -> Self { /// A bitmap that all vnodes are set. 
static ALL_VNODES: LazyLock> = LazyLock::new(|| Bitmap::ones(VirtualNode::COUNT).into()); Self { - dist_key_indices, + dist_key_in_pk_indices, vnodes: ALL_VNODES.clone(), } } @@ -111,7 +123,7 @@ pub fn compute_vnode(row: impl Row, indices: &[usize], vnodes: &Bitmap) -> Virtu let vnode = if indices.is_empty() { DEFAULT_VNODE } else { - let vnode = (&row).project(indices).hash(Crc32FastBuilder).to_vnode(); + let vnode = VirtualNode::compute_row(&row, indices); check_vnode_is_set(vnode, vnodes); vnode }; @@ -124,18 +136,22 @@ pub fn compute_vnode(row: impl Row, indices: &[usize], vnodes: &Bitmap) -> Virtu /// Get vnode values with `indices` on the given `chunk`. pub fn compute_chunk_vnode( chunk: &DataChunk, - indices: &[usize], + dist_key_in_pk_indices: &[usize], + pk_indices: &[usize], vnodes: &Bitmap, ) -> Vec { - if indices.is_empty() { + if dist_key_in_pk_indices.is_empty() { vec![DEFAULT_VNODE; chunk.capacity()] } else { - chunk - .get_hash_values(indices, Crc32FastBuilder) + let dist_key_indices = dist_key_in_pk_indices + .iter() + .map(|idx| pk_indices[*idx]) + .collect_vec(); + + VirtualNode::compute_chunk(chunk, &dist_key_indices) .into_iter() .zip_eq_fast(chunk.vis().iter()) - .map(|(h, vis)| { - let vnode = h.to_vnode(); + .map(|(vnode, vis)| { // Ignore the invisible rows. if vis { check_vnode_is_set(vnode, vnodes); diff --git a/src/stream/Cargo.toml b/src/stream/Cargo.toml index 1c6d587db4841..7539ae6a2f55f 100644 --- a/src/stream/Cargo.toml +++ b/src/stream/Cargo.toml @@ -52,6 +52,7 @@ risingwave_pb = { path = "../prost" } risingwave_rpc_client = { path = "../rpc_client" } risingwave_source = { path = "../source" } risingwave_storage = { path = "../storage" } +serde_json = "1" smallvec = "1" static_assertions = "1" task_stats_alloc = { path = "../utils/task_stats_alloc" } @@ -76,3 +77,4 @@ workspace-hack = { path = "../workspace-hack" } [dev-dependencies] assert_matches = "1" risingwave_hummock_test = { path = "../storage/hummock_test", features = ["test"] } +tracing-test = "0.2" diff --git a/src/stream/src/common/builder.rs b/src/stream/src/common/builder.rs index 1ca5e68466385..ea78b7a69b533 100644 --- a/src/stream/src/common/builder.rs +++ b/src/stream/src/common/builder.rs @@ -45,8 +45,13 @@ pub struct StreamChunkBuilder { impl Drop for StreamChunkBuilder { fn drop(&mut self) { - // Possible to fail in some corner cases but should not in unit tests - debug_assert_eq!(self.size, 0, "dropping non-empty stream chunk builder"); + // Possible to fail when async task gets cancelled. + if self.size != 0 { + tracing::warn!( + remaining = self.size, + "dropping non-empty stream chunk builder" + ); + } } } diff --git a/src/stream/src/common/infallible_expr.rs b/src/stream/src/common/infallible_expr.rs deleted file mode 100644 index 2742c5960cb76..0000000000000 --- a/src/stream/src/common/infallible_expr.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
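The reworked `compute_chunk_vnode` above no longer takes absolute distribution-key indices; it reconstructs them by composing `dist_key_in_pk_indices` with `pk_indices`. A tiny worked sketch of that composition, with made-up indices:

    fn main() {
        // Hypothetical layout: pk is (col3, col0, col5); the dist key is the
        // 2nd and 3rd pk columns, i.e. (col0, col5).
        let pk_indices = [3usize, 0, 5];
        let dist_key_in_pk_indices = [1usize, 2];
        let dist_key_indices: Vec<usize> = dist_key_in_pk_indices
            .iter()
            .map(|&idx| pk_indices[idx])
            .collect();
        assert_eq!(dist_key_indices, vec![0, 5]);
    }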
- -use std::sync::Arc; - -use risingwave_common::array::{ArrayRef, DataChunk}; -use risingwave_common::row::{OwnedRow, Row}; -use risingwave_common::types::Datum; -use risingwave_expr::expr::Expression; -use risingwave_expr::ExprError; -use static_assertions::const_assert; - -pub trait InfallibleExpression: Expression { - fn eval_infallible(&self, input: &DataChunk, on_err: impl Fn(ExprError)) -> ArrayRef { - const_assert!(!crate::STRICT_MODE); - - #[expect(clippy::disallowed_methods)] - self.eval(input).unwrap_or_else(|_err| { - // When eval failed, recompute in row-based execution - // and pad with NULL for each failed row. - let mut array_builder = self.return_type().create_array_builder(input.cardinality()); - for row in input.rows_with_holes() { - if let Some(row) = row { - let datum = self.eval_row_infallible(&row.into_owned_row(), &on_err); - array_builder.append_datum(&datum); - } else { - array_builder.append_null(); - } - } - Arc::new(array_builder.finish()) - }) - } - - fn eval_row_infallible(&self, input: &OwnedRow, on_err: impl Fn(ExprError)) -> Datum { - const_assert!(!crate::STRICT_MODE); - - #[expect(clippy::disallowed_methods)] - self.eval_row(input).unwrap_or_else(|err| { - on_err(err); - None - }) - } -} - -impl InfallibleExpression for E {} diff --git a/src/stream/src/common/mod.rs b/src/stream/src/common/mod.rs index 026af37353995..ebbadb96a2f57 100644 --- a/src/stream/src/common/mod.rs +++ b/src/stream/src/common/mod.rs @@ -14,9 +14,7 @@ pub use builder::*; pub use column_mapping::*; -pub use infallible_expr::*; mod builder; mod column_mapping; -mod infallible_expr; pub mod table; diff --git a/src/stream/src/common/table/state_table.rs b/src/stream/src/common/table/state_table.rs index 3bd4c7f449284..31e9750c2841b 100644 --- a/src/stream/src/common/table/state_table.rs +++ b/src/stream/src/common/table/state_table.rs @@ -47,6 +47,7 @@ use risingwave_storage::StateStore; use tracing::trace; use super::watermark::{WatermarkBufferByEpoch, WatermarkBufferStrategy}; +use crate::cache::cache_may_stale; use crate::executor::{StreamExecutorError, StreamExecutorResult}; /// This num is arbitrary and we may want to improve this choice in the future. @@ -84,7 +85,7 @@ pub struct StateTableInner< /// Indices of distribution key for computing vnode. /// Note that the index is based on the all columns of the table, instead of the output ones. // FIXME: revisit constructions and usages. - dist_key_indices: Vec, + // dist_key_indices: Vec, /// Indices of distribution key for computing vnode. /// Note that the index is based on the primary key columns by `pk_indices`. 
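The `cache_may_stale` helper imported above feeds the new return value of `update_vnode_bitmap` below. A plausible reading (an assumption about its semantics, not the verbatim helper): a cache can only be trusted if the actor gained no vnodes, i.e. the new bitmap is a subset of the old one. A toy model over plain bool slices:

    // Assumed semantics: stale iff some vnode is owned now but was not before.
    fn cache_may_stale(prev: &[bool], curr: &[bool]) -> bool {
        !curr.iter().zip(prev).all(|(c, p)| !*c || *p)
    }

    fn main() {
        let prev = [true, true, false];
        assert!(!cache_may_stale(&prev, &[true, false, false])); // scaled in: subset of old
        assert!(cache_may_stale(&prev, &[true, true, true])); // gained a vnode: stale
    }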
@@ -158,9 +159,7 @@ where let order_types: Vec = table_catalog .pk .iter() - .map(|col_order| { - OrderType::from_protobuf(&col_order.get_order_type().unwrap().direction()) - }) + .map(|col_order| OrderType::from_protobuf(col_order.get_order_type().unwrap())) .collect(); let dist_key_indices: Vec = table_catalog .distribution_key @@ -200,15 +199,10 @@ where .collect(); let pk_serde = OrderedRowSerde::new(pk_data_types, order_types); - let Distribution { - dist_key_indices, - vnodes, - } = match vnodes { - Some(vnodes) => Distribution { - dist_key_indices, - vnodes, - }, - None => Distribution::fallback(), + let vnodes = match vnodes { + Some(vnodes) => vnodes, + + None => Distribution::fallback_vnodes(), }; let vnode_col_idx_in_pk = table_catalog.vnode_col_index.as_ref().and_then(|idx| { let vnode_col_idx = *idx as usize; @@ -253,7 +247,6 @@ where pk_serde, row_serde, pk_indices: pk_indices.to_vec(), - dist_key_indices, dist_key_in_pk_indices, prefix_hint_len, vnodes, @@ -323,31 +316,6 @@ where Distribution::fallback(), None, false, - 0, - ) - .await - } - - /// Create a state table without distribution, with given `prefix_hint_len`, used for unit - /// tests. - pub async fn new_without_distribution_with_prefix_hint_len( - store: S, - table_id: TableId, - columns: Vec, - order_types: Vec, - pk_indices: Vec, - prefix_hint_len: usize, - ) -> Self { - Self::new_with_distribution_inner( - store, - table_id, - columns, - order_types, - pk_indices, - Distribution::fallback(), - None, - true, - prefix_hint_len, ) .await } @@ -372,7 +340,6 @@ where distribution, value_indices, true, - 0, ) .await } @@ -395,7 +362,6 @@ where distribution, value_indices, false, - 0, ) .await } @@ -408,12 +374,11 @@ where order_types: Vec, pk_indices: Vec, Distribution { - dist_key_indices, + dist_key_in_pk_indices, vnodes, }: Distribution, value_indices: Option>, is_consistent_op: bool, - prefix_hint_len: usize, ) -> Self { let local_state_store = store .new_local(NewLocalOptions { @@ -447,16 +412,14 @@ where .collect_vec(), None => table_columns.iter().map(|c| c.column_id).collect_vec(), }; - let dist_key_in_pk_indices = get_dist_key_in_pk_indices(&dist_key_indices, &pk_indices); Self { table_id, local_store: local_state_store, pk_serde, row_serde: SD::new(&column_ids, Arc::from(data_types.into_boxed_slice())), pk_indices, - dist_key_indices, dist_key_in_pk_indices, - prefix_hint_len, + prefix_hint_len: 0, vnodes, table_option: Default::default(), vnode_col_idx_in_pk: None, @@ -477,7 +440,7 @@ where if self.vnode_col_idx_in_pk.is_some() { false } else { - self.dist_key_indices.is_empty() + self.dist_key_in_pk_indices.is_empty() } } @@ -505,8 +468,13 @@ where } /// Get the vnode value of the given row - pub fn compute_vnode(&self, row: impl Row) -> VirtualNode { - compute_vnode(row, &self.dist_key_indices, &self.vnodes) + // pub fn compute_vnode(&self, row: impl Row) -> VirtualNode { + // compute_vnode(row, &self.dist_key_indices, &self.vnodes) + // } + + /// Get the vnode value of the given row + pub fn compute_vnode_by_pk(&self, pk: impl Row) -> VirtualNode { + compute_vnode(pk, &self.dist_key_in_pk_indices, &self.vnodes) } // TODO: remove, should not be exposed to user @@ -518,9 +486,9 @@ where &self.pk_serde } - pub fn dist_key_indices(&self) -> &[usize] { - &self.dist_key_indices - } + // pub fn dist_key_indices(&self) -> &[usize] { + // &self.dist_key_indices + // } pub fn vnodes(&self) -> &Arc { &self.vnodes @@ -610,7 +578,7 @@ where /// Update the vnode bitmap of the state table, returns the previous vnode 
bitmap. #[must_use = "the executor should decide whether to manipulate the cache based on the previous vnode bitmap"] - pub fn update_vnode_bitmap(&mut self, new_vnodes: Arc) -> Arc { + pub fn update_vnode_bitmap(&mut self, new_vnodes: Arc) -> (Arc, bool) { assert!( !self.is_dirty(), "vnode bitmap should only be updated when state table is clean" @@ -623,9 +591,16 @@ where } assert_eq!(self.vnodes.len(), new_vnodes.len()); - self.cur_watermark = None; + let cache_may_stale = cache_may_stale(&self.vnodes, &new_vnodes); - std::mem::replace(&mut self.vnodes, new_vnodes) + if cache_may_stale { + self.cur_watermark = None; + } + + ( + std::mem::replace(&mut self.vnodes, new_vnodes), + cache_may_stale, + ) } } @@ -726,7 +701,12 @@ where pub fn write_chunk(&mut self, chunk: StreamChunk) { let (chunk, op) = chunk.into_parts(); - let vnodes = compute_chunk_vnode(&chunk, &self.dist_key_indices, &self.vnodes); + let vnodes = compute_chunk_vnode( + &chunk, + &self.dist_key_in_pk_indices, + &self.pk_indices, + &self.vnodes, + ); let value_chunk = if let Some(ref value_indices) = self.value_indices { chunk.clone().reorder_columns(value_indices) @@ -828,22 +808,28 @@ where } else { Some(self.pk_serde.prefix(1)) }; - let range_end_suffix = watermark.map(|watermark| { + let watermark_suffix = watermark.map(|watermark| { serialize_pk( row::once(Some(watermark)), prefix_serializer.as_ref().unwrap(), ) }); - if let Some(range_end_suffix) = range_end_suffix { - let range_begin_suffix = vec![]; - trace!(table_id = %self.table_id, range_end = ?range_end_suffix, vnodes = ?{ + if let Some(watermark_suffix) = watermark_suffix { + // We either serialize null into `0u8`, data into `(1u8 || scalar)`, or serialize null + // into `1u8`, data into `(0u8 || scalar)`. We do not want to delete null + // here, so `range_begin_suffix` cannot be `vec![]` when null is represented as `0u8`. 
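// Put differently: the first byte of the serialized watermark is the
// null-flag byte of its order encoding, so starting the delete range at
// exactly that byte keeps the differently-flagged NULL sentinel out of the
// range while still covering every non-null key below the watermark.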
+ let range_begin_suffix = watermark_suffix + .first() + .map(|bit| vec![*bit]) + .unwrap_or_default(); + trace!(table_id = %self.table_id, watermark = ?watermark_suffix, vnodes = ?{ self.vnodes.iter_vnodes().collect_vec() }, "delete range"); for vnode in self.vnodes.iter_vnodes() { let mut range_begin = vnode.to_be_bytes().to_vec(); let mut range_end = range_begin.clone(); range_begin.extend(&range_begin_suffix); - range_end.extend(&range_end_suffix); + range_end.extend(&watermark_suffix); delete_ranges.push((Bytes::from(range_begin), Bytes::from(range_end))); } } @@ -986,7 +972,7 @@ where trace!( table_id = %self.table_id(), ?prefix_hint, ?encoded_key_range_with_vnode, ?pk_prefix, - dist_key_indices = ?self.dist_key_indices, ?pk_prefix_indices, + ?pk_prefix_indices, "storage_iter_with_prefix" ); diff --git a/src/stream/src/common/table/test_state_table.rs b/src/stream/src/common/table/test_state_table.rs index 832f3f573789e..40505c9f7c642 100644 --- a/src/stream/src/common/table/test_state_table.rs +++ b/src/stream/src/common/table/test_state_table.rs @@ -40,7 +40,7 @@ async fn test_state_table_update_insert() { ColumnDesc::unnamed(ColumnId::from(2), DataType::Int32), ColumnDesc::unnamed(ColumnId::from(4), DataType::Int32), ]; - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let pk_index = vec![0_usize]; let read_prefix_len_hint = 1; let table = gen_prost_table( @@ -211,7 +211,7 @@ async fn test_state_table_iter_with_prefix() { let test_env = prepare_hummock_test_env().await; // let pk_columns = vec![0, 1]; leave a message to indicate pk columns - let order_types = vec![OrderType::Ascending, OrderType::Descending]; + let order_types = vec![OrderType::ascending(), OrderType::descending()]; let column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ @@ -339,7 +339,7 @@ async fn test_state_table_iter_with_pk_range() { let test_env = prepare_hummock_test_env().await; // let pk_columns = vec![0, 1]; leave a message to indicate pk columns - let order_types = vec![OrderType::Ascending, OrderType::Descending]; + let order_types = vec![OrderType::ascending(), OrderType::descending()]; let column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ @@ -480,7 +480,7 @@ async fn test_mem_table_assertion() { ColumnDesc::unnamed(ColumnId::from(1), DataType::Int32), ColumnDesc::unnamed(ColumnId::from(2), DataType::Int32), ]; - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let pk_index = vec![0_usize]; let read_prefix_len_hint = 1; let table = gen_prost_table( @@ -515,7 +515,7 @@ async fn test_state_table_iter_with_value_indices() { const TEST_TABLE_ID: TableId = TableId { table_id: 233 }; let test_env = prepare_hummock_test_env().await; - let order_types = vec![OrderType::Ascending, OrderType::Descending]; + let order_types = vec![OrderType::ascending(), OrderType::descending()]; let column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), @@ -676,7 +676,7 @@ async fn test_state_table_iter_with_shuffle_value_indices() { const TEST_TABLE_ID: TableId = TableId { table_id: 233 }; let test_env = prepare_hummock_test_env().await; - let order_types = vec![OrderType::Ascending, OrderType::Descending]; + let order_types = vec![OrderType::ascending(), OrderType::descending()]; let column_ids = vec![ColumnId::from(0), 
ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), @@ -918,7 +918,7 @@ async fn test_state_table_write_chunk() { DataType::Boolean, DataType::Float32, ]; - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let pk_index = vec![0_usize]; let read_prefix_len_hint = 0; let table = gen_prost_table( @@ -1047,7 +1047,7 @@ async fn test_state_table_write_chunk_visibility() { DataType::Boolean, DataType::Float32, ]; - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let pk_index = vec![0_usize]; let read_prefix_len_hint = 0; let table = gen_prost_table( @@ -1173,7 +1173,7 @@ async fn test_state_table_write_chunk_value_indices() { DataType::Boolean, DataType::Float32, ]; - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let pk_index = vec![0_usize]; let read_prefix_len_hint = 0; let table = gen_prost_table_with_value_indices( @@ -1276,7 +1276,7 @@ async fn test_state_table_may_exist() { let test_env = prepare_hummock_test_env().await; // let pk_columns = vec![0, 1]; leave a message to indicate pk columns - let order_types = vec![OrderType::Ascending, OrderType::Descending]; + let order_types = vec![OrderType::ascending(), OrderType::descending()]; let column_ids = vec![ColumnId::from(0), ColumnId::from(1), ColumnId::from(2)]; let column_descs = vec![ diff --git a/src/stream/src/common/table/test_storage_table.rs b/src/stream/src/common/table/test_storage_table.rs index 2e5e482cac674..77cb3708489f1 100644 --- a/src/stream/src/common/table/test_storage_table.rs +++ b/src/stream/src/common/table/test_storage_table.rs @@ -50,7 +50,7 @@ async fn test_storage_table_value_indices() { ColumnDesc::unnamed(column_ids[4], DataType::Varchar), ]; let pk_indices = vec![0_usize, 2_usize]; - let order_types = vec![OrderType::Ascending, OrderType::Descending]; + let order_types = vec![OrderType::ascending(), OrderType::descending()]; let value_indices = vec![1, 3, 4]; let read_prefix_len_hint = 2; let table = gen_prost_table_with_value_indices( @@ -179,7 +179,7 @@ async fn test_shuffled_column_id_for_storage_table_get_row() { ColumnDesc::unnamed(column_ids[2], DataType::Int32), ]; - let order_types = vec![OrderType::Ascending, OrderType::Descending]; + let order_types = vec![OrderType::ascending(), OrderType::descending()]; let pk_indices = vec![0_usize, 1_usize]; let read_prefix_len_hint = 2; let table = gen_prost_table( @@ -281,7 +281,7 @@ async fn test_row_based_storage_table_point_get_in_batch_mode() { ColumnDesc::unnamed(column_ids[2], DataType::Int32), ]; let pk_indices = vec![0_usize, 1_usize]; - let order_types = vec![OrderType::Ascending, OrderType::Descending]; + let order_types = vec![OrderType::ascending(), OrderType::descending()]; let value_indices: Vec = vec![0, 1, 2]; let read_prefix_len_hint = 0; let table = gen_prost_table_with_value_indices( @@ -376,7 +376,7 @@ async fn test_batch_scan_with_value_indices() { const TEST_TABLE_ID: TableId = TableId { table_id: 233 }; let test_env = prepare_hummock_test_env().await; - let order_types = vec![OrderType::Ascending, OrderType::Descending]; + let order_types = vec![OrderType::ascending(), OrderType::descending()]; let column_ids = vec![ ColumnId::from(0), ColumnId::from(1), diff --git a/src/stream/src/common/table/test_utils.rs b/src/stream/src/common/table/test_utils.rs index 426a254e639ed..526f6864b3a99 100644 --- 
a/src/stream/src/common/table/test_utils.rs +++ b/src/stream/src/common/table/test_utils.rs @@ -16,8 +16,8 @@ use itertools::Itertools; use risingwave_common::catalog::{ColumnDesc, TableId}; use risingwave_common::util::iter_util::ZipEqFast; use risingwave_common::util::sort_util::OrderType; -use risingwave_pb::catalog::Table as ProstTable; -use risingwave_pb::common::{PbColumnOrder, PbOrderType}; +use risingwave_pb::catalog::PbTable; +use risingwave_pb::common::PbColumnOrder; use risingwave_pb::plan_common::ColumnCatalog; pub(crate) fn gen_prost_table( @@ -26,7 +26,7 @@ pub(crate) fn gen_prost_table( order_types: Vec, pk_index: Vec, read_prefix_len_hint: u32, -) -> ProstTable { +) -> PbTable { let col_len = column_descs.len() as i32; gen_prost_table_with_value_indices( table_id, @@ -45,15 +45,13 @@ pub(crate) fn gen_prost_table_with_value_indices( pk_index: Vec, read_prefix_len_hint: u32, value_indices: Vec, -) -> ProstTable { +) -> PbTable { let prost_pk = pk_index .iter() .zip_eq_fast(order_types.iter()) .map(|(idx, order)| PbColumnOrder { column_index: *idx as _, - order_type: Some(PbOrderType { - direction: order.to_protobuf() as _, - }), + order_type: Some(order.to_protobuf()), }) .collect(); let prost_columns = column_descs @@ -64,7 +62,7 @@ pub(crate) fn gen_prost_table_with_value_indices( }) .collect(); - ProstTable { + PbTable { id: table_id.table_id(), columns: prost_columns, pk: prost_pk, diff --git a/src/stream/src/error.rs b/src/stream/src/error.rs index 0c8809225a03a..7c92a2b4e0235 100644 --- a/src/stream/src/error.rs +++ b/src/stream/src/error.rs @@ -16,7 +16,7 @@ use std::backtrace::Backtrace; use risingwave_common::array::ArrayError; use risingwave_expr::ExprError; -use risingwave_pb::ProstFieldNotFound; +use risingwave_pb::PbFieldNotFound; use risingwave_storage::error::StorageError; use crate::executor::StreamExecutorError; @@ -36,7 +36,7 @@ enum Inner { #[error("Array/Chunk error: {0}")] Array(ArrayError), - #[error("Executor error: {0}")] + #[error("Executor error: {0:?}")] Executor(Box), #[error(transparent)] @@ -95,8 +95,8 @@ impl From for StreamError { } } -impl From for StreamError { - fn from(err: ProstFieldNotFound) -> Self { +impl From for StreamError { + fn from(err: PbFieldNotFound) -> Self { Self::from(anyhow::anyhow!( "Failed to decode prost: field not found `{}`", err.0 diff --git a/src/stream/src/executor/actor.rs b/src/stream/src/executor/actor.rs index 7044e62b20615..6fc621933b3fb 100644 --- a/src/stream/src/executor/actor.rs +++ b/src/stream/src/executor/actor.rs @@ -15,9 +15,9 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use anyhow::anyhow; use await_tree::InstrumentAwait; use futures::future::join_all; -use futures::pin_mut; use hytra::TrAdder; use minitrace::prelude::*; use parking_lot::Mutex; @@ -173,33 +173,33 @@ where }; let mut last_epoch: Option = None; - - let stream = Box::new(self.consumer).execute(); - pin_mut!(stream); + let mut stream = Box::pin(Box::new(self.consumer).execute()); // Drive the streaming task with an infinite loop - while let Some(barrier) = stream - .next() - .in_span(span) - .instrument_await( - last_epoch.map_or("Epoch ".into(), |e| format!("Epoch {}", e.curr)), - ) - .await - .transpose()? 
- { - last_epoch = Some(barrier.epoch); + let result = loop { + let barrier = match stream + .try_next() + .in_span(span) + .instrument_await( + last_epoch.map_or("Epoch ".into(), |e| format!("Epoch {}", e.curr)), + ) + .await + { + Ok(Some(barrier)) => barrier, + Ok(None) => break Err(anyhow!("actor exited unexpectedly").into()), + Err(err) => break Err(err), + }; // Collect barriers to local barrier manager self.context.lock_barrier_manager().collect(id, &barrier); // Then stop this actor if asked - let to_stop = barrier.is_stop_or_update_drop_actor(id); - if to_stop { - tracing::trace!(actor_id = id, "actor exit"); - return Ok(()); + if barrier.is_stop_or_update_drop_actor(id) { + break Ok(()); } // Tracing related work + last_epoch = Some(barrier.epoch); span = { let mut span = Span::enter_with_local_parent("actor_poll"); span.add_property(|| ("otel.name", span_name.to_string())); @@ -208,8 +208,24 @@ where span.add_property(|| ("epoch", barrier.epoch.curr.to_string())); span }; - } + }; + + spawn_blocking_drop_stream(stream).await; - Ok(()) + tracing::trace!(actor_id = id, "actor exit"); + result } } + +/// Drop the stream in a blocking task to avoid interfering with other actors. +/// +/// Logically the actor is dropped after we send the barrier with `Drop` mutation to the +/// downstream,thus making the `drop`'s progress asynchronous. However, there might be a +/// considerable amount of data in the executors' in-memory cache, dropping these structures might +/// be a CPU-intensive task. This may lead to the runtime being unable to schedule other actors if +/// the `drop` is called on the current thread. +pub async fn spawn_blocking_drop_stream(stream: T) { + let _ = tokio::task::spawn_blocking(move || drop(stream)) + .instrument_await("drop_stream") + .await; +} diff --git a/src/stream/src/executor/aggregation/agg_call.rs b/src/stream/src/executor/aggregation/agg_call.rs index 3221ea90cc8b1..bfe86ca8d4c50 100644 --- a/src/stream/src/executor/aggregation/agg_call.rs +++ b/src/stream/src/executor/aggregation/agg_call.rs @@ -15,7 +15,7 @@ use std::slice; use risingwave_common::types::DataType; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_expr::expr::{AggKind, ExpressionRef}; /// An aggregation function may accept 0, 1 or 2 arguments. @@ -62,7 +62,7 @@ pub struct AggCall { pub return_type: DataType, /// Order requirements specified in order by clause of agg call - pub order_pairs: Vec, + pub column_orders: Vec, /// Whether the stream is append-only. 
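/// An append-only stream carries only `Insert` ops, never updates or deletes.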
/// Specific streaming aggregator may optimize its implementation diff --git a/src/stream/src/executor/aggregation/agg_group.rs b/src/stream/src/executor/aggregation/agg_group.rs index 6b875b176c7a8..e26a2e487e232 100644 --- a/src/stream/src/executor/aggregation/agg_group.rs +++ b/src/stream/src/executor/aggregation/agg_group.rs @@ -31,80 +31,15 @@ use crate::common::table::state_table::StateTable; use crate::executor::error::StreamExecutorResult; use crate::executor::PkIndices; -mod changes_builder { - use super::*; - - pub(super) fn insert_new_outputs( - curr_outputs: &OwnedRow, - builders: &mut [ArrayBuilderImpl], - new_ops: &mut Vec, - ) -> usize { - new_ops.push(Op::Insert); - - for (builder, new_value) in builders.iter_mut().zip_eq_fast(curr_outputs.iter()) { - trace!("insert datum: {:?}", new_value); - builder.append_datum(new_value); - } - - 1 - } - - pub(super) fn delete_old_outputs( - prev_outputs: &OwnedRow, - builders: &mut [ArrayBuilderImpl], - new_ops: &mut Vec, - ) -> usize { - new_ops.push(Op::Delete); - - for (builder, old_value) in builders.iter_mut().zip_eq_fast(prev_outputs.iter()) { - trace!("delete datum: {:?}", old_value); - builder.append_datum(old_value); - } - - 1 - } - - pub(super) fn update_outputs( - prev_outputs: &OwnedRow, - curr_outputs: &OwnedRow, - builders: &mut [ArrayBuilderImpl], - new_ops: &mut Vec, - ) -> usize { - if prev_outputs == curr_outputs { - // Fast path for no change. - return 0; - } - - new_ops.push(Op::UpdateDelete); - new_ops.push(Op::UpdateInsert); - - for (builder, old_value, new_value) in itertools::multizip(( - builders.iter_mut(), - prev_outputs.iter(), - curr_outputs.iter(), - )) { - trace!( - "update datum: prev = {:?}, curr = {:?}", - old_value, - new_value - ); - builder.append_datum(old_value); - builder.append_datum(new_value); - } - - 2 - } -} - pub trait Strategy { - fn build_changes( + /// Infer the change type of the aggregation result. Don't need to take the ownership of + /// `prev_outputs` and `curr_outputs`. + fn infer_change_type( prev_row_count: usize, curr_row_count: usize, prev_outputs: Option<&OwnedRow>, curr_outputs: &OwnedRow, - builders: &mut [ArrayBuilderImpl], - new_ops: &mut Vec, - ) -> usize; + ) -> Option; } /// The strategy that always outputs the aggregation result no matter there're input rows or not. @@ -114,14 +49,12 @@ pub struct AlwaysOutput; pub struct OnlyOutputIfHasInput; impl Strategy for AlwaysOutput { - fn build_changes( + fn infer_change_type( prev_row_count: usize, curr_row_count: usize, prev_outputs: Option<&OwnedRow>, curr_outputs: &OwnedRow, - builders: &mut [ArrayBuilderImpl], - new_ops: &mut Vec, - ) -> usize { + ) -> Option { match prev_outputs { None => { // First time to build changes, assert to ensure correctness. @@ -130,46 +63,48 @@ impl Strategy for AlwaysOutput { assert_eq!(prev_row_count, 0); // Generate output no matter whether current row count is 0 or not. - changes_builder::insert_new_outputs(curr_outputs, builders, new_ops) + Some(AggChangeType::Insert) } Some(prev_outputs) => { - if prev_row_count == 0 && curr_row_count == 0 { - // No rows exist. - return 0; + if prev_row_count == 0 && curr_row_count == 0 || prev_outputs == curr_outputs { + // No rows exist, or output is not changed. 
+ None + } else { + Some(AggChangeType::Update) } - changes_builder::update_outputs(prev_outputs, curr_outputs, builders, new_ops) } } } } impl Strategy for OnlyOutputIfHasInput { - fn build_changes( + fn infer_change_type( prev_row_count: usize, curr_row_count: usize, prev_outputs: Option<&OwnedRow>, curr_outputs: &OwnedRow, - builders: &mut [ArrayBuilderImpl], - new_ops: &mut Vec, - ) -> usize { + ) -> Option { match (prev_row_count, curr_row_count) { (0, 0) => { // No rows of current group exist. - 0 + None } (0, _) => { // Insert new output row for this newly emerged group. - changes_builder::insert_new_outputs(curr_outputs, builders, new_ops) + Some(AggChangeType::Insert) } (_, 0) => { // Delete old output row for this newly disappeared group. - let prev_outputs = prev_outputs.expect("must exist previous outputs"); - changes_builder::delete_old_outputs(prev_outputs, builders, new_ops) + Some(AggChangeType::Delete) } (_, _) => { // Update output row. - let prev_outputs = prev_outputs.expect("must exist previous outputs"); - changes_builder::update_outputs(prev_outputs, curr_outputs, builders, new_ops) + if prev_outputs.expect("must exist previous outputs") == curr_outputs { + // No output change. + None + } else { + Some(AggChangeType::Update) + } } } } @@ -178,7 +113,7 @@ impl Strategy for OnlyOutputIfHasInput { /// [`AggGroup`] manages agg states of all agg calls for one `group_key`. pub struct AggGroup { /// Group key. - group_key: Option, // TODO(rc): we can remove this + group_key: Option, /// Current managed states for all [`AggCall`]s. states: Vec>, @@ -201,14 +136,25 @@ impl Debug for AggGroup { } } -/// Information about the changes built by `AggState::build_changes`. -pub struct AggChangesInfo { - /// The number of rows and corresponding ops in the changes. - pub n_appended_ops: usize, - /// The result row containing group key prefix. To be inserted into result table. - pub result_row: OwnedRow, - /// The previous outputs of all agg calls recorded in the `AggState`. - pub prev_outputs: Option, +/// Type of aggregation change. +pub enum AggChangeType { + Insert, + Delete, + Update, +} + +/// Aggregation change. The result rows include group key prefix. +pub enum AggChange { + Insert { + new_row: OwnedRow, + }, + Delete { + old_row: OwnedRow, + }, + Update { + old_row: OwnedRow, + new_row: OwnedRow, + }, } impl AggGroup { @@ -318,8 +264,10 @@ impl AggGroup { self.states.iter_mut().for_each(|state| state.reset()); } - /// Get the outputs of all managed agg states. + /// Get the outputs of all managed agg states, without group key prefix. /// Possibly need to read/sync from state table if the state not cached in memory. + /// This method is idempotent, i.e. it can be called multiple times and the outputs are + /// guaranteed to be the same. pub async fn get_outputs( &mut self, storages: &[AggStateStorage], @@ -349,15 +297,9 @@ impl AggGroup { .map(OwnedRow::new) } - /// Build changes into `builders` and `new_ops`, according to previous and current agg outputs. - /// Returns [`AggChangesInfo`] contains information about changes built. - /// The saved previous outputs will be updated to the latest outputs after building changes. - pub fn build_changes( - &mut self, - curr_outputs: OwnedRow, - builders: &mut [ArrayBuilderImpl], - new_ops: &mut Vec, - ) -> AggChangesInfo { + /// Build aggregation result change, according to previous and current agg outputs. + /// The saved previous outputs will be updated to the latest outputs after this method. 
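/// Returns `None` when the strategy reports no visible change (e.g. the
/// output row is unchanged); in that case `prev_outputs` is left untouched.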
+ pub fn build_change(&mut self, curr_outputs: OwnedRow) -> Option { let prev_row_count = self.prev_row_count(); let curr_row_count = curr_outputs[self.row_count_index] .as_ref() @@ -370,27 +312,78 @@ impl AggGroup { curr_row_count ); - let n_appended_ops = Strtg::build_changes( + let change_type = Strtg::infer_change_type( prev_row_count, curr_row_count, self.prev_outputs.as_ref(), &curr_outputs, - builders, - new_ops, ); - let result_row = self.group_key().chain(&curr_outputs).into_owned_row(); + // Split `AggChangeType` and `AggChange` to avoid unnecessary cloning. + change_type.map(|change_type| match change_type { + AggChangeType::Insert => { + let new_row = self.group_key().chain(&curr_outputs).into_owned_row(); + self.prev_outputs = Some(curr_outputs); + AggChange::Insert { new_row } + } + AggChangeType::Delete => { + let prev_outputs = self.prev_outputs.take(); + let old_row = self.group_key().chain(prev_outputs).into_owned_row(); + AggChange::Delete { old_row } + } + AggChangeType::Update => { + let new_row = self.group_key().chain(&curr_outputs).into_owned_row(); + let prev_outputs = self.prev_outputs.replace(curr_outputs); + let old_row = self.group_key().chain(prev_outputs).into_owned_row(); + AggChange::Update { old_row, new_row } + } + }) + } - let prev_outputs = if n_appended_ops == 0 { - self.prev_outputs.clone() - } else { - std::mem::replace(&mut self.prev_outputs, Some(curr_outputs)) - }; + pub fn apply_change_to_builders( + &self, + change: &AggChange, + builders: &mut [ArrayBuilderImpl], + ops: &mut Vec, + ) { + match change { + AggChange::Insert { new_row } => { + trace!("insert row: {:?}", new_row); + ops.push(Op::Insert); + for (builder, new_value) in builders.iter_mut().zip_eq_fast(new_row.iter()) { + builder.append_datum(new_value); + } + } + AggChange::Delete { old_row } => { + trace!("delete row: {:?}", old_row); + ops.push(Op::Delete); + for (builder, old_value) in builders.iter_mut().zip_eq_fast(old_row.iter()) { + builder.append_datum(old_value); + } + } + AggChange::Update { old_row, new_row } => { + trace!("update row: prev = {:?}, curr = {:?}", old_row, new_row); + ops.push(Op::UpdateDelete); + ops.push(Op::UpdateInsert); + for (builder, old_value, new_value) in + itertools::multizip((builders.iter_mut(), old_row.iter(), new_row.iter())) + { + builder.append_datum(old_value); + builder.append_datum(new_value); + } + } + } + } - AggChangesInfo { - n_appended_ops, - result_row, - prev_outputs, + pub fn apply_change_to_result_table( + &self, + change: &AggChange, + result_table: &mut StateTable, + ) { + match change { + AggChange::Insert { new_row } => result_table.insert(new_row), + AggChange::Delete { old_row } => result_table.delete(old_row), + AggChange::Update { old_row, new_row } => result_table.update(old_row, new_row), } } } diff --git a/src/stream/src/executor/aggregation/distinct.rs b/src/stream/src/executor/aggregation/distinct.rs index 924f6ce0f225c..285d979d247ee 100644 --- a/src/stream/src/executor/aggregation/distinct.rs +++ b/src/stream/src/executor/aggregation/distinct.rs @@ -275,7 +275,7 @@ mod tests { return_type: DataType::Int64, distinct, - order_pairs: vec![], + column_orders: vec![], append_only: false, filter: None, } @@ -310,12 +310,12 @@ mod tests { // group key columns for data_type in group_key_types { add_column_desc(data_type.clone()); - order_types.push(OrderType::Ascending); + order_types.push(OrderType::ascending()); } // distinct key column add_column_desc(indices_and_calls[0].1.args.arg_types()[0].clone()); - 
order_types.push(OrderType::Ascending); + order_types.push(OrderType::ascending()); // count columns for (_, _) in indices_and_calls { diff --git a/src/stream/src/executor/aggregation/minput.rs b/src/stream/src/executor/aggregation/minput.rs index a390ecb3b1f04..588a7f3d80094 100644 --- a/src/stream/src/executor/aggregation/minput.rs +++ b/src/stream/src/executor/aggregation/minput.rs @@ -81,22 +81,22 @@ impl MaterializedInputState { // `min`/`max` need not to order by any other columns, but have to // order by the agg value implicitly. let order_type = if agg_call.kind == AggKind::Min { - OrderType::Ascending + OrderType::ascending() } else { - OrderType::Descending + OrderType::descending() }; (vec![arg_col_indices[0]], vec![order_type]) } else { agg_call - .order_pairs + .column_orders .iter() - .map(|p| (p.column_idx, p.order_type)) + .map(|p| (p.column_index, p.order_type)) .unzip() }; let pk_len = pk_indices.len(); order_col_indices.extend(pk_indices.iter()); - order_types.extend(itertools::repeat_n(OrderType::Ascending, pk_len)); + order_types.extend(itertools::repeat_n(OrderType::ascending(), pk_len)); // map argument columns to state table column indices let state_table_arg_col_indices = arg_col_indices @@ -290,7 +290,7 @@ mod tests { use risingwave_common::types::{DataType, ScalarImpl}; use risingwave_common::util::epoch::EpochPair; use risingwave_common::util::iter_util::ZipEqFast; - use risingwave_common::util::sort_util::{OrderPair, OrderType}; + use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; use risingwave_expr::expr::AggKind; use risingwave_storage::memory::MemoryStateStore; use risingwave_storage::StateStore; @@ -350,7 +350,7 @@ mod tests { kind, args: AggArgs::Unary(arg_type.clone(), arg_idx), return_type: arg_type, - order_pairs: vec![], + column_orders: vec![], append_only: false, filter: None, distinct: false, @@ -376,8 +376,8 @@ mod tests { &input_schema, vec![2, 3], vec![ - OrderType::Ascending, // for AggKind::Min - OrderType::Ascending, + OrderType::ascending(), // for AggKind::Min + OrderType::ascending(), ], ) .await; @@ -485,8 +485,8 @@ mod tests { &input_schema, vec![2, 3], vec![ - OrderType::Descending, // for AggKind::Max - OrderType::Ascending, + OrderType::descending(), // for AggKind::Max + OrderType::ascending(), ], ) .await; @@ -595,8 +595,8 @@ mod tests { &input_schema, vec![0, 3], vec![ - OrderType::Ascending, // for AggKind::Min - OrderType::Ascending, + OrderType::ascending(), // for AggKind::Min + OrderType::ascending(), ], ) .await; @@ -604,8 +604,8 @@ mod tests { &input_schema, vec![1, 3], vec![ - OrderType::Descending, // for AggKind::Max - OrderType::Ascending, + OrderType::descending(), // for AggKind::Max + OrderType::ascending(), ], ) .await; @@ -704,9 +704,9 @@ mod tests { &input_schema, vec![2, 1, 3], vec![ - OrderType::Ascending, // c ASC - OrderType::Descending, // b DESC for AggKind::Max - OrderType::Ascending, // _row_id ASC + OrderType::ascending(), // c ASC + OrderType::descending(), // b DESC for AggKind::Max + OrderType::ascending(), // _row_id ASC ], ) .await; @@ -811,8 +811,8 @@ mod tests { &input_schema, vec![0, 1], vec![ - OrderType::Ascending, // for AggKind::Min - OrderType::Ascending, + OrderType::ascending(), // for AggKind::Min + OrderType::ascending(), ], ) .await; @@ -925,8 +925,8 @@ mod tests { &input_schema, vec![0, 1], vec![ - OrderType::Ascending, // for AggKind::Min - OrderType::Ascending, + OrderType::ascending(), // for AggKind::Min + OrderType::ascending(), ], ) .await; @@ -1044,9 +1044,9 @@ mod 
tests { kind: AggKind::StringAgg, args: AggArgs::Binary([DataType::Varchar, DataType::Varchar], [0, 1]), return_type: DataType::Varchar, - order_pairs: vec![ - OrderPair::new(2, OrderType::Ascending), // b ASC - OrderPair::new(0, OrderType::Descending), // a DESC + column_orders: vec![ + ColumnOrder::new(2, OrderType::ascending()), // b ASC + ColumnOrder::new(0, OrderType::descending()), // a DESC ], append_only: false, filter: None, @@ -1058,9 +1058,9 @@ mod tests { &input_schema, vec![2, 0, 4, 1], vec![ - OrderType::Ascending, // b ASC - OrderType::Descending, // a DESC - OrderType::Ascending, // _row_id ASC + OrderType::ascending(), // b ASC + OrderType::descending(), // a DESC + OrderType::ascending(), // _row_id ASC ], ) .await; @@ -1146,9 +1146,9 @@ mod tests { kind: AggKind::ArrayAgg, args: AggArgs::Unary(DataType::Int32, 1), // array_agg(b) return_type: DataType::Int32, - order_pairs: vec![ - OrderPair::new(2, OrderType::Ascending), // c ASC - OrderPair::new(0, OrderType::Descending), // a DESC + column_orders: vec![ + ColumnOrder::new(2, OrderType::ascending()), // c ASC + ColumnOrder::new(0, OrderType::descending()), // a DESC ], append_only: false, filter: None, @@ -1160,9 +1160,9 @@ mod tests { &input_schema, vec![2, 0, 3, 1], vec![ - OrderType::Ascending, // c ASC - OrderType::Descending, // a DESC - OrderType::Ascending, // _row_id ASC + OrderType::ascending(), // c ASC + OrderType::descending(), // a DESC + OrderType::ascending(), // _row_id ASC ], ) .await; diff --git a/src/stream/src/executor/aggregation/mod.rs b/src/stream/src/executor/aggregation/mod.rs index 5958ccdb9b685..bbe9c4febf2db 100644 --- a/src/stream/src/executor/aggregation/mod.rs +++ b/src/stream/src/executor/aggregation/mod.rs @@ -27,7 +27,6 @@ use risingwave_storage::StateStore; use super::ActorContextRef; use crate::common::table::state_table::StateTable; -use crate::common::InfallibleExpression; use crate::executor::error::StreamExecutorResult; use crate::executor::Executor; @@ -66,7 +65,7 @@ pub fn generate_agg_schema( Schema { fields } } -pub fn agg_call_filter_res( +pub async fn agg_call_filter_res( ctx: &ActorContextRef, identity: &str, agg_call: &AggCall, @@ -90,6 +89,7 @@ pub fn agg_call_filter_res( let data_chunk = DataChunk::new(columns.to_vec(), capacity); if let Bool(filter_res) = filter .eval_infallible(&data_chunk, |err| ctx.on_compute_error(err, identity)) + .await .as_ref() { Some(filter_res.to_bitmap()) diff --git a/src/stream/src/executor/aggregation/value.rs b/src/stream/src/executor/aggregation/value.rs index b0b64a1a5339c..d2afb071d27dd 100644 --- a/src/stream/src/executor/aggregation/value.rs +++ b/src/stream/src/executor/aggregation/value.rs @@ -90,7 +90,7 @@ mod tests { kind: risingwave_expr::expr::AggKind::Count, args: AggArgs::Unary(DataType::Int64, 0), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only: false, filter: None, distinct: false, @@ -133,7 +133,7 @@ mod tests { kind: risingwave_expr::expr::AggKind::Max, args: AggArgs::Unary(DataType::Int64, 0), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only: true, filter: None, distinct: false, diff --git a/src/stream/src/executor/backfill.rs b/src/stream/src/executor/backfill.rs index ef8a0dfc00cf0..a8677a85719d6 100644 --- a/src/stream/src/executor/backfill.rs +++ b/src/stream/src/executor/backfill.rs @@ -25,7 +25,7 @@ use risingwave_common::buffer::BitmapBuilder; use risingwave_common::catalog::Schema; use risingwave_common::row::{self, OwnedRow, Row, 
RowExt}; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_common::util::sort_util::OrderType; +use risingwave_common::util::sort_util::{compare_datum, OrderType}; use risingwave_hummock_sdk::HummockReadEpoch; use risingwave_storage::store::PrefetchOptions; use risingwave_storage::table::batch_table::storage_table::StorageTable; @@ -64,8 +64,8 @@ pub struct BackfillExecutor { /// Upstream with the same schema with the upstream table. upstream: BoxedExecutor, - /// The column indices need to be forwarded to the downstream. - upstream_indices: Vec, + /// The column indices need to be forwarded to the downstream from the upstream and table scan. + output_indices: Vec, progress: CreateMviewProgress, @@ -83,7 +83,7 @@ where pub fn new( table: StorageTable, upstream: BoxedExecutor, - upstream_indices: Vec, + output_indices: Vec, progress: CreateMviewProgress, schema: Schema, pk_indices: PkIndices, @@ -96,7 +96,7 @@ where }, table, upstream, - upstream_indices, + output_indices, actor_id: progress.actor_id(), progress, } @@ -104,10 +104,9 @@ where #[try_stream(ok = Message, error = StreamExecutorError)] async fn execute_inner(mut self) { - // Table storage primary key. - let table_pk_indices = self.table.pk_indices(); + // The primary key columns, in the output columns of the table scan. + let pk_in_output_indices = self.table.pk_in_output_indices().unwrap(); let pk_order = self.table.pk_serializer().get_order_types(); - let upstream_indices = self.upstream_indices; let mut upstream = self.upstream.execute(); @@ -139,7 +138,7 @@ where // Forward messages directly to the downstream. #[for_await] for message in upstream { - if let Some(message) = Self::mapping_message(message?, &upstream_indices) { + if let Some(message) = Self::mapping_message(message?, &self.output_indices) { yield message; } } @@ -213,10 +212,10 @@ where Self::mark_chunk( chunk, current_pos, - table_pk_indices, + &pk_in_output_indices, pk_order, ), - &upstream_indices, + &self.output_indices, )); } } @@ -255,7 +254,7 @@ where processed_rows += chunk.cardinality() as u64; yield Message::Chunk(Self::mapping_chunk( chunk, - &upstream_indices, + &self.output_indices, )); } @@ -272,11 +271,14 @@ where .last() .unwrap() .1 - .project(table_pk_indices) + .project(&pk_in_output_indices) .into_owned_row(), ); processed_rows += chunk.cardinality() as u64; - yield Message::Chunk(Self::mapping_chunk(chunk, &upstream_indices)); + yield Message::Chunk(Self::mapping_chunk( + chunk, + &self.output_indices, + )); } } } @@ -293,7 +295,7 @@ where // Forward messages directly to the downstream. #[for_await] for msg in upstream { - if let Some(msg) = Self::mapping_message(msg?, &upstream_indices) { + if let Some(msg) = Self::mapping_message(msg?, &self.output_indices) { if let Some(barrier) = msg.as_barrier() { self.progress.finish(barrier.epoch.curr); } @@ -360,7 +362,7 @@ where fn mark_chunk( chunk: StreamChunk, current_pos: &OwnedRow, - table_pk_indices: PkIndicesRef<'_>, + pk_in_output_indices: PkIndicesRef<'_>, pk_order: &[OrderType], ) -> StreamChunk { let chunk = chunk.compact(); @@ -369,12 +371,11 @@ where // Use project to avoid allocation. 
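// A row stays visible iff its pk, projected through `pk_in_output_indices`,
// compares at or before `current_pos` under the table's order types, i.e.
// the backfill snapshot has already caught up past it; later rows are
// filtered out until the snapshot reaches them.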
for v in data.rows().map(|row| { match row - .project(table_pk_indices) + .project(pk_in_output_indices) .iter() - .zip_eq_fast(pk_order.iter()) - .cmp_by(current_pos.iter(), |(x, order), y| match order { - OrderType::Ascending => x.cmp(&y), - OrderType::Descending => y.cmp(&x), + .zip_eq_fast(pk_order.iter().copied()) + .cmp_by(current_pos.iter(), |(x, order), y| { + compare_datum(x, y, order) }) { Ordering::Less | Ordering::Equal => true, Ordering::Greater => false, @@ -396,10 +397,7 @@ where } fn mapping_watermark(watermark: Watermark, upstream_indices: &[usize]) -> Option { - upstream_indices - .iter() - .position(|&idx| idx == watermark.col_idx) - .map(|idx| watermark.with_idx(idx)) + watermark.transform_with_indices(upstream_indices) } fn mapping_message(msg: Message, upstream_indices: &[usize]) -> Option { diff --git a/src/stream/src/executor/barrier_recv.rs b/src/stream/src/executor/barrier_recv.rs new file mode 100644 index 0000000000000..5bf01c139d168 --- /dev/null +++ b/src/stream/src/executor/barrier_recv.rs @@ -0,0 +1,107 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use futures::StreamExt; +use risingwave_common::catalog::Schema; +use tokio::sync::mpsc::UnboundedReceiver; +use tokio_stream::wrappers::UnboundedReceiverStream; + +use super::{ + ActorContext, ActorContextRef, Barrier, BoxedMessageStream, Executor, Message, PkIndicesRef, + StreamExecutorError, +}; + +/// The executor only for receiving barrier from the meta service. It always resides in the leaves +/// of the streaming graph. +pub struct BarrierRecvExecutor { + _ctx: ActorContextRef, + identity: String, + + /// The barrier receiver registered in the local barrier manager. + barrier_receiver: UnboundedReceiver, +} + +impl BarrierRecvExecutor { + pub fn new( + ctx: ActorContextRef, + barrier_receiver: UnboundedReceiver, + executor_id: u64, + ) -> Self { + Self { + _ctx: ctx, + identity: format!("BarrierRecvExecutor {:X}", executor_id), + barrier_receiver, + } + } + + pub fn for_test(barrier_receiver: UnboundedReceiver) -> Self { + Self::new(ActorContext::create(0), barrier_receiver, 0) + } +} + +impl Executor for BarrierRecvExecutor { + fn execute(self: Box) -> BoxedMessageStream { + UnboundedReceiverStream::new(self.barrier_receiver) + .map(|barrier| Ok(Message::Barrier(barrier))) + .chain(futures::stream::once(async { + // We do not use the stream termination as the control message, and this line should + // never be reached in normal cases. So we just return an error here. 
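// (This also surfaces an explicit error, rather than a silent hang, if the
// sender side is dropped while the actor is still running; the unit test
// below exercises this by dropping `barrier_tx`.)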
+ Err(StreamExecutorError::channel_closed("barrier receiver")) + })) + .boxed() + } + + fn schema(&self) -> &Schema { + Schema::empty() + } + + fn pk_indices(&self) -> PkIndicesRef<'_> { + &[] + } + + fn identity(&self) -> &str { + &self.identity + } +} + +#[cfg(test)] +mod tests { + use futures::pin_mut; + use tokio::sync::mpsc; + + use super::*; + use crate::executor::test_utils::StreamExecutorTestExt; + + #[tokio::test] + async fn test_barrier_recv() { + let (barrier_tx, barrier_rx) = mpsc::unbounded_channel(); + + let barrier_recv = BarrierRecvExecutor::for_test(barrier_rx).boxed(); + let stream = barrier_recv.execute(); + pin_mut!(stream); + + barrier_tx.send(Barrier::new_test_barrier(114)).unwrap(); + barrier_tx.send(Barrier::new_test_barrier(514)).unwrap(); + + let barrier_1 = stream.next_unwrap_ready_barrier().unwrap(); + assert_eq!(barrier_1.epoch.curr, 114); + let barrier_2 = stream.next_unwrap_ready_barrier().unwrap(); + assert_eq!(barrier_2.epoch.curr, 514); + + stream.next_unwrap_pending(); + + drop(barrier_tx); + assert!(stream.next_unwrap_ready().is_err()); + } +} diff --git a/src/stream/src/executor/chain.rs b/src/stream/src/executor/chain.rs index b899d45f80eab..de98292eff448 100644 --- a/src/stream/src/executor/chain.rs +++ b/src/stream/src/executor/chain.rs @@ -14,12 +14,11 @@ use futures::StreamExt; use futures_async_stream::try_stream; -use risingwave_common::array::StreamChunk; use risingwave_common::catalog::Schema; use super::error::StreamExecutorError; use super::{expect_first_barrier, BoxedExecutor, Executor, ExecutorInfo, Message}; -use crate::executor::{PkIndices, Watermark}; +use crate::executor::PkIndices; use crate::task::{ActorId, CreateMviewProgress}; /// [`ChainExecutor`] is an executor that enables synchronization between the existing stream and @@ -31,8 +30,6 @@ pub struct ChainExecutor { upstream: BoxedExecutor, - upstream_indices: Vec, - progress: CreateMviewProgress, actor_id: ActorId, @@ -43,27 +40,10 @@ pub struct ChainExecutor { upstream_only: bool, } -fn mapping_chunk(chunk: StreamChunk, upstream_indices: &[usize]) -> StreamChunk { - let (ops, columns, visibility) = chunk.into_inner(); - let mapped_columns = upstream_indices - .iter() - .map(|&i| columns[i].clone()) - .collect(); - StreamChunk::new(ops, mapped_columns, visibility) -} - -fn mapping_watermark(watermark: Watermark, upstream_indices: &[usize]) -> Option { - upstream_indices - .iter() - .position(|&idx| idx == watermark.col_idx) - .map(|idx| watermark.with_idx(idx)) -} - impl ChainExecutor { pub fn new( snapshot: BoxedExecutor, upstream: BoxedExecutor, - upstream_indices: Vec, progress: CreateMviewProgress, schema: Schema, pk_indices: PkIndices, @@ -77,7 +57,6 @@ impl ChainExecutor { }, snapshot, upstream, - upstream_indices, actor_id: progress.actor_id(), progress, upstream_only, @@ -97,7 +76,9 @@ impl ChainExecutor { // Otherwise, it means we've recovered and the snapshot is already consumed. let to_consume_snapshot = barrier.is_add_dispatcher(self.actor_id) && !self.upstream_only; - if self.upstream_only { + // If the barrier is a conf change of creating this mview, and the snapshot is not to be + // consumed, we can finish the progress immediately. + if barrier.is_add_dispatcher(self.actor_id) && self.upstream_only { self.progress.finish(barrier.epoch.curr); } @@ -120,21 +101,11 @@ impl ChainExecutor { // first barrier. #[for_await] for msg in upstream { - match msg? 
{ - Message::Watermark(watermark) => { - match mapping_watermark(watermark, &self.upstream_indices) { - Some(mapped_watermark) => yield Message::Watermark(mapped_watermark), - None => continue, - } - } - Message::Chunk(chunk) => { - yield Message::Chunk(mapping_chunk(chunk, &self.upstream_indices)); - } - Message::Barrier(barrier) => { - self.progress.finish(barrier.epoch.curr); - yield Message::Barrier(barrier); - } + let msg = msg?; + if to_consume_snapshot && let Message::Barrier(barrier) = &msg { + self.progress.finish(barrier.epoch.curr); } + yield msg; } } } @@ -212,15 +183,7 @@ mod test { ], )); - let chain = ChainExecutor::new( - first, - second, - vec![0], - progress, - schema, - PkIndices::new(), - false, - ); + let chain = ChainExecutor::new(first, second, progress, schema, PkIndices::new(), false); let mut chain = Box::new(chain).execute(); chain.next().await; diff --git a/src/stream/src/executor/dispatch.rs b/src/stream/src/executor/dispatch.rs index d8d569c382902..f1a8c9d1ee23d 100644 --- a/src/stream/src/executor/dispatch.rs +++ b/src/stream/src/executor/dispatch.rs @@ -25,10 +25,9 @@ use itertools::Itertools; use risingwave_common::array::{Op, StreamChunk}; use risingwave_common::buffer::BitmapBuilder; use risingwave_common::hash::{ActorMapping, ExpandedActorMapping, VirtualNode}; -use risingwave_common::util::hash_util::Crc32FastBuilder; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_pb::stream_plan::update_mutation::DispatcherUpdate as ProstDispatcherUpdate; -use risingwave_pb::stream_plan::Dispatcher as ProstDispatcher; +use risingwave_pb::stream_plan::update_mutation::PbDispatcherUpdate; +use risingwave_pb::stream_plan::PbDispatcher; use smallvec::{smallvec, SmallVec}; use tracing::event; @@ -106,7 +105,7 @@ impl DispatchExecutorInner { /// Add new dispatchers to the executor. Will check whether their ids are unique. fn add_dispatchers<'a>( &mut self, - new_dispatchers: impl IntoIterator, + new_dispatchers: impl IntoIterator, ) -> StreamResult<()> { let new_dispatchers: Vec<_> = new_dispatchers .into_iter() @@ -136,7 +135,7 @@ impl DispatchExecutorInner { /// Update the dispatcher BEFORE we actually dispatch this barrier. We'll only add the new /// outputs. - fn pre_update_dispatcher(&mut self, update: &ProstDispatcherUpdate) -> StreamResult<()> { + fn pre_update_dispatcher(&mut self, update: &PbDispatcherUpdate) -> StreamResult<()> { let outputs: Vec<_> = update .added_downstream_actor_id .iter() @@ -151,7 +150,7 @@ impl DispatchExecutorInner { /// Update the dispatcher AFTER we dispatch this barrier. We'll remove some outputs and finally /// update the hash mapping. - fn post_update_dispatcher(&mut self, update: &ProstDispatcherUpdate) -> StreamResult<()> { + fn post_update_dispatcher(&mut self, update: &PbDispatcherUpdate) -> StreamResult<()> { let ids = update.removed_downstream_actor_id.iter().copied().collect(); let dispatcher = self.find_dispatcher(update.dispatcher_id); @@ -174,7 +173,7 @@ impl DispatchExecutorInner { /// For `Add` and `Update`, update the dispatchers before we dispatch the barrier. fn pre_mutate_dispatchers(&mut self, mutation: &Option>) -> StreamResult<()> { let Some(mutation) = mutation.as_deref() else { - return Ok(()) + return Ok(()); }; match mutation { @@ -199,7 +198,7 @@ impl DispatchExecutorInner { /// For `Stop` and `Update`, update the dispatchers after we dispatch the barrier. 
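/// (Presumably, removing outputs only after the barrier has gone out ensures
/// that downstream actors being dropped by `Stop` still receive the barrier
/// that stops them.)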
fn post_mutate_dispatchers(&mut self, mutation: &Option>) -> StreamResult<()> { let Some(mutation) = mutation.as_deref() else { - return Ok(()) + return Ok(()); }; match mutation { @@ -262,15 +261,12 @@ impl StreamConsumer for DispatchExecutor { #[for_await] for msg in input { let msg: Message = msg?; - let (barrier, message) = match msg { + let (barrier, span) = match msg { Message::Chunk(_) => (None, "dispatch_chunk"), Message::Barrier(ref barrier) => (Some(barrier.clone()), "dispatch_barrier"), Message::Watermark(_) => (None, "dispatch_watermark"), }; - self.inner - .dispatch(msg) - .verbose_instrument_await(message) - .await?; + self.inner.dispatch(msg).instrument_await(span).await?; if let Some(barrier) = barrier { yield barrier; } @@ -291,7 +287,7 @@ impl DispatcherImpl { pub fn new( context: &SharedContext, actor_id: ActorId, - dispatcher: &ProstDispatcher, + dispatcher: &PbDispatcher, ) -> StreamResult { let outputs = dispatcher .downstream_actor_id @@ -446,14 +442,20 @@ pub trait Dispatcher: Debug + 'static { #[derive(Debug)] pub struct RoundRobinDataDispatcher { outputs: Vec, + output_indices: Vec, cur: usize, dispatcher_id: DispatcherId, } impl RoundRobinDataDispatcher { - pub fn new(outputs: Vec, dispatcher_id: DispatcherId) -> Self { + pub fn new( + outputs: Vec, + output_indices: Vec, + dispatcher_id: DispatcherId, + ) -> Self { Self { outputs, + output_indices, cur: 0, dispatcher_id, } @@ -465,6 +467,7 @@ impl Dispatcher for RoundRobinDataDispatcher { fn dispatch_data(&mut self, chunk: StreamChunk) -> Self::DataFuture<'_> { async move { + let chunk = chunk.reorder_columns(&self.output_indices); self.outputs[self.cur].send(Message::Chunk(chunk)).await?; self.cur += 1; self.cur %= self.outputs.len(); @@ -484,9 +487,11 @@ impl Dispatcher for RoundRobinDataDispatcher { fn dispatch_watermark(&mut self, watermark: Watermark) -> Self::WatermarkFuture<'_> { async move { - // always broadcast watermark - for output in &mut self.outputs { - output.send(Message::Watermark(watermark.clone())).await?; + if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { + // always broadcast watermark + for output in &mut self.outputs { + output.send(Message::Watermark(watermark.clone())).await?; + } } Ok(()) } @@ -569,9 +574,11 @@ impl Dispatcher for HashDataDispatcher { fn dispatch_watermark(&mut self, watermark: Watermark) -> Self::WatermarkFuture<'_> { async move { - // always broadcast watermark - for output in &mut self.outputs { - output.send(Message::Watermark(watermark.clone())).await?; + if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { + // always broadcast watermark + for output in &mut self.outputs { + output.send(Message::Watermark(watermark.clone())).await?; + } } Ok(()) } @@ -585,13 +592,7 @@ impl Dispatcher for HashDataDispatcher { let num_outputs = self.outputs.len(); // get hash value of every line by its key - let hash_builder = Crc32FastBuilder; - let vnodes = chunk - .data_chunk() - .get_hash_values(&self.keys, hash_builder) - .into_iter() - .map(|hash| hash.to_vnode()) - .collect_vec(); + let vnodes = VirtualNode::compute_chunk(chunk.data_chunk(), &self.keys); tracing::trace!(target: "events::stream::dispatch::hash", "\n{}\n keys {:?} => {:?}", chunk.to_pretty_string(), self.keys, vnodes); @@ -751,8 +752,11 @@ impl Dispatcher for BroadcastDispatcher { fn dispatch_watermark(&mut self, watermark: Watermark) -> Self::WatermarkFuture<'_> { async move { - for output in self.outputs.values_mut() { - 
output.send(Message::Watermark(watermark.clone())).await?; + if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { + // always broadcast watermark + for output in self.outputs.values_mut() { + output.send(Message::Watermark(watermark.clone())).await?; + } } Ok(()) } @@ -851,7 +855,10 @@ impl Dispatcher for SimpleDispatcher { .exactly_one() .expect("expect exactly one output"); - output.send(Message::Watermark(watermark)).await + if let Some(watermark) = watermark.transform_with_indices(&self.output_indices) { + output.send(Message::Watermark(watermark)).await?; + } + Ok(()) } } @@ -881,6 +888,7 @@ mod tests { use risingwave_common::array::{Array, ArrayBuilder, I32ArrayBuilder, Op}; use risingwave_common::catalog::Schema; use risingwave_common::hash::VirtualNode; + use risingwave_common::util::hash_util::Crc32FastBuilder; use risingwave_common::util::iter_util::ZipEqFast; use risingwave_pb::stream_plan::DispatcherType; @@ -1018,7 +1026,7 @@ mod tests { let broadcast_dispatcher = DispatcherImpl::new( &ctx, actor_id, - &ProstDispatcher { + &PbDispatcher { r#type: DispatcherType::Broadcast as _, dispatcher_id: broadcast_dispatcher_id, downstream_actor_id: vec![untouched, old], @@ -1031,7 +1039,7 @@ mod tests { let simple_dispatcher = DispatcherImpl::new( &ctx, actor_id, - &ProstDispatcher { + &PbDispatcher { r#type: DispatcherType::Simple as _, dispatcher_id: simple_dispatcher_id, downstream_actor_id: vec![old_simple], @@ -1068,7 +1076,7 @@ mod tests { // 4. Send a configuration change barrier for broadcast dispatcher. let dispatcher_updates = maplit::hashmap! { - actor_id => vec![ProstDispatcherUpdate { + actor_id => vec![PbDispatcherUpdate { actor_id, dispatcher_id: broadcast_dispatcher_id, added_downstream_actor_id: vec![new], @@ -1119,7 +1127,7 @@ mod tests { // 9. Send a configuration change barrier for simple dispatcher. let dispatcher_updates = maplit::hashmap! 
{ - actor_id => vec![ProstDispatcherUpdate { + actor_id => vec![PbDispatcherUpdate { actor_id, dispatcher_id: simple_dispatcher_id, added_downstream_actor_id: vec![new_simple], diff --git a/src/stream/src/executor/dynamic_filter.rs b/src/stream/src/executor/dynamic_filter.rs index 98768c1aa2b4d..c8f7a1f1bf612 100644 --- a/src/stream/src/executor/dynamic_filter.rs +++ b/src/stream/src/executor/dynamic_filter.rs @@ -25,9 +25,7 @@ use risingwave_common::hash::VnodeBitmapExt; use risingwave_common::row::{once, OwnedRow as RowData, Row}; use risingwave_common::types::{DataType, Datum, ScalarImpl, ToDatumRef, ToOwnedDatum}; use risingwave_common::util::iter_util::ZipEqDebug; -use risingwave_expr::expr::{ - new_binary_expr, BoxedExpression, InputRefExpression, LiteralExpression, -}; +use risingwave_expr::expr::{build, BoxedExpression, InputRefExpression, LiteralExpression}; use risingwave_pb::expr::expr_node::Type as ExprNodeType; use risingwave_pb::expr::expr_node::Type::{ GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, @@ -42,7 +40,7 @@ use super::{ ActorContextRef, BoxedExecutor, BoxedMessageStream, Executor, Message, PkIndices, PkIndicesRef, }; use crate::common::table::state_table::StateTable; -use crate::common::{InfallibleExpression, StreamChunkBuilder}; +use crate::common::StreamChunkBuilder; use crate::executor::expect_first_barrier_from_aligned_stream; pub struct DynamicFilterExecutor { @@ -93,7 +91,7 @@ impl DynamicFilterExecutor { } } - fn apply_batch( + async fn apply_batch( &mut self, data_chunk: &DataChunk, ops: Vec, @@ -104,11 +102,16 @@ impl DynamicFilterExecutor { let mut new_visibility = BitmapBuilder::with_capacity(ops.len()); let mut last_res = false; - let eval_results = condition.map(|cond| { - cond.eval_infallible(data_chunk, |err| { - self.ctx.on_compute_error(err, self.identity()) - }) - }); + let eval_results = if let Some(cond) = condition { + Some( + cond.eval_infallible(data_chunk, |err| { + self.ctx.on_compute_error(err, &self.identity) + }) + .await, + ) + } else { + None + }; for (idx, (row, op)) in data_chunk.rows().zip_eq_debug(ops.iter()).enumerate() { let left_val = row.datum_at(self.key_l).to_owned_datum(); @@ -262,11 +265,13 @@ impl DynamicFilterExecutor { assert_eq!(l_data_type, r_data_type); let dynamic_cond = move |literal: Datum| { literal.map(|scalar| { - new_binary_expr( + build( self.comparator, DataType::Boolean, - Box::new(InputRefExpression::new(l_data_type.clone(), self.key_l)), - Box::new(LiteralExpression::new(r_data_type.clone(), Some(scalar))), + vec![ + Box::new(InputRefExpression::new(l_data_type.clone(), self.key_l)), + Box::new(LiteralExpression::new(r_data_type.clone(), Some(scalar))), + ], ) }) }; @@ -323,7 +328,7 @@ impl DynamicFilterExecutor { let condition = dynamic_cond(right_val).transpose()?; let (new_ops, new_visibility) = - self.apply_batch(&data_chunk, ops, condition)?; + self.apply_batch(&data_chunk, ops, condition).await?; let (columns, _) = data_chunk.into_parts(); @@ -451,7 +456,7 @@ impl DynamicFilterExecutor { // Update the vnode bitmap for the left state table if asked. 
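// (`update_vnode_bitmap` now returns a `(previous_vnode_bitmap, cache_may_stale)`
// pair; both halves are intentionally unused here, hence the underscore bindings.)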
if let Some(vnode_bitmap) = barrier.as_update_vnode_bitmap(self.ctx.id) { - let _previous_vnode_bitmap = + let (_previous_vnode_bitmap, _cache_may_stale) = self.left_table.update_vnode_bitmap(vnode_bitmap); } @@ -501,7 +506,7 @@ mod tests { mem_state.clone(), TableId::new(0), vec![column_descs.clone()], - vec![OrderType::Ascending], + vec![OrderType::ascending()], vec![0], ) .await; @@ -509,7 +514,7 @@ mod tests { mem_state, TableId::new(1), vec![column_descs], - vec![OrderType::Ascending], + vec![OrderType::ascending()], vec![0], ) .await; diff --git a/src/stream/src/executor/error.rs b/src/stream/src/executor/error.rs index eafb17e977ae4..88c09bd7d043e 100644 --- a/src/stream/src/executor/error.rs +++ b/src/stream/src/executor/error.rs @@ -21,7 +21,7 @@ use risingwave_common::util::value_encoding::error::ValueEncodingError; use risingwave_connector::error::ConnectorError; use risingwave_connector::sink::SinkError; use risingwave_expr::ExprError; -use risingwave_pb::ProstFieldNotFound; +use risingwave_pb::PbFieldNotFound; use risingwave_rpc_client::error::RpcError; use risingwave_storage::error::StorageError; @@ -173,8 +173,8 @@ impl From for StreamExecutorError { } } -impl From for StreamExecutorError { - fn from(err: ProstFieldNotFound) -> Self { +impl From for StreamExecutorError { + fn from(err: PbFieldNotFound) -> Self { Self::from(anyhow::anyhow!( "Failed to decode prost: field not found `{}`", err.0 diff --git a/src/stream/src/executor/filter.rs b/src/stream/src/executor/filter.rs index 80354bd48f691..cdbc7fd466e50 100644 --- a/src/stream/src/executor/filter.rs +++ b/src/stream/src/executor/filter.rs @@ -21,52 +21,33 @@ use risingwave_common::catalog::Schema; use risingwave_common::util::iter_util::ZipEqFast; use risingwave_expr::expr::BoxedExpression; -use super::{ - ActorContextRef, Executor, ExecutorInfo, PkIndicesRef, SimpleExecutor, SimpleExecutorWrapper, - StreamExecutorResult, Watermark, -}; -use crate::common::InfallibleExpression; - -pub type FilterExecutor = SimpleExecutorWrapper; - -impl FilterExecutor { - pub fn new( - ctx: ActorContextRef, - input: Box, - expr: BoxedExpression, - executor_id: u64, - ) -> Self { - let info = input.info(); - - SimpleExecutorWrapper { - input, - inner: SimpleFilterExecutor::new(ctx, info, expr, executor_id), - } - } -} +use super::*; /// `FilterExecutor` filters data with the `expr`. The `expr` takes a chunk of data, /// and returns a boolean array on whether each item should be retained. And then, /// `FilterExecutor` will insert, delete or update element into next executor according /// to the result of the expression. -pub struct SimpleFilterExecutor { +pub struct FilterExecutor { ctx: ActorContextRef, info: ExecutorInfo, + input: BoxedExecutor, /// Expression of the current filter, note that the filter must always have the same output for /// the same input. 
expr: BoxedExpression, } -impl SimpleFilterExecutor { +impl FilterExecutor { pub fn new( ctx: ActorContextRef, - input_info: ExecutorInfo, + input: Box, expr: BoxedExpression, executor_id: u64, ) -> Self { + let input_info = input.info(); Self { ctx, + input, info: ExecutorInfo { schema: input_info.schema, pk_indices: input_info.pk_indices, @@ -155,7 +136,7 @@ impl SimpleFilterExecutor { } } -impl Debug for SimpleFilterExecutor { +impl Debug for FilterExecutor { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("FilterExecutor") .field("expr", &self.expr) @@ -163,21 +144,7 @@ impl Debug for SimpleFilterExecutor { } } -impl SimpleExecutor for SimpleFilterExecutor { - fn map_filter_chunk(&self, chunk: StreamChunk) -> StreamExecutorResult> { - let chunk = chunk.compact(); - - let pred_output = self.expr.eval_infallible(chunk.data_chunk(), |err| { - self.ctx.on_compute_error(err, self.identity()) - }); - - Self::filter(chunk, pred_output) - } - - fn handle_watermark(&self, watermark: Watermark) -> StreamExecutorResult> { - Ok(vec![watermark]) - } - +impl Executor for FilterExecutor { fn schema(&self) -> &Schema { &self.info.schema } @@ -189,6 +156,40 @@ impl SimpleExecutor for SimpleFilterExecutor { fn identity(&self) -> &str { &self.info.identity } + + fn execute(self: Box) -> BoxedMessageStream { + self.execute_inner().boxed() + } +} + +impl FilterExecutor { + #[try_stream(ok = Message, error = StreamExecutorError)] + async fn execute_inner(self) { + let input = self.input.execute(); + #[for_await] + for msg in input { + let msg = msg?; + match msg { + Message::Watermark(w) => yield Message::Watermark(w), + Message::Chunk(chunk) => { + let chunk = chunk.compact(); + + let pred_output = self + .expr + .eval_infallible(chunk.data_chunk(), |err| { + self.ctx.on_compute_error(err, &self.info.identity) + }) + .await; + + match Self::filter(chunk, pred_output)? 
{ + Some(new_chunk) => yield Message::Chunk(new_chunk), + None => continue, + } + } + m => yield m, + } + } + } } #[cfg(test)] @@ -198,8 +199,8 @@ mod tests { use risingwave_common::array::StreamChunk; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::types::DataType; - use risingwave_expr::expr::{new_binary_expr, InputRefExpression}; - use risingwave_pb::expr::expr_node::Type; + use risingwave_expr::expr::{build, InputRefExpression}; + use risingwave_pb::expr::expr_node::PbType; use super::super::test_utils::MockSource; use super::super::*; @@ -233,15 +234,16 @@ mod tests { }; let source = MockSource::with_chunks(schema, PkIndices::new(), vec![chunk1, chunk2]); - let left_expr = InputRefExpression::new(DataType::Int64, 0); - let right_expr = InputRefExpression::new(DataType::Int64, 1); - let test_expr = new_binary_expr( - Type::GreaterThan, + let test_expr = build( + PbType::GreaterThan, DataType::Boolean, - Box::new(left_expr), - Box::new(right_expr), + vec![ + Box::new(InputRefExpression::new(DataType::Int64, 0)), + Box::new(InputRefExpression::new(DataType::Int64, 1)), + ], ) .unwrap(); + let filter = Box::new(FilterExecutor::new( ActorContext::create(123), Box::new(source), diff --git a/src/stream/src/executor/global_simple_agg.rs b/src/stream/src/executor/global_simple_agg.rs index ed9c82e67c3ec..a8d879fe6414e 100644 --- a/src/stream/src/executor/global_simple_agg.rs +++ b/src/stream/src/executor/global_simple_agg.rs @@ -16,14 +16,12 @@ use futures::StreamExt; use futures_async_stream::try_stream; use risingwave_common::array::StreamChunk; use risingwave_common::catalog::Schema; -use risingwave_common::row::RowExt; use risingwave_common::util::iter_util::ZipEqFast; use risingwave_storage::StateStore; use super::agg_common::AggExecutorArgs; use super::aggregation::{ - agg_call_filter_res, iter_table_storage, AggChangesInfo, AggStateStorage, AlwaysOutput, - DistinctDeduplicater, + agg_call_filter_res, iter_table_storage, AggStateStorage, AlwaysOutput, DistinctDeduplicater, }; use super::*; use crate::common::table::state_table::StateTable; @@ -168,20 +166,19 @@ impl GlobalSimpleAggExecutor { let (ops, columns, visibility) = chunk.into_inner(); // Calculate the row visibility for every agg call. - let visibilities: Vec<_> = this - .agg_calls - .iter() - .map(|agg_call| { - agg_call_filter_res( - &this.actor_ctx, - &this.info.identity, - agg_call, - &columns, - visibility.as_ref(), - capacity, - ) - }) - .try_collect()?; + let mut visibilities = Vec::with_capacity(this.agg_calls.len()); + for agg_call in &this.agg_calls { + let result = agg_call_filter_res( + &this.actor_ctx, + &this.info.identity, + agg_call, + &columns, + visibility.as_ref(), + capacity, + ) + .await?; + visibilities.push(result); + } // Materialize input chunk if needed. this.storages @@ -252,29 +249,18 @@ impl GlobalSimpleAggExecutor { let mut new_ops = Vec::with_capacity(2); // Retrieve modified states and put the changes into the builders. 
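// Editorial note: the old `build_changes` (which eagerly appended ops and
// returned an `AggChangesInfo`) is split into `build_change`, returning an
// `Option`, plus `apply_change_to_builders` / `apply_change_to_result_table`.
// "Did anything change?" becomes a plain `Option` check below, and the
// result-table update logic is now shared with `HashAggExecutor`.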
let curr_outputs = vars.agg_group.get_outputs(&this.storages).await?; - let AggChangesInfo { - result_row, - prev_outputs, - n_appended_ops, - } = vars - .agg_group - .build_changes(curr_outputs, &mut builders, &mut new_ops); - - if n_appended_ops == 0 { + if let Some(change) = vars.agg_group.build_change(curr_outputs) { + vars.agg_group + .apply_change_to_builders(&change, &mut builders, &mut new_ops); + vars.agg_group + .apply_change_to_result_table(&change, &mut this.result_table); + this.result_table.commit(epoch).await?; + } else { // Agg result is not changed. this.result_table.commit_no_data_expected(epoch); return Ok(None); } - // Update the result table with latest agg outputs. - if let Some(prev_outputs) = prev_outputs { - let old_row = vars.agg_group.group_key().chain(prev_outputs); - this.result_table.update(old_row, result_row); - } else { - this.result_table.insert(result_row); - } - this.result_table.commit(epoch).await?; - let columns = builders .into_iter() .map(|builder| builder.finish().into()) @@ -409,7 +395,7 @@ mod tests { kind: AggKind::Count, // as row count, index: 0 args: AggArgs::None, return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -418,7 +404,7 @@ mod tests { kind: AggKind::Sum, args: AggArgs::Unary(DataType::Int64, 0), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -427,7 +413,7 @@ mod tests { kind: AggKind::Sum, args: AggArgs::Unary(DataType::Int64, 1), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -436,7 +422,7 @@ mod tests { kind: AggKind::Min, args: AggArgs::Unary(DataType::Int64, 0), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, diff --git a/src/stream/src/executor/hash_agg.rs b/src/stream/src/executor/hash_agg.rs index 60554ad077b04..5e22af7868366 100644 --- a/src/stream/src/executor/hash_agg.rs +++ b/src/stream/src/executor/hash_agg.rs @@ -14,6 +14,7 @@ use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; +use std::ptr::NonNull; use std::sync::Arc; use futures::{stream, StreamExt, TryStreamExt}; @@ -24,7 +25,6 @@ use risingwave_common::array::StreamChunk; use risingwave_common::buffer::{Bitmap, BitmapBuilder}; use risingwave_common::catalog::Schema; use risingwave_common::hash::{HashKey, PrecomputedBuildHasher}; -use risingwave_common::row::RowExt; use risingwave_common::util::epoch::EpochPair; use risingwave_common::util::iter_util::ZipEqFast; use risingwave_storage::StateStore; @@ -41,14 +41,14 @@ use super::{ use crate::cache::{cache_may_stale, new_with_hasher, ExecutorCache}; use crate::common::table::state_table::StateTable; use crate::error::StreamResult; -use crate::executor::aggregation::{generate_agg_schema, AggCall, AggChangesInfo, AggGroup}; +use crate::executor::aggregation::{generate_agg_schema, AggCall, AggGroup as GenericAggGroup}; use crate::executor::error::StreamExecutorError; use crate::executor::monitor::StreamingMetrics; use crate::executor::{BoxedMessageStream, Message}; use crate::task::AtomicU64Ref; -type BoxedAggGroup = Box>; -type AggGroupCache = ExecutorCache, PrecomputedBuildHasher>; +type AggGroup = GenericAggGroup; +type AggGroupCache = ExecutorCache, PrecomputedBuildHasher>; /// [`HashAggExecutor`] could process large amounts of data using a state backend. 
It works as /// follows: @@ -255,19 +255,17 @@ impl HashAggExecutor { Some(async { // Create `AggGroup` for the current group if not exists. This will // fetch previous agg result from the result table. - let agg_group = Box::new( - AggGroup::create( - Some(key.deserialize(group_key_types)?), - &this.agg_calls, - &this.storages, - &this.result_table, - &this.input_pk_indices, - this.row_count_index, - this.extreme_cache_size, - &this.input_schema, - ) - .await?, - ); + let agg_group = AggGroup::create( + Some(key.deserialize(group_key_types)?), + &this.agg_calls, + &this.storages, + &this.result_table, + &this.input_pk_indices, + this.row_count_index, + this.extreme_cache_size, + &this.input_schema, + ) + .await?; Ok::<_, StreamExecutorError>((key.clone(), agg_group)) }) } @@ -310,20 +308,19 @@ impl HashAggExecutor { let (ops, columns, visibility) = chunk.into_inner(); // Calculate the row visibility for every agg call. - let call_visibilities: Vec<_> = this - .agg_calls - .iter() - .map(|agg_call| { - agg_call_filter_res( - &this.actor_ctx, - &this.info.identity, - agg_call, - &columns, - visibility.as_ref(), - capacity, - ) - }) - .try_collect()?; + let mut call_visibilities = Vec::with_capacity(this.agg_calls.len()); + for agg_call in &this.agg_calls { + let agg_call_filter_res = agg_call_filter_res( + &this.actor_ctx, + &this.info.identity, + agg_call, + &columns, + visibility.as_ref(), + capacity, + ) + .await?; + call_visibilities.push(agg_call_filter_res); + } // Materialize input chunk if needed. this.storages @@ -346,7 +343,7 @@ impl HashAggExecutor { // Apply chunk to each of the state (per agg_call), for each group. for (key, visibility) in group_visibilities { - let agg_group = vars.agg_group_cache.get_mut(&key).unwrap().as_mut(); + let agg_group = vars.agg_group_cache.get_mut(&key).unwrap(); let visibilities = call_visibilities .iter() .map(Option::as_ref) @@ -412,8 +409,6 @@ impl HashAggExecutor { let dirty_cnt = vars.group_change_set.len(); if dirty_cnt > 0 { // Produce the stream chunk - let group_key_data_types = - &this.info.schema.data_types()[..this.group_key_indices.len()]; let mut group_chunks = IterChunks::chunks(vars.group_change_set.drain(), this.chunk_size); while let Some(batch) = group_chunks.next() { @@ -424,8 +419,7 @@ impl HashAggExecutor { let agg_group = vars .agg_group_cache .get_mut(key) - .expect("changed group must have corresponding AggGroup") - .as_mut(); + .expect("changed group must have corresponding AggGroup"); agg_group.flush_state_if_needed(&mut this.storages).await?; } @@ -437,11 +431,17 @@ impl HashAggExecutor { // Calculate current outputs, concurrently. let futs = keys_in_batch.into_iter().map(|key| { - // Pop out the agg group temporarily. - let mut agg_group = vars - .agg_group_cache - .pop(&key) - .expect("changed group must have corresponding AggGroup"); + // Get agg group of the key. + let agg_group = { + let mut ptr: NonNull<_> = vars + .agg_group_cache + .get_mut(&key) + .expect("changed group must have corresponding AggGroup") + .into(); + // SAFETY: `key`s in `keys_in_batch` are unique by nature, because they're + // from `group_change_set` which is a set. 
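// (Uniqueness is what makes the raw-pointer trick sound: each future obtains
// a `&mut` to a distinct cache entry, so no two of the concurrently-polled
// futures alias the same `AggGroup`. This replaces the old pop-then-put-back
// dance on `agg_group_cache` that existed only to satisfy the borrow checker.)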
+ unsafe { ptr.as_mut() } + }; async { let curr_outputs = agg_group.get_outputs(&this.storages).await?; Ok::<_, StreamExecutorError>((key, agg_group, curr_outputs)) @@ -453,34 +453,11 @@ impl HashAggExecutor { .try_collect() .await?; - for (key, mut agg_group, curr_outputs) in outputs_in_batch { - let AggChangesInfo { - n_appended_ops, - result_row, - prev_outputs, - } = agg_group.build_changes( - curr_outputs, - &mut builders[this.group_key_indices.len()..], - &mut new_ops, - ); - - if n_appended_ops != 0 { - for _ in 0..n_appended_ops { - key.deserialize_to_builders( - &mut builders[..this.group_key_indices.len()], - group_key_data_types, - )?; - } - if let Some(prev_outputs) = prev_outputs { - let old_row = agg_group.group_key().chain(prev_outputs); - this.result_table.update(old_row, result_row); - } else { - this.result_table.insert(result_row); - } + for (_key, agg_group, curr_outputs) in outputs_in_batch { + if let Some(change) = agg_group.build_change(curr_outputs) { + agg_group.apply_change_to_builders(&change, &mut builders, &mut new_ops); + agg_group.apply_change_to_result_table(&change, &mut this.result_table); } - - // Put the agg group back into the agg group cache. - vars.agg_group_cache.put(key, agg_group); } let columns = builders @@ -757,7 +734,7 @@ mod tests { kind: AggKind::Count, // as row count, index: 0 args: AggArgs::None, return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -766,7 +743,7 @@ mod tests { kind: AggKind::Count, args: AggArgs::Unary(DataType::Int64, 0), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -775,7 +752,7 @@ mod tests { kind: AggKind::Count, args: AggArgs::None, return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -862,7 +839,7 @@ mod tests { kind: AggKind::Count, // as row count, index: 0 args: AggArgs::None, return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -871,7 +848,7 @@ mod tests { kind: AggKind::Sum, args: AggArgs::Unary(DataType::Int64, 1), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -881,7 +858,7 @@ mod tests { kind: AggKind::Sum, args: AggArgs::Unary(DataType::Int64, 2), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -969,7 +946,7 @@ mod tests { kind: AggKind::Count, // as row count, index: 0 args: AggArgs::None, return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only: false, filter: None, distinct: false, @@ -978,7 +955,7 @@ mod tests { kind: AggKind::Min, args: AggArgs::Unary(DataType::Int64, 1), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only: false, filter: None, distinct: false, @@ -1071,7 +1048,7 @@ mod tests { kind: AggKind::Count, // as row count, index: 0 args: AggArgs::None, return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -1080,7 +1057,7 @@ mod tests { kind: AggKind::Min, args: AggArgs::Unary(DataType::Int64, 1), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, diff --git a/src/stream/src/executor/hash_join.rs b/src/stream/src/executor/hash_join.rs index 
470026ee75fdc..4aff917c8cc9c 100644 --- a/src/stream/src/executor/hash_join.rs +++ b/src/stream/src/executor/hash_join.rs @@ -43,7 +43,7 @@ use super::{ Watermark, }; use crate::common::table::state_table::StateTable; -use crate::common::{InfallibleExpression, StreamChunkBuilder}; +use crate::common::StreamChunkBuilder; use crate::executor::expect_first_barrier_from_aligned_stream; use crate::executor::JoinType::LeftAnti; use crate::task::AtomicU64Ref; @@ -52,6 +52,10 @@ use crate::task::AtomicU64Ref; /// enum is not supported in const generic. // TODO: Use enum to replace this once [feature(adt_const_params)](https://github.com/rust-lang/rust/issues/95174) get completed. pub type JoinTypePrimitive = u8; + +/// Evict the cache every n rows. +const EVICT_EVERY_N_ROWS: u32 = 1024; + #[allow(non_snake_case, non_upper_case_globals)] pub mod JoinType { use super::JoinTypePrimitive; @@ -242,6 +246,8 @@ pub struct HashJoinExecutor, /// The maximum size of the chunk produced by executor at a time chunk_size: usize, + /// Count the messages received, clear to 0 when counted to `EVICT_EVERY_N_MESSAGES` + cnt_rows_received: u32, /// watermark column index -> `BufferedWatermarks` watermark_buffers: BTreeMap>, @@ -603,6 +609,7 @@ impl HashJoinExecutor HashJoinExecutor HashJoinExecutor HashJoinExecutor HashJoinExecutor, + side_match: &mut JoinSide, + cnt_rows_received: &mut u32, + ) { + *cnt_rows_received += 1; + if *cnt_rows_received == EVICT_EVERY_N_ROWS { + side_update.ht.evict(); + side_match.ht.evict(); + *cnt_rows_received = 0; + } + } + fn handle_watermark( &mut self, side: SideTypePrimitive, @@ -846,6 +868,7 @@ impl HashJoinExecutor HashJoinExecutor, row_matched: &OwnedRow| -> bool { - // TODO(yuhao-su): We should find a better way to eval the expression without concat - // two rows. 
- // if there are non-equi expressions - if let Some(ref mut cond) = cond { - let new_row = Self::row_concat( - row_update, - side_update.start_pos, - row_matched, - side_match.start_pos, - ); - - cond.eval_row_infallible(&new_row, |err| ctx.on_compute_error(err, identity)) - .map(|s| *s.as_bool()) - .unwrap_or(false) - } else { - true - } - }; - let keys = K::build(&side_update.join_key_indices, chunk.data_chunk())?; for ((op, row), key) in chunk.rows().zip_eq_debug(keys.iter()) { + Self::evict_cache(side_update, side_match, cnt_rows_received); + let matched_rows: Option = Self::hash_eq_match(key, &mut side_match.ht).await?; match op { @@ -897,7 +902,27 @@ impl HashJoinExecutor HashJoinExecutor HashJoinExecutor HashJoinExecutor (StateTable, StateTable) { let column_descs = data_types .iter() .enumerate() .map(|(id, data_type)| ColumnDesc::unnamed(ColumnId::new(id as i32), data_type.clone())) .collect_vec(); - let state_table = StateTable::new_without_distribution_with_prefix_hint_len( + let state_table = StateTable::new_without_distribution( mem_state.clone(), TableId::new(table_id), column_descs, order_types.to_vec(), pk_indices.to_vec(), - prefix_hint_len, ) .await; @@ -1068,13 +1111,13 @@ mod tests { } fn create_cond() -> BoxedExpression { - let left_expr = InputRefExpression::new(DataType::Int64, 1); - let right_expr = InputRefExpression::new(DataType::Int64, 3); - new_binary_expr( - Type::LessThan, + build( + PbType::LessThan, DataType::Boolean, - Box::new(left_expr), - Box::new(right_expr), + vec![ + Box::new(InputRefExpression::new(DataType::Int64, 1)), + Box::new(InputRefExpression::new(DataType::Int64, 3)), + ], ) .unwrap() } @@ -1091,9 +1134,8 @@ mod tests { }; let (tx_l, source_l) = MockSource::channel(schema.clone(), vec![1]); let (tx_r, source_r) = MockSource::channel(schema, vec![1]); - let join_key_indices = vec![0]; - let params_l = JoinParams::new(join_key_indices.clone(), vec![1]); - let params_r = JoinParams::new(join_key_indices.clone(), vec![1]); + let params_l = JoinParams::new(vec![0], vec![1]); + let params_r = JoinParams::new(vec![0], vec![1]); let cond = with_condition.then(create_cond); let mem_state = MemoryStateStore::new(); @@ -1101,20 +1143,18 @@ mod tests { let (state_l, degree_state_l) = create_in_memory_state_table( mem_state.clone(), &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &[0, 1], 0, - join_key_indices.len(), ) .await; let (state_r, degree_state_r) = create_in_memory_state_table( mem_state, &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &[0, 1], 2, - join_key_indices.len(), ) .await; @@ -1160,9 +1200,8 @@ mod tests { }; let (tx_l, source_l) = MockSource::channel(schema.clone(), vec![0]); let (tx_r, source_r) = MockSource::channel(schema, vec![0]); - let join_key_indices = vec![0, 1]; - let params_l = JoinParams::new(join_key_indices.clone(), vec![]); - let params_r = JoinParams::new(join_key_indices.clone(), vec![]); + let params_l = JoinParams::new(vec![0, 1], vec![]); + let params_r = JoinParams::new(vec![0, 1], vec![]); let cond = with_condition.then(create_cond); let mem_state = MemoryStateStore::new(); @@ -1171,13 +1210,12 @@ mod tests { mem_state.clone(), &[DataType::Int64, DataType::Int64, DataType::Int64], &[ - OrderType::Ascending, - OrderType::Ascending, - OrderType::Ascending, + OrderType::ascending(), + OrderType::ascending(), + OrderType::ascending(), 
], &[0, 1, 0], 0, - join_key_indices.len(), ) .await; @@ -1185,13 +1223,12 @@ mod tests { mem_state, &[DataType::Int64, DataType::Int64, DataType::Int64], &[ - OrderType::Ascending, - OrderType::Ascending, - OrderType::Ascending, + OrderType::ascending(), + OrderType::ascending(), + OrderType::ascending(), ], &[0, 1, 1], 0, - join_key_indices.len(), ) .await; let schema_len = match T { diff --git a/src/stream/src/executor/hop_window.rs b/src/stream/src/executor/hop_window.rs index 527c8df1598e7..ded6e3fcf4fe3 100644 --- a/src/stream/src/executor/hop_window.rs +++ b/src/stream/src/executor/hop_window.rs @@ -24,13 +24,11 @@ use risingwave_expr::ExprError; use super::error::StreamExecutorError; use super::{ActorContextRef, BoxedExecutor, Executor, ExecutorInfo, Message}; -use crate::common::InfallibleExpression; pub struct HopWindowExecutor { ctx: ActorContextRef, pub input: BoxedExecutor, pub info: ExecutorInfo, - pub time_col_idx: usize, pub window_slide: IntervalUnit, pub window_size: IntervalUnit, @@ -85,6 +83,23 @@ impl Executor for HopWindowExecutor { } impl HopWindowExecutor { + fn derive_watermarks( + input_len: usize, + time_col_idx: usize, + output_indices: &[usize], + ) -> Vec> { + let mut watermark_derivations = vec![vec![]; input_len]; + for (out_i, in_i) in output_indices.iter().enumerate() { + let in_i = *in_i; + if in_i >= input_len { + watermark_derivations[time_col_idx].push(out_i); + } else { + watermark_derivations[in_i].push(out_i); + } + } + watermark_derivations + } + #[try_stream(ok = Message, error = StreamExecutorError)] async fn execute_inner(self: Box) { let Self { @@ -95,6 +110,7 @@ impl HopWindowExecutor { window_size, output_indices, info, + time_col_idx, .. } = *self; let units = window_size @@ -111,55 +127,68 @@ impl HopWindowExecutor { let window_start_col_index = input.schema().len(); let window_end_col_index = input.schema().len() + 1; + let watermark_derivations = + Self::derive_watermarks(input.schema().len(), time_col_idx, &output_indices); #[for_await] for msg in input.execute() { let msg = msg?; - if let Message::Chunk(chunk) = msg { - // TODO: compact may be not necessary here. - let chunk = chunk.compact(); - let (data_chunk, ops) = chunk.into_parts(); - // SAFETY: Already compacted. - assert!(matches!(data_chunk.vis(), Vis::Compact(_))); - let _len = data_chunk.cardinality(); - for i in 0..units { - let window_start_col = if output_indices.contains(&window_start_col_index) { - Some( - self.window_start_exprs[i].eval_infallible(&data_chunk, |err| { - ctx.on_compute_error(err, &info.identity) - }), - ) - } else { - None - }; - let window_end_col = if output_indices.contains(&window_end_col_index) { - Some( - self.window_end_exprs[i].eval_infallible(&data_chunk, |err| { - ctx.on_compute_error(err, &info.identity) - }), - ) - } else { - None - }; - let new_cols = output_indices - .iter() - .filter_map(|&idx| { - if idx < window_start_col_index { - Some(data_chunk.column_at(idx).clone()) - } else if idx == window_start_col_index { - Some(Column::new(window_start_col.clone().unwrap())) - } else if idx == window_end_col_index { - Some(Column::new(window_end_col.clone().unwrap())) - } else { - None - } - }) - .collect(); - let new_chunk = StreamChunk::new(ops.clone(), new_cols, None); - yield Message::Chunk(new_chunk); + match msg { + Message::Chunk(chunk) => { + // TODO: compact may be not necessary here. + let chunk = chunk.compact(); + let (data_chunk, ops) = chunk.into_parts(); + // SAFETY: Already compacted. 
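// (`compact()` folds the visibility bitmap into the data, so `vis()` is
// guaranteed to be `Vis::Compact` here and cardinality equals the row count.)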
+ assert!(matches!(data_chunk.vis(), Vis::Compact(_))); + let _len = data_chunk.cardinality(); + for i in 0..units { + let window_start_col = if output_indices.contains(&window_start_col_index) { + Some( + self.window_start_exprs[i] + .eval_infallible(&data_chunk, |err| { + ctx.on_compute_error(err, &info.identity) + }) + .await, + ) + } else { + None + }; + let window_end_col = if output_indices.contains(&window_end_col_index) { + Some( + self.window_end_exprs[i] + .eval_infallible(&data_chunk, |err| { + ctx.on_compute_error(err, &info.identity) + }) + .await, + ) + } else { + None + }; + let new_cols = output_indices + .iter() + .filter_map(|&idx| { + if idx < window_start_col_index { + Some(data_chunk.column_at(idx).clone()) + } else if idx == window_start_col_index { + Some(Column::new(window_start_col.clone().unwrap())) + } else if idx == window_end_col_index { + Some(Column::new(window_end_col.clone().unwrap())) + } else { + None + } + }) + .collect(); + let new_chunk = StreamChunk::new(ops.clone(), new_cols, None); + yield Message::Chunk(new_chunk); + } + } + Message::Barrier(b) => { + yield Message::Barrier(b); + } + Message::Watermark(w) => { + for i in &watermark_derivations[w.col_idx] { + yield Message::Watermark(w.clone().with_idx(*i)); + } } - } else { - yield msg; - continue; }; } } @@ -170,12 +199,13 @@ mod tests { use futures::StreamExt; use risingwave_common::array::stream_chunk::StreamChunkTestExt; use risingwave_common::catalog::{Field, Schema}; + use risingwave_common::types::test_utils::IntervalUnitTestExt; use risingwave_common::types::{DataType, IntervalUnit}; use risingwave_expr::expr::test_utils::make_hop_window_expression; - use crate::executor::test_utils::MockSource; - use crate::executor::{ActorContext, Executor, ExecutorInfo, StreamChunk}; - + use super::super::*; + use crate::executor::test_utils::{MessageSender, MockSource}; + use crate::executor::{ActorContext, Executor, ExecutorInfo, ScalarImpl, StreamChunk}; fn create_executor(output_indices: Vec) -> Box { let field1 = Field::unnamed(DataType::Int64); let field2 = Field::unnamed(DataType::Int64); @@ -199,8 +229,15 @@ mod tests { MockSource::with_chunks(schema.clone(), pk_indices.clone(), vec![chunk]).boxed(); let window_slide = IntervalUnit::from_minutes(15); let window_size = IntervalUnit::from_minutes(30); - let (window_start_exprs, window_end_exprs) = - make_hop_window_expression(DataType::Timestamp, 2, window_size, window_slide).unwrap(); + let window_offset = IntervalUnit::from_minutes(0); + let (window_start_exprs, window_end_exprs) = make_hop_window_expression( + DataType::Timestamp, + 2, + window_size, + window_slide, + window_offset, + ) + .unwrap(); super::HopWindowExecutor::new( ActorContext::create(123), @@ -304,4 +341,228 @@ mod tests { ) ); } + + fn create_executor2(output_indices: Vec) -> (MessageSender, Box) { + let field1 = Field::unnamed(DataType::Int64); + let field2 = Field::unnamed(DataType::Int64); + let field3 = Field::with_name(DataType::Timestamp, "created_at"); + let schema = Schema::new(vec![field1, field2, field3]); + let pk_indices = vec![0]; + let (tx, source) = MockSource::channel(schema.clone(), pk_indices.clone()); + + let window_slide = IntervalUnit::from_minutes(15); + let window_size = IntervalUnit::from_minutes(30); + let offset = IntervalUnit::from_minutes(0); + let (window_start_exprs, window_end_exprs) = + make_hop_window_expression(DataType::Timestamp, 2, window_size, window_slide, offset) + .unwrap(); + + ( + tx, + super::HopWindowExecutor::new( + 
ActorContext::create(123), + Box::new(source), + ExecutorInfo { + // TODO: the schema is incorrect, but it seems useless here. + schema, + pk_indices, + identity: "test".to_string(), + }, + 2, + window_slide, + window_size, + window_start_exprs, + window_end_exprs, + output_indices, + ) + .boxed(), + ) + } + + #[tokio::test] + async fn test_watermark_full_output() { + let (mut tx, hop) = create_executor2((0..5).collect()); + let mut hop = hop.execute(); + + // TODO: the datatype is incorrect, but it seems useless here. + tx.push_int64_watermark(0, 100); + tx.push_int64_watermark(1, 100); + tx.push_int64_watermark(2, 100); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 0, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 1, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 2, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 3, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 4, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + } + + #[tokio::test] + async fn test_watermark_output_indices1() { + let (mut tx, hop) = create_executor2(vec![4, 1, 0, 2]); + let mut hop = hop.execute(); + + // TODO: the datatype is incorrect, but it seems useless here. + tx.push_int64_watermark(0, 100); + tx.push_int64_watermark(1, 100); + tx.push_int64_watermark(2, 100); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 2, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 1, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 0, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 3, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + } + + #[tokio::test] + async fn test_watermark_output_indices2() { + let (mut tx, hop) = create_executor2(vec![4, 1, 5, 0, 2]); + let mut hop = hop.execute(); + + // TODO: the datatype is incorrect, but it seems useless here. 
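// With output_indices = [4, 1, 5, 0, 2] and 3 input columns,
// `derive_watermarks` yields 0 -> [3], 1 -> [1], 2 -> [0, 2, 4]: output
// indices >= input_len refer to the derived window_start/window_end columns,
// which inherit the time column's watermark, so a push on column 2 fans out
// to three outputs, matching the five asserts below.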
+ tx.push_int64_watermark(0, 100); + tx.push_int64_watermark(1, 100); + tx.push_int64_watermark(2, 100); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 3, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 1, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 0, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 2, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + + let w = hop.next().await.unwrap().unwrap(); + let w = w.as_watermark().unwrap(); + assert_eq!( + w, + &Watermark { + col_idx: 4, + data_type: DataType::Int64, + val: ScalarImpl::Int64(100) + } + ); + } } diff --git a/src/stream/src/executor/integration_tests.rs b/src/stream/src/executor/integration_tests.rs index 4821edecf93aa..4eae8cfc7abf1 100644 --- a/src/stream/src/executor/integration_tests.rs +++ b/src/stream/src/executor/integration_tests.rs @@ -58,7 +58,7 @@ async fn test_merger_sum_aggr() { kind: AggKind::Count, args: AggArgs::None, return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -67,7 +67,7 @@ async fn test_merger_sum_aggr() { kind: AggKind::Sum, args: AggArgs::Unary(DataType::Int64, 0), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -124,7 +124,9 @@ async fn test_merger_sum_aggr() { let dispatcher = DispatchExecutor::new( receiver_op, vec![DispatcherImpl::RoundRobin(RoundRobinDataDispatcher::new( - inputs, 0, + inputs, + vec![0], + 0, ))], 0, ctx, @@ -154,7 +156,7 @@ async fn test_merger_sum_aggr() { kind: AggKind::Sum0, args: AggArgs::Unary(DataType::Int64, 0), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -163,7 +165,7 @@ async fn test_merger_sum_aggr() { kind: AggKind::Sum, args: AggArgs::Unary(DataType::Int64, 1), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -172,7 +174,7 @@ async fn test_merger_sum_aggr() { kind: AggKind::Count, // as row count, index: 2 args: AggArgs::None, return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only, filter: None, distinct: false, @@ -194,6 +196,7 @@ async fn test_merger_sum_aggr() { ], 3, MultiMap::new(), + 0.0, ); let items = Arc::new(Mutex::new(vec![])); diff --git a/src/stream/src/executor/local_simple_agg.rs b/src/stream/src/executor/local_simple_agg.rs index b0c5889fd8d54..aa35d20ce700f 100644 --- a/src/stream/src/executor/local_simple_agg.rs +++ b/src/stream/src/executor/local_simple_agg.rs @@ -52,7 +52,7 @@ impl Executor for LocalSimpleAggExecutor { } impl LocalSimpleAggExecutor { - fn apply_chunk( + async fn apply_chunk( ctx: &ActorContextRef, identity: &str, agg_calls: &[AggCall], @@ -61,19 +61,19 @@ impl LocalSimpleAggExecutor { ) -> StreamExecutorResult<()> { let capacity = chunk.capacity(); let (ops, columns, visibility) = chunk.into_inner(); - let visibilities: Vec<_> = agg_calls - .iter() 
- .map(|agg_call| { - agg_call_filter_res( - ctx, - identity, - agg_call, - &columns, - visibility.as_ref(), - capacity, - ) - }) - .try_collect()?; + let mut visibilities = Vec::with_capacity(agg_calls.len()); + for agg_call in agg_calls { + let result = agg_call_filter_res( + ctx, + identity, + agg_call, + &columns, + visibility.as_ref(), + capacity, + ) + .await?; + visibilities.push(result) + } agg_calls .iter() .zip_eq_fast(visibilities) @@ -118,7 +118,8 @@ impl LocalSimpleAggExecutor { match msg { Message::Watermark(_) => {} Message::Chunk(chunk) => { - Self::apply_chunk(&ctx, &info.identity, &agg_calls, &mut aggregators, chunk)?; + Self::apply_chunk(&ctx, &info.identity, &agg_calls, &mut aggregators, chunk) + .await?; is_dirty = true; } m @ Message::Barrier(_) => { @@ -203,7 +204,7 @@ mod tests { kind: AggKind::Count, args: AggArgs::None, return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only: false, filter: None, distinct: false, @@ -262,7 +263,7 @@ mod tests { kind: AggKind::Count, args: AggArgs::None, return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only: false, filter: None, distinct: false, @@ -271,7 +272,7 @@ mod tests { kind: AggKind::Sum, args: AggArgs::Unary(DataType::Int64, 0), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only: false, filter: None, distinct: false, @@ -280,7 +281,7 @@ mod tests { kind: AggKind::Sum, args: AggArgs::Unary(DataType::Int64, 1), return_type: DataType::Int64, - order_pairs: vec![], + column_orders: vec![], append_only: false, filter: None, distinct: false, diff --git a/src/stream/src/executor/lookup.rs b/src/stream/src/executor/lookup.rs index db068fefc9b5b..94e9eeab76742 100644 --- a/src/stream/src/executor/lookup.rs +++ b/src/stream/src/executor/lookup.rs @@ -28,6 +28,8 @@ mod impl_; pub use impl_::LookupExecutorParams; +use super::ActorContextRef; + #[cfg(test)] mod tests; @@ -38,6 +40,8 @@ mod tests; /// The output schema is `| stream columns | arrangement columns |`. /// The input is required to be first stream and then arrangement. pub struct LookupExecutor { + ctx: ActorContextRef, + /// the data types of the produced data chunk inside lookup (before reordering) chunk_data_types: Vec, diff --git a/src/stream/src/executor/lookup/cache.rs b/src/stream/src/executor/lookup/cache.rs index 498cb164a9620..0922b0b7a3323 100644 --- a/src/stream/src/executor/lookup/cache.rs +++ b/src/stream/src/executor/lookup/cache.rs @@ -64,6 +64,11 @@ impl LookupCache { self.data.update_epoch(epoch); } + /// Clear the cache. 
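/// Called when a vnode bitmap update reports `cache_may_stale`, e.g. after
/// scaling, since cached entries may no longer belong to this parallel unit.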
+ pub fn clear(&mut self) { + self.data.clear(); + } + pub fn new(watermark_epoch: AtomicU64Ref) -> Self { let cache = ExecutorCache::new(new_unbounded(watermark_epoch)); Self { data: cache } diff --git a/src/stream/src/executor/lookup/impl_.rs b/src/stream/src/executor/lookup/impl_.rs index d80ef825f016a..375c307054724 100644 --- a/src/stream/src/executor/lookup/impl_.rs +++ b/src/stream/src/executor/lookup/impl_.rs @@ -20,7 +20,7 @@ use risingwave_common::catalog::{ColumnDesc, Schema}; use risingwave_common::row::{OwnedRow, Row, RowExt}; use risingwave_common::util::epoch::EpochPair; use risingwave_common::util::iter_util::ZipEqDebug; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_hummock_sdk::HummockReadEpoch; use risingwave_storage::store::PrefetchOptions; use risingwave_storage::table::batch_table::storage_table::StorageTable; @@ -28,16 +28,19 @@ use risingwave_storage::table::TableIter; use risingwave_storage::StateStore; use super::sides::{stream_lookup_arrange_prev_epoch, stream_lookup_arrange_this_epoch}; +use crate::cache::cache_may_stale; use crate::common::StreamChunkBuilder; use crate::executor::error::{StreamExecutorError, StreamExecutorResult}; use crate::executor::lookup::cache::LookupCache; use crate::executor::lookup::sides::{ArrangeJoinSide, ArrangeMessage, StreamJoinSide}; use crate::executor::lookup::LookupExecutor; -use crate::executor::{Barrier, Executor, Message, PkIndices}; +use crate::executor::{ActorContextRef, Barrier, Executor, Message, PkIndices}; use crate::task::AtomicU64Ref; /// Parameters for [`LookupExecutor`]. pub struct LookupExecutorParams { + pub ctx: ActorContextRef, + /// The side for arrangement. Currently, it should be a /// `MaterializeExecutor`. pub arrangement: Box, @@ -54,7 +57,7 @@ pub struct LookupExecutorParams { /// should contain all 3 columns. pub arrangement_col_descs: Vec, - /// Should only contain [`OrderPair`] for arrange in the arrangement. + /// Should only contain [`ColumnOrder`] for arrange in the arrangement. /// /// Still using the above `a, b, _row_id` example. If we create an arrangement with join key /// `a`, there will be 3 elements in `arrangement_col_descs`, and only 1 element in @@ -65,7 +68,7 @@ pub struct LookupExecutorParams { /// /// For the MV pk, they will only be contained in `arrangement_col_descs`, without being part /// of this `arrangement_order_rules`. - pub arrangement_order_rules: Vec, + pub arrangement_order_rules: Vec, /// Primary key indices of the lookup result (after reordering). /// @@ -116,6 +119,7 @@ pub struct LookupExecutorParams { impl LookupExecutor { pub fn new(params: LookupExecutorParams) -> Self { let LookupExecutorParams { + ctx, arrangement, stream, arrangement_col_descs, @@ -161,7 +165,7 @@ impl LookupExecutor { arrange_join_key_indices.sort_unstable(); let mut arrangement_order_types_indices = arrangement_order_rules .iter() - .map(|x| x.column_idx) + .map(|x| x.column_index) .collect_vec(); arrangement_order_types_indices.sort_unstable(); assert_eq!( @@ -182,7 +186,7 @@ impl LookupExecutor { // resolve mapping from join keys in stream row -> joins keys for arrangement. 
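// That is, compose (order-rule column in the arrangement) -> (its position
// among the sorted join keys) -> (the matching column in the stream row), so
// a lookup key can be read from a stream row in the order the arrangement
// expects.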
let key_indices_mapping = arrangement_order_rules .iter() - .map(|x| x.column_idx) // the required column idx in this position + .map(|x| x.column_index) // the required column idx in this position .filter_map(|x| arrange_join_key_indices.iter().position(|y| *y == x)) // the position of the item in join keys .map(|x| stream_join_key_indices[x]) // the actual column idx in stream .collect_vec(); @@ -202,6 +206,7 @@ impl LookupExecutor { ); Self { + ctx, chunk_data_types, schema: output_schema, pk_indices, @@ -273,10 +278,8 @@ impl LookupExecutor { self.lookup_cache.flush(); } - // Use the new stream barrier epoch as new cache epoch - self.lookup_cache.update_epoch(barrier.epoch.curr); + self.process_barrier(&barrier); - self.process_barrier(barrier.clone()).await?; if self.arrangement.use_current_epoch { // When lookup this epoch, stream side barrier always come after arrangement // ready, so we can forward barrier now. @@ -336,11 +339,23 @@ impl LookupExecutor { } } - /// Store the barrier. - #[expect(clippy::unused_async)] - async fn process_barrier(&mut self, barrier: Barrier) -> StreamExecutorResult<()> { - self.last_barrier = Some(barrier); - Ok(()) + /// Process the barrier and apply changes if necessary. + fn process_barrier(&mut self, barrier: &Barrier) { + if let Some(vnode_bitmap) = barrier.as_update_vnode_bitmap(self.ctx.id) { + let previous_vnode_bitmap = self + .arrangement + .storage_table + .update_vnode_bitmap(vnode_bitmap.clone()); + + // Manipulate the cache if necessary. + if cache_may_stale(&previous_vnode_bitmap, &vnode_bitmap) { + self.lookup_cache.clear(); + } + } + + // Use the new stream barrier epoch as new cache epoch + self.lookup_cache.update_epoch(barrier.epoch.curr); + self.last_barrier = Some(barrier.clone()); } /// Lookup all rows corresponding to a join key in shared buffer. diff --git a/src/stream/src/executor/lookup/sides.rs b/src/stream/src/executor/lookup/sides.rs index e4ab39327c53c..403f985093935 100644 --- a/src/stream/src/executor/lookup/sides.rs +++ b/src/stream/src/executor/lookup/sides.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use anyhow::Context; use either::Either; use futures::stream::PollNext; use futures::StreamExt; @@ -19,7 +20,7 @@ use futures_async_stream::try_stream; use risingwave_common::array::StreamChunk; use risingwave_common::catalog::ColumnDesc; use risingwave_common::types::DataType; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_storage::table::batch_table::storage_table::StorageTable; use risingwave_storage::StateStore; @@ -61,7 +62,7 @@ pub(crate) struct ArrangeJoinSide { /// Order rules of the arrangement (only join key is needed, pk should not be included, used /// for lookup) - pub order_rules: Vec, + pub order_rules: Vec, /// Key indices for the join /// @@ -247,7 +248,7 @@ pub async fn stream_lookup_arrange_prev_epoch( match input .next() .await - .expect("unexpected close of barrier aligner")? + .context("unexpected close of barrier aligner")?? { Either::Left(Message::Watermark(_)) => { todo!("https://github.com/risingwavelabs/risingwave/issues/6042") @@ -298,7 +299,7 @@ pub async fn stream_lookup_arrange_this_epoch( match input .next() .await - .expect("unexpected close of barrier aligner")? + .context("unexpected close of barrier aligner")?? { Either::Left(Message::Chunk(msg)) => { // Should wait until arrangement from this epoch is available. 
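A side note on the error-handling change in the hunks above and below: `input.next().await` on the barrier aligner yields an `Option<Result<..>>`, and the patch swaps a panicking `.expect(..)` for `anyhow::Context`, so a closed stream surfaces as a proper error instead of a panic. A minimal sketch of the `.context(..)??` pattern with toy types (the real stream yields `Either<Message, Message>` items, not `u32`):

use anyhow::Context;
use futures::{Stream, StreamExt};

async fn next_or_err(
    input: &mut (impl Stream<Item = anyhow::Result<u32>> + Unpin),
) -> anyhow::Result<u32> {
    // `context` maps `None` (stream unexpectedly closed) to an error; the
    // first `?` unwraps that, the second unwraps the item's own `Result`.
    let item = input
        .next()
        .await
        .context("unexpected close of barrier aligner")??;
    Ok(item)
}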
@@ -333,7 +334,7 @@ pub async fn stream_lookup_arrange_this_epoch( match input .next() .await - .expect("unexpected close of barrier aligner")? + .context("unexpected close of barrier aligner")?? { Either::Left(Message::Chunk(msg)) => yield ArrangeMessage::Stream(msg), Either::Left(Message::Barrier(b)) => { @@ -355,7 +356,7 @@ pub async fn stream_lookup_arrange_this_epoch( match input .next() .await - .expect("unexpected close of barrier aligner")? + .context("unexpected close of barrier aligner")?? { Either::Left(_) => unreachable!(), Either::Right(Message::Chunk(chunk)) => { diff --git a/src/stream/src/executor/lookup/tests.rs b/src/stream/src/executor/lookup/tests.rs index 22fed3d528801..5fda9504d17f7 100644 --- a/src/stream/src/executor/lookup/tests.rs +++ b/src/stream/src/executor/lookup/tests.rs @@ -20,9 +20,9 @@ use futures::StreamExt; use itertools::Itertools; use risingwave_common::array::stream_chunk::StreamChunkTestExt; use risingwave_common::array::StreamChunk; -use risingwave_common::catalog::{ColumnDesc, ColumnId, ConflictBehavior, Field, Schema, TableId}; +use risingwave_common::catalog::{ColumnDesc, ConflictBehavior, Field, Schema, TableId}; use risingwave_common::types::DataType; -use risingwave_common::util::sort_util::{OrderPair, OrderType}; +use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; use risingwave_storage::memory::MemoryStateStore; use risingwave_storage::table::batch_table::storage_table::StorageTable; @@ -30,37 +30,25 @@ use crate::executor::lookup::impl_::LookupExecutorParams; use crate::executor::lookup::LookupExecutor; use crate::executor::test_utils::*; use crate::executor::{ - Barrier, BoxedMessageStream, Executor, MaterializeExecutor, Message, PkIndices, + ActorContext, Barrier, BoxedMessageStream, Executor, MaterializeExecutor, Message, PkIndices, }; fn arrangement_col_descs() -> Vec { vec![ - ColumnDesc { - data_type: DataType::Int64, - column_id: ColumnId::new(0), - name: "rowid_column".to_string(), - field_descs: vec![], - type_name: "".to_string(), - }, - ColumnDesc { - data_type: DataType::Int64, - column_id: ColumnId::new(1), - name: "join_column".to_string(), - field_descs: vec![], - type_name: "".to_string(), - }, + ColumnDesc::new_atomic(DataType::Int64, "rowid_column", 0), + ColumnDesc::new_atomic(DataType::Int64, "join_column", 1), ] } -fn arrangement_col_arrange_rules() -> Vec { +fn arrangement_col_arrange_rules() -> Vec { vec![ - OrderPair::new(1, OrderType::Ascending), - OrderPair::new(0, OrderType::Ascending), + ColumnOrder::new(1, OrderType::ascending()), + ColumnOrder::new(0, OrderType::ascending()), ] } -fn arrangement_col_arrange_rules_join_key() -> Vec { - vec![OrderPair::new(1, OrderType::Ascending)] +fn arrangement_col_arrange_rules_join_key() -> Vec { + vec![ColumnOrder::new(1, OrderType::ascending())] } /// Create a test arrangement. @@ -152,20 +140,8 @@ async fn create_arrangement( /// | b | | | 3 -> 4 | fn create_source() -> Box { let columns = vec![ - ColumnDesc { - data_type: DataType::Int64, - column_id: ColumnId::new(1), - name: "join_column".to_string(), - field_descs: vec![], - type_name: "".to_string(), - }, - ColumnDesc { - data_type: DataType::Int64, - column_id: ColumnId::new(2), - name: "rowid_column".to_string(), - field_descs: vec![], - type_name: "".to_string(), - }, + ColumnDesc::new_atomic(DataType::Int64, "join_column", 1), + ColumnDesc::new_atomic(DataType::Int64, "rowid_column", 2), ]; // Prepare source chunks. 
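The test updates just above apply the same mechanical migration used throughout this patch: `OrderPair::new(idx, OrderType::Ascending)` becomes `ColumnOrder::new(idx, OrderType::ascending())`, and the `column_idx` field becomes `column_index`. A hedged sketch of the new construction, assuming the `risingwave_common` API at this revision:

use risingwave_common::util::sort_util::{ColumnOrder, OrderType};

// Order types are now built via constructor methods rather than bare enum
// variants, which leaves room for the nulls-first/nulls-last dimension.
fn storage_key_order() -> Vec<ColumnOrder> {
    vec![
        ColumnOrder::new(1, OrderType::ascending()),
        ColumnOrder::new(0, OrderType::descending()),
    ]
}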
@@ -218,6 +194,7 @@ async fn test_lookup_this_epoch() { let arrangement = create_arrangement(table_id, store.clone()).await; let stream = create_source(); let lookup_executor = Box::new(LookupExecutor::new(LookupExecutorParams { + ctx: ActorContext::create(0), arrangement, stream, arrangement_col_descs: arrangement_col_descs(), @@ -281,14 +258,13 @@ async fn test_lookup_this_epoch() { } #[tokio::test] -#[ignore] -// Deprecated because the ability to read from prev epoch has been deprecated. async fn test_lookup_last_epoch() { let store = MemoryStateStore::new(); let table_id = TableId::new(1); let arrangement = create_arrangement(table_id, store.clone()).await; let stream = create_source(); let lookup_executor = Box::new(LookupExecutor::new(LookupExecutorParams { + ctx: ActorContext::create(0), arrangement, stream, arrangement_col_descs: arrangement_col_descs(), diff --git a/src/stream/src/executor/managed_state/join/mod.rs b/src/stream/src/executor/managed_state/join/mod.rs index 33456ff859f90..e0988f23e3b8f 100644 --- a/src/stream/src/executor/managed_state/join/mod.rs +++ b/src/stream/src/executor/managed_state/join/mod.rs @@ -36,7 +36,7 @@ use risingwave_common::util::sort_util::OrderType; use risingwave_storage::store::PrefetchOptions; use risingwave_storage::StateStore; -use crate::cache::{cache_may_stale, new_with_hasher_in, ExecutorCache}; +use crate::cache::{new_with_hasher_in, ExecutorCache}; use crate::common::table::state_table::StateTable; use crate::executor::error::StreamExecutorResult; use crate::executor::monitor::StreamingMetrics; @@ -161,7 +161,6 @@ pub struct JoinHashMapMetrics { total_lookup_count: usize, /// How many times have we miss the cache when insert row insert_cache_miss_count: usize, - may_exist_true_count: usize, } impl JoinHashMapMetrics { @@ -173,7 +172,6 @@ impl JoinHashMapMetrics { lookup_miss_count: 0, total_lookup_count: 0, insert_cache_miss_count: 0, - may_exist_true_count: 0, } } @@ -190,14 +188,9 @@ impl JoinHashMapMetrics { .join_insert_cache_miss_count .with_label_values(&[&self.actor_id, self.side]) .inc_by(self.insert_cache_miss_count as u64); - self.metrics - .join_may_exist_true_count - .with_label_values(&[&self.actor_id, self.side]) - .inc_by(self.may_exist_true_count as u64); self.total_lookup_count = 0; self.lookup_miss_count = 0; self.insert_cache_miss_count = 0; - self.may_exist_true_count = 0; } } @@ -270,7 +263,7 @@ impl JoinHashMap { .collect(); let pk_serializer = OrderedRowSerde::new( pk_data_types, - vec![OrderType::Ascending; state_pk_indices.len()], + vec![OrderType::ascending(); state_pk_indices.len()], ); let state = TableInner { @@ -318,16 +311,16 @@ impl JoinHashMap { } /// Update the vnode bitmap and manipulate the cache if necessary. 
-    pub fn update_vnode_bitmap(&mut self, vnode_bitmap: Arc<Bitmap>) {
-        let previous_vnode_bitmap = self.state.table.update_vnode_bitmap(vnode_bitmap.clone());
-        let _ = self
-            .degree_state
-            .table
-            .update_vnode_bitmap(vnode_bitmap.clone());
-
-        if cache_may_stale(&previous_vnode_bitmap, &vnode_bitmap) {
+    pub fn update_vnode_bitmap(&mut self, vnode_bitmap: Arc<Bitmap>) -> bool {
+        let (_previous_vnode_bitmap, cache_may_stale) =
+            self.state.table.update_vnode_bitmap(vnode_bitmap.clone());
+        let _ = self.degree_state.table.update_vnode_bitmap(vnode_bitmap);
+
+        if cache_may_stale {
             self.inner.clear();
         }
+
+        cache_may_stale
     }
 
     pub fn update_watermark(&mut self, watermark: ScalarImpl) {
@@ -365,11 +358,14 @@ impl<K: HashKey, S: StateStore> JoinHashMap<K, S> {
         let mut entry_state = JoinEntryState::default();
 
         if self.need_degree_table {
-            let table_iter_fut = self.state.table.iter_key_and_val(&key, Default::default());
+            let table_iter_fut = self
+                .state
+                .table
+                .iter_key_and_val(&key, PrefetchOptions::new_for_exhaust_iter());
             let degree_table_iter_fut = self
                 .degree_state
                 .table
-                .iter_key_and_val(&key, Default::default());
+                .iter_key_and_val(&key, PrefetchOptions::new_for_exhaust_iter());
 
             let (table_iter, degree_table_iter) =
                 try_join(table_iter_fut, degree_table_iter_fut).await?;
@@ -432,22 +428,11 @@ impl<K: HashKey, S: StateStore> JoinHashMap<K, S> {
             // Update cache
             entry.insert(pk, value.encode());
         } else if self.pk_contained_in_jk {
-            // Refill cache when the join key contains primary key.
+            // Refill the cache when the join key exists in neither the cache nor the storage.
             self.metrics.insert_cache_miss_count += 1;
             let mut state = JoinEntryState::default();
             state.insert(pk, value.encode());
             self.update_state(key, state.into());
-        } else {
-            let prefix = key.deserialize(&self.join_key_data_types)?;
-            self.metrics.insert_cache_miss_count += 1;
-            // Refill cache when the join key exists in neither cache or storage.
-            if !self.state.table.may_exist(&prefix).await? {
-                let mut state = JoinEntryState::default();
-                state.insert(pk, value.encode());
-                self.update_state(key, state.into());
-            } else {
-                self.metrics.may_exist_true_count += 1;
-            }
         }
 
         // Update the flush buffer.
@@ -459,6 +444,7 @@ impl<K: HashKey, S: StateStore> JoinHashMap<K, S> {
 
     /// Insert a row.
     /// Used when the side does not need to update degree.
+    #[allow(clippy::unused_async)]
     pub async fn insert_row(&mut self, key: &K, value: impl Row) -> StreamExecutorResult<()> {
         let join_row = JoinRow::new(&value, 0);
         let pk = (&value)
@@ -468,22 +454,11 @@ impl<K: HashKey, S: StateStore> JoinHashMap<K, S> {
             // Update cache
             entry.insert(pk, join_row.encode());
         } else if self.pk_contained_in_jk {
-            // Refill cache when the join key contains primary key.
+            // Refill the cache when the join key exists in neither the cache nor the storage.
             self.metrics.insert_cache_miss_count += 1;
             let mut state = JoinEntryState::default();
             state.insert(pk, join_row.encode());
             self.update_state(key, state.into());
-        } else {
-            let prefix = key.deserialize(&self.join_key_data_types)?;
-            self.metrics.insert_cache_miss_count += 1;
-            // Refill cache when the join key exists in neither cache or storage.
-            if !self.state.table.may_exist(&prefix).await? {
-                let mut state = JoinEntryState::default();
-                state.insert(pk, join_row.encode());
-                self.update_state(key, state.into());
-            } else {
-                self.metrics.may_exist_true_count += 1;
-            }
         }
 
         // Update the flush buffer.
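For context on the `update_vnode_bitmap(..) -> bool` contract introduced above: after a scaling event, the state table reports whether the change in vnode ownership may invalidate locally cached entries, and the caller clears its cache and forwards the flag. A self-contained toy model of that decision, not the real `StateTable` API:

struct VnodeAwareCache {
    owned_vnodes: Vec<bool>,
    cache: std::collections::HashMap<u64, Vec<u8>>,
}

impl VnodeAwareCache {
    /// Returns `true` if the cache had to be cleared, mirroring the new
    /// `update_vnode_bitmap(..) -> bool` contract.
    fn update_vnode_bitmap(&mut self, new_vnodes: Vec<bool>) -> bool {
        // The cache may be stale as soon as we gain a vnode we did not own
        // before: its rows were never loaded into the local cache.
        let cache_may_stale = new_vnodes
            .iter()
            .zip(self.owned_vnodes.iter())
            .any(|(now, before)| *now && !*before);
        self.owned_vnodes = new_vnodes;
        if cache_may_stale {
            self.cache.clear();
        }
        cache_may_stale
    }
}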
diff --git a/src/stream/src/executor/managed_state/top_n/top_n_state.rs b/src/stream/src/executor/managed_state/top_n/top_n_state.rs index 41a991d1b0502..8dee8fb7fba8e 100644 --- a/src/stream/src/executor/managed_state/top_n/top_n_state.rs +++ b/src/stream/src/executor/managed_state/top_n/top_n_state.rs @@ -251,7 +251,7 @@ impl ManagedTopNState { mod tests { use risingwave_common::catalog::{Field, Schema}; use risingwave_common::types::DataType; - use risingwave_common::util::sort_util::{OrderPair, OrderType}; + use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; // use std::collections::BTreeMap; use super::*; @@ -264,11 +264,11 @@ mod tests { let data_types = vec![DataType::Varchar, DataType::Int64]; let schema = Schema::new(data_types.into_iter().map(Field::unnamed).collect()); let storage_key = vec![ - OrderPair::new(0, OrderType::Ascending), - OrderPair::new(1, OrderType::Ascending), + ColumnOrder::new(0, OrderType::ascending()), + ColumnOrder::new(1, OrderType::ascending()), ]; let pk = vec![0, 1]; - let order_by = vec![OrderPair::new(0, OrderType::Ascending)]; + let order_by = vec![ColumnOrder::new(0, OrderType::ascending())]; create_cache_key_serde(&storage_key, &pk, &schema, &order_by, &[]) } @@ -278,7 +278,7 @@ mod tests { let state_table = { let mut tb = create_in_memory_state_table( &[DataType::Varchar, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &[0, 1], ) .await; @@ -354,10 +354,11 @@ mod tests { #[tokio::test] async fn test_managed_top_n_state_fill_cache() { + let data_types = vec![DataType::Varchar, DataType::Int64]; let state_table = { let mut tb = create_in_memory_state_table( - &[DataType::Varchar, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &data_types, + &[OrderType::ascending(), OrderType::ascending()], &[0, 1], ) .await; @@ -382,7 +383,7 @@ mod tests { let rows = vec![row1, row2, row3, row4, row5]; let ordered_rows = vec![row1_bytes, row2_bytes, row3_bytes, row4_bytes, row5_bytes]; - let mut cache = TopNCache::::new(1, 1); + let mut cache = TopNCache::::new(1, 1, data_types); managed_state.insert(rows[3].clone()); managed_state.insert(rows[1].clone()); diff --git a/src/stream/src/executor/merge.rs b/src/stream/src/executor/merge.rs index 40fd5fe5c1976..27a74b44cbcee 100644 --- a/src/stream/src/executor/merge.rs +++ b/src/stream/src/executor/merge.rs @@ -144,6 +144,21 @@ impl MergeExecutor { ); barrier.passed_actors.push(actor_id); + if let Some(Mutation::Update { dispatchers, .. }) = barrier.mutation.as_deref() + { + if select_all + .upstream_actor_ids() + .iter() + .any(|actor_id| dispatchers.contains_key(actor_id)) + { + // `Watermark` of upstream may become stale after downstream scaling. 
+ select_all + .buffered_watermarks + .values_mut() + .for_each(|buffers| buffers.clear()); + } + } + if let Some(update) = barrier.as_update_merge(self.actor_context.id, self.upstream_fragment_id) { diff --git a/src/stream/src/executor/mod.rs b/src/stream/src/executor/mod.rs index 60282bd5dc594..d0925c5d85880 100644 --- a/src/stream/src/executor/mod.rs +++ b/src/stream/src/executor/mod.rs @@ -20,6 +20,7 @@ use await_tree::InstrumentAwait; use enum_as_inner::EnumAsInner; use futures::stream::BoxStream; use futures::{Stream, StreamExt}; +use futures_async_stream::try_stream; use itertools::Itertools; use minitrace::prelude::*; use risingwave_common::array::column::Column; @@ -33,20 +34,18 @@ use risingwave_common::util::value_encoding::{deserialize_datum, serialize_datum use risingwave_connector::source::SplitImpl; use risingwave_expr::expr::BoxedExpression; use risingwave_expr::ExprError; -use risingwave_pb::data::{Datum as ProstDatum, Epoch as ProstEpoch}; -use risingwave_pb::expr::InputRef as ProstInputRef; +use risingwave_pb::data::{PbDatum, PbEpoch}; +use risingwave_pb::expr::PbInputRef; use risingwave_pb::stream_plan::add_mutation::Dispatchers; -use risingwave_pb::stream_plan::barrier::Mutation as ProstMutation; +use risingwave_pb::stream_plan::barrier::PbMutation; use risingwave_pb::stream_plan::stream_message::StreamMessage; use risingwave_pb::stream_plan::update_mutation::{DispatcherUpdate, MergeUpdate}; use risingwave_pb::stream_plan::{ - AddMutation, Barrier as ProstBarrier, Dispatcher as ProstDispatcher, PauseMutation, - ResumeMutation, SourceChangeSplitMutation, StopMutation, StreamMessage as ProstStreamMessage, - UpdateMutation, Watermark as ProstWatermark, + AddMutation, PauseMutation, PbBarrier, PbDispatcher, PbStreamMessage, PbWatermark, + ResumeMutation, SourceChangeSplitMutation, StopMutation, UpdateMutation, }; use smallvec::SmallVec; -use crate::common::InfallibleExpression; use crate::error::StreamResult; use crate::task::{ActorId, FragmentId}; @@ -57,6 +56,7 @@ pub mod monitor; pub mod agg_common; pub mod aggregation; +mod barrier_recv; mod batch_query; mod chain; mod dispatch; @@ -81,13 +81,13 @@ mod project_set; mod rearranged_chain; mod receiver; pub mod row_id_gen; -mod simple; mod sink; mod sort; mod sort_buffer; pub mod source; mod stream_reader; pub mod subtask; +mod temporal_join; mod top_n; mod union; mod watermark; @@ -103,6 +103,7 @@ mod test_utils; pub use actor::{Actor, ActorContext, ActorContextRef}; use anyhow::Context; pub use backfill::*; +pub use barrier_recv::BarrierRecvExecutor; pub use batch_query::BatchQueryExecutor; pub use chain::ChainExecutor; pub use dispatch::{DispatchExecutor, DispatcherImpl}; @@ -125,10 +126,10 @@ pub use project_set::*; pub use rearranged_chain::RearrangedChainExecutor; pub use receiver::ReceiverExecutor; use risingwave_pb::source::{ConnectorSplit, ConnectorSplits}; -use simple::{SimpleExecutor, SimpleExecutorWrapper}; pub use sink::SinkExecutor; pub use sort::SortExecutor; pub use source::*; +pub use temporal_join::*; pub use top_n::{ AppendOnlyGroupTopNExecutor, AppendOnlyTopNExecutor, GroupTopNExecutor, TopNExecutor, }; @@ -218,7 +219,7 @@ pub enum Mutation { actor_splits: HashMap>, }, Add { - adds: HashMap>, + adds: HashMap>, // TODO: remove this and use `SourceChangesSplit` after we support multiple mutations. 
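Stepping back to the merge executor hunk above, which drops `buffered_watermarks` when an `Update` mutation touches one of its upstreams: the merge operator may only emit the minimum watermark across its upstreams, and per-upstream minima recorded before a scaling no longer describe the new input set. A toy model of that invariant, with `u32` actor ids and `i64` watermarks standing in for the real types:

use std::collections::HashMap;

#[derive(Default)]
struct WatermarkBuffer {
    latest_per_upstream: HashMap<u32, i64>,
}

impl WatermarkBuffer {
    /// Record a watermark and return the minimum across upstreams seen so
    /// far (a real implementation also waits until every upstream reports).
    fn observe(&mut self, upstream: u32, watermark: i64) -> Option<i64> {
        self.latest_per_upstream.insert(upstream, watermark);
        self.latest_per_upstream.values().min().copied()
    }

    /// Mirrors `buffers.clear()` above: once the upstream set changes, the
    /// recorded minima may be stale and must be rebuilt from fresh input.
    fn on_upstream_update(&mut self) {
        self.latest_per_upstream.clear();
    }
}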
        splits: HashMap<ActorId, Vec<SplitImpl>>,
    },
@@ -362,9 +363,9 @@ impl Mutation {
         matches!(self, Mutation::Stop(_))
     }
 
-    fn to_protobuf(&self) -> ProstMutation {
+    fn to_protobuf(&self) -> PbMutation {
         match self {
-            Mutation::Stop(actors) => ProstMutation::Stop(StopMutation {
+            Mutation::Stop(actors) => PbMutation::Stop(StopMutation {
                 actors: actors.iter().copied().collect::<Vec<_>>(),
             }),
             Mutation::Update {
@@ -373,7 +374,7 @@ impl Mutation {
                 vnode_bitmaps,
                 dropped_actors,
                 actor_splits,
-            } => ProstMutation::Update(UpdateMutation {
+            } => PbMutation::Update(UpdateMutation {
                 dispatcher_update: dispatchers.values().flatten().cloned().collect(),
                 merge_update: merges.values().cloned().collect(),
                 actor_vnode_bitmap_update: vnode_bitmaps
@@ -393,7 +394,7 @@ impl Mutation {
                     })
                     .collect(),
             }),
-            Mutation::Add { adds, .. } => ProstMutation::Add(AddMutation {
+            Mutation::Add { adds, .. } => PbMutation::Add(AddMutation {
                 actor_dispatchers: adds
                     .iter()
                     .map(|(&actor_id, dispatchers)| {
@@ -407,37 +408,29 @@ impl Mutation {
                     .collect(),
                 ..Default::default()
             }),
-            Mutation::SourceChangeSplit(changes) => {
-                ProstMutation::Splits(SourceChangeSplitMutation {
-                    actor_splits: changes
-                        .iter()
-                        .map(|(&actor_id, splits)| {
-                            (
-                                actor_id,
-                                ConnectorSplits {
-                                    splits: splits
-                                        .clone()
-                                        .iter()
-                                        .map(ConnectorSplit::from)
-                                        .collect(),
-                                },
-                            )
-                        })
-                        .collect(),
-                })
-            }
-            Mutation::Pause => ProstMutation::Pause(PauseMutation {}),
-            Mutation::Resume => ProstMutation::Resume(ResumeMutation {}),
+            Mutation::SourceChangeSplit(changes) => PbMutation::Splits(SourceChangeSplitMutation {
+                actor_splits: changes
+                    .iter()
+                    .map(|(&actor_id, splits)| {
+                        (
+                            actor_id,
+                            ConnectorSplits {
+                                splits: splits.clone().iter().map(ConnectorSplit::from).collect(),
+                            },
+                        )
+                    })
+                    .collect(),
+            }),
+            Mutation::Pause => PbMutation::Pause(PauseMutation {}),
+            Mutation::Resume => PbMutation::Resume(ResumeMutation {}),
         }
     }
 
-    fn from_protobuf(prost: &ProstMutation) -> StreamExecutorResult<Self> {
+    fn from_protobuf(prost: &PbMutation) -> StreamExecutorResult<Self> {
         let mutation = match prost {
-            ProstMutation::Stop(stop) => {
-                Mutation::Stop(HashSet::from_iter(stop.get_actors().clone()))
-            }
+            PbMutation::Stop(stop) => Mutation::Stop(HashSet::from_iter(stop.get_actors().clone())),
 
-            ProstMutation::Update(update) => Mutation::Update {
+            PbMutation::Update(update) => Mutation::Update {
                 dispatchers: update
                     .dispatcher_update
                     .iter()
@@ -470,7 +463,7 @@ impl Mutation {
                     .collect(),
             },
 
-            ProstMutation::Add(add) => Mutation::Add {
+            PbMutation::Add(add) => Mutation::Add {
                 adds: add
                     .actor_dispatchers
                     .iter()
@@ -494,7 +487,7 @@ impl Mutation {
                     .collect(),
             },
 
-            ProstMutation::Splits(s) => {
+            PbMutation::Splits(s) => {
                 let mut change_splits: Vec<(ActorId, Vec<SplitImpl>)> =
                     Vec::with_capacity(s.actor_splits.len());
                 for (&actor_id, splits) in &s.actor_splits {
@@ -511,15 +504,15 @@ impl Mutation {
                 }
                 Mutation::SourceChangeSplit(change_splits.into_iter().collect())
             }
-            ProstMutation::Pause(_) => Mutation::Pause,
-            ProstMutation::Resume(_) => Mutation::Resume,
+            PbMutation::Pause(_) => Mutation::Pause,
+            PbMutation::Resume(_) => Mutation::Resume,
         };
         Ok(mutation)
     }
 }
 
 impl Barrier {
-    pub fn to_protobuf(&self) -> ProstBarrier {
+    pub fn to_protobuf(&self) -> PbBarrier {
         let Barrier {
             epoch,
             mutation,
             checkpoint,
             passed_actors,
             ..
}: Barrier = self.clone(); - ProstBarrier { - epoch: Some(ProstEpoch { + PbBarrier { + epoch: Some(PbEpoch { curr: epoch.curr, prev: epoch.prev, }), @@ -539,7 +532,7 @@ impl Barrier { } } - pub fn from_protobuf(prost: &ProstBarrier) -> StreamExecutorResult { + pub fn from_protobuf(prost: &PbBarrier) -> StreamExecutorResult { let mutation = prost .mutation .as_ref() @@ -589,7 +582,7 @@ impl Watermark { } } - pub fn transform_with_expr( + pub async fn transform_with_expr( self, expr: &BoxedExpression, new_col_idx: usize, @@ -605,7 +598,7 @@ impl Watermark { row[col_idx] = Some(val); OwnedRow::new(row) }; - let val = expr.eval_row_infallible(&row, on_err)?; + let val = expr.eval_row_infallible(&row, on_err).await?; Some(Self { col_idx: new_col_idx, data_type, @@ -613,19 +606,28 @@ impl Watermark { }) } - pub fn to_protobuf(&self) -> ProstWatermark { - ProstWatermark { - column: Some(ProstInputRef { + /// Transform the watermark with the given output indices. If this watermark is not in the + /// output, return `None`. + pub fn transform_with_indices(self, output_indices: &[usize]) -> Option { + output_indices + .iter() + .position(|p| *p == self.col_idx) + .map(|new_col_idx| self.with_idx(new_col_idx)) + } + + pub fn to_protobuf(&self) -> PbWatermark { + PbWatermark { + column: Some(PbInputRef { index: self.col_idx as _, r#type: Some(self.data_type.to_protobuf()), }), - val: Some(ProstDatum { + val: Some(PbDatum { body: serialize_datum(Some(&self.val)), }), } } - pub fn from_protobuf(prost: &ProstWatermark) -> StreamExecutorResult { + pub fn from_protobuf(prost: &PbWatermark) -> StreamExecutorResult { let col_ref = prost.get_column()?; let data_type = DataType::from(col_ref.get_type()?); let val = deserialize_datum(prost.get_val()?.get_body().as_slice(), &data_type)? @@ -681,7 +683,7 @@ impl Message { ) } - pub fn to_protobuf(&self) -> ProstStreamMessage { + pub fn to_protobuf(&self) -> PbStreamMessage { let prost = match self { Self::Chunk(stream_chunk) => { let prost_stream_chunk = stream_chunk.to_protobuf(); @@ -690,12 +692,12 @@ impl Message { Self::Barrier(barrier) => StreamMessage::Barrier(barrier.clone().to_protobuf()), Self::Watermark(watermark) => StreamMessage::Watermark(watermark.to_protobuf()), }; - ProstStreamMessage { + PbStreamMessage { stream_message: Some(prost), } } - pub fn from_protobuf(prost: &ProstStreamMessage) -> StreamExecutorResult { + pub fn from_protobuf(prost: &PbStreamMessage) -> StreamExecutorResult { let res = match prost.get_stream_message()? 
        {
            StreamMessage::StreamChunk(chunk) => Message::Chunk(StreamChunk::from_protobuf(chunk)?),
            StreamMessage::Barrier(barrier) => Message::Barrier(Barrier::from_protobuf(barrier)?),
diff --git a/src/stream/src/executor/monitor/streaming_stats.rs b/src/stream/src/executor/monitor/streaming_stats.rs
index 59aa97fa31314..fc6fe6a03f8f4 100644
--- a/src/stream/src/executor/monitor/streaming_stats.rs
+++ b/src/stream/src/executor/monitor/streaming_stats.rs
@@ -53,7 +53,6 @@ pub struct StreamingMetrics {
     pub join_lookup_miss_count: GenericCounterVec<AtomicU64>,
     pub join_total_lookup_count: GenericCounterVec<AtomicU64>,
     pub join_insert_cache_miss_count: GenericCounterVec<AtomicU64>,
-    pub join_may_exist_true_count: GenericCounterVec<AtomicU64>,
     pub join_actor_input_waiting_duration_ns: GenericCounterVec<AtomicU64>,
     pub join_match_duration_ns: GenericCounterVec<AtomicU64>,
     pub join_barrier_align_duration: HistogramVec,
@@ -275,7 +274,7 @@ impl StreamingMetrics {
         let join_lookup_miss_count = register_int_counter_vec_with_registry!(
             "stream_join_lookup_miss_count",
-            "Join executor lookup miss count",
+            "Join executor lookup miss duration",
             &["actor_id", "side"],
             registry
         )
         .unwrap();
 
         let join_total_lookup_count = register_int_counter_vec_with_registry!(
             "stream_join_lookup_total_count",
-            "Join executor lookup total count",
+            "Join executor lookup total operations",
             &["actor_id", "side"],
             registry
         )
         .unwrap();
 
         let join_insert_cache_miss_count = register_int_counter_vec_with_registry!(
             "stream_join_insert_cache_miss_count",
-            "Count of cache miss when insert rows in join executor",
+            "Join executor cache miss count for insert operations",
             &["actor_id", "side"],
             registry
         )
         .unwrap();
-
-        let join_may_exist_true_count = register_int_counter_vec_with_registry!(
-            "stream_join_may_exist_true_count",
-            "Count of may_exist's true returns of when insert rows in join executor",
             &["actor_id", "side"],
             registry
         )
         .unwrap();
@@ -486,7 +477,6 @@ impl StreamingMetrics {
             join_lookup_miss_count,
             join_total_lookup_count,
             join_insert_cache_miss_count,
-            join_may_exist_true_count,
             join_actor_input_waiting_duration_ns,
             join_match_duration_ns,
             join_barrier_align_duration,
diff --git a/src/stream/src/executor/mview/materialize.rs b/src/stream/src/executor/mview/materialize.rs
index 675d35406e896..5ae1d4e5cad63 100644
--- a/src/stream/src/executor/mview/materialize.rs
+++ b/src/stream/src/executor/mview/materialize.rs
@@ -29,7 +29,7 @@ use risingwave_common::types::DataType;
 use risingwave_common::util::chunk_coalesce::DataChunkBuilder;
 use risingwave_common::util::iter_util::{ZipEqDebug, ZipEqFast};
 use risingwave_common::util::ordered::OrderedRowSerde;
-use risingwave_common::util::sort_util::OrderPair;
+use risingwave_common::util::sort_util::ColumnOrder;
 use risingwave_common::util::value_encoding::{BasicSerde, ValueRowSerde};
 use risingwave_pb::catalog::Table;
 use risingwave_storage::mem_table::KeyOp;
@@ -69,7 +69,7 @@ impl<S: StateStore> MaterializeExecutor<S> {
     pub async fn new(
         input: BoxedExecutor,
         store: S,
-        key: Vec<OrderPair>,
+        key: Vec<ColumnOrder>,
         executor_id: u64,
         actor_context: ActorContextRef,
         vnodes: Option<Arc<Bitmap>>,
@@ -77,7 +77,7 @@ impl<S: StateStore> MaterializeExecutor<S> {
         watermark_epoch: AtomicU64Ref,
         conflict_behavior: ConflictBehavior,
     ) -> Self {
-        let arrange_columns: Vec<usize> = key.iter().map(|k| k.column_idx).collect();
+        let arrange_columns: Vec<usize> = key.iter().map(|k| k.column_index).collect();
 
         let schema = input.schema().clone();
 
@@ -188,13 +188,13 @@ impl<S: StateStore> MaterializeExecutor<S> {
         input: BoxedExecutor,
         store: S,
         table_id: TableId,
-        keys: Vec<OrderPair>,
+        keys: Vec<ColumnOrder>,
         column_ids: Vec<ColumnId>,
executor_id: u64, watermark_epoch: AtomicU64Ref, conflict_behavior: ConflictBehavior, ) -> Self { - let arrange_columns: Vec = keys.iter().map(|k| k.column_idx).collect(); + let arrange_columns: Vec = keys.iter().map(|k| k.column_index).collect(); let arrange_order_types = keys.iter().map(|k| k.order_type).collect(); let schema = input.schema().clone(); let columns = column_ids @@ -583,7 +583,7 @@ mod tests { use risingwave_common::catalog::{ColumnDesc, ConflictBehavior, Field, Schema, TableId}; use risingwave_common::row::OwnedRow; use risingwave_common::types::DataType; - use risingwave_common::util::sort_util::{OrderPair, OrderType}; + use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; use risingwave_hummock_sdk::HummockReadEpoch; use risingwave_storage::memory::MemoryStateStore; use risingwave_storage::table::batch_table::storage_table::StorageTable; @@ -629,7 +629,7 @@ mod tests { ], ); - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -649,7 +649,7 @@ mod tests { Box::new(source), memory_state_store, table_id, - vec![OrderPair::new(0, OrderType::Ascending)], + vec![ColumnOrder::new(0, OrderType::ascending())], column_ids, 1, Arc::new(AtomicU64::new(0)), @@ -746,7 +746,7 @@ mod tests { ], ); - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -766,7 +766,7 @@ mod tests { Box::new(source), memory_state_store, table_id, - vec![OrderPair::new(0, OrderType::Ascending)], + vec![ColumnOrder::new(0, OrderType::ascending())], column_ids, 1, Arc::new(AtomicU64::new(0)), @@ -879,7 +879,7 @@ mod tests { ], ); - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -899,7 +899,7 @@ mod tests { Box::new(source), memory_state_store, table_id, - vec![OrderPair::new(0, OrderType::Ascending)], + vec![ColumnOrder::new(0, OrderType::ascending())], column_ids, 1, Arc::new(AtomicU64::new(0)), @@ -1062,7 +1062,7 @@ mod tests { ], ); - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -1082,7 +1082,7 @@ mod tests { Box::new(source), memory_state_store, table_id, - vec![OrderPair::new(0, OrderType::Ascending)], + vec![ColumnOrder::new(0, OrderType::ascending())], column_ids, 1, Arc::new(AtomicU64::new(0)), @@ -1195,7 +1195,7 @@ mod tests { ], ); - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), ColumnDesc::unnamed(column_ids[1], DataType::Int32), @@ -1215,7 +1215,7 @@ mod tests { Box::new(source), memory_state_store, table_id, - vec![OrderPair::new(0, OrderType::Ascending)], + vec![ColumnOrder::new(0, OrderType::ascending())], column_ids, 1, Arc::new(AtomicU64::new(0)), diff --git a/src/stream/src/executor/mview/test_utils.rs b/src/stream/src/executor/mview/test_utils.rs index df060d1e43a92..72dd393cb25cd 100644 --- a/src/stream/src/executor/mview/test_utils.rs +++ 
b/src/stream/src/executor/mview/test_utils.rs @@ -25,7 +25,7 @@ use crate::common::table::state_table::StateTable; pub async fn gen_basic_table(row_count: usize) -> StorageTable { let state_store = MemoryStateStore::new(); - let order_types = vec![OrderType::Ascending, OrderType::Descending]; + let order_types = vec![OrderType::ascending(), OrderType::descending()]; let column_ids = vec![0.into(), 1.into(), 2.into()]; let column_descs = vec![ ColumnDesc::unnamed(column_ids[0], DataType::Int32), @@ -45,7 +45,7 @@ pub async fn gen_basic_table(row_count: usize) -> StorageTable state_store.clone(), TableId::from(0x42), column_descs.clone(), - vec![OrderType::Ascending], + vec![OrderType::ascending()], vec![0], vec![0, 1, 2], ); diff --git a/src/stream/src/executor/project.rs b/src/stream/src/executor/project.rs index b691ac4e7b71f..c7bcc5bf8969d 100644 --- a/src/stream/src/executor/project.rs +++ b/src/stream/src/executor/project.rs @@ -21,39 +21,17 @@ use risingwave_common::array::StreamChunk; use risingwave_common::catalog::{Field, Schema}; use risingwave_expr::expr::BoxedExpression; -use super::{ - ActorContextRef, Executor, ExecutorInfo, PkIndices, PkIndicesRef, SimpleExecutor, - SimpleExecutorWrapper, StreamExecutorResult, Watermark, -}; -use crate::common::InfallibleExpression; - -pub type ProjectExecutor = SimpleExecutorWrapper; - -impl ProjectExecutor { - pub fn new( - ctx: ActorContextRef, - input: Box, - pk_indices: PkIndices, - exprs: Vec, - execuotr_id: u64, - watermark_derivations: MultiMap, - ) -> Self { - let info = ExecutorInfo { - schema: input.schema().to_owned(), - pk_indices, - identity: "Project".to_owned(), - }; - SimpleExecutorWrapper { - input, - inner: SimpleProjectExecutor::new(ctx, info, exprs, execuotr_id, watermark_derivations), - } - } -} +use super::*; /// `ProjectExecutor` project data with the `expr`. The `expr` takes a chunk of data, /// and returns a new data chunk. And then, `ProjectExecutor` will insert, delete /// or update element into next operator according to the result of the expression. -pub struct SimpleProjectExecutor { +pub struct ProjectExecutor { + input: BoxedExecutor, + inner: Inner, +} + +struct Inner { ctx: ActorContextRef, info: ExecutorInfo, @@ -62,16 +40,28 @@ pub struct SimpleProjectExecutor { /// All the watermark derivations, (input_column_index, output_column_index). And the /// derivation expression is the project's expression itself. watermark_derivations: MultiMap, + + /// the selectivity threshold which should be in [0,1]. 
For chunks with selectivity less
+    /// than the threshold, the Project executor will construct a new chunk before expression evaluation.
+    materialize_selectivity_threshold: f64,
 }
 
-impl SimpleProjectExecutor {
+impl ProjectExecutor {
     pub fn new(
         ctx: ActorContextRef,
-        input_info: ExecutorInfo,
+        input: Box<dyn Executor>,
+        pk_indices: PkIndices,
         exprs: Vec<BoxedExpression>,
         executor_id: u64,
         watermark_derivations: MultiMap<usize, usize>,
+        materialize_selectivity_threshold: f64,
     ) -> Self {
+        let info = ExecutorInfo {
+            schema: input.schema().to_owned(),
+            pk_indices,
+            identity: "Project".to_owned(),
+        };
+
         let schema = Schema {
             fields: exprs
                 .iter()
@@ -79,47 +69,77 @@ impl SimpleProjectExecutor {
                 .collect_vec(),
         };
         Self {
-            ctx,
-            info: ExecutorInfo {
-                schema,
-                pk_indices: input_info.pk_indices,
-                identity: format!("ProjectExecutor {:X}", executor_id),
+            input,
+            inner: Inner {
+                ctx,
+                info: ExecutorInfo {
+                    schema,
+                    pk_indices: info.pk_indices,
+                    identity: format!("ProjectExecutor {:X}", executor_id),
+                },
+                exprs,
+                watermark_derivations,
+                materialize_selectivity_threshold,
             },
-            exprs,
-            watermark_derivations,
         }
     }
 }
 
-impl Debug for SimpleProjectExecutor {
+impl Debug for ProjectExecutor {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("ProjectExecutor")
-            .field("exprs", &self.exprs)
+            .field("exprs", &self.inner.exprs)
             .finish()
     }
 }
 
-impl SimpleExecutor for SimpleProjectExecutor {
-    fn map_filter_chunk(&self, chunk: StreamChunk) -> StreamExecutorResult<Option<StreamChunk>> {
-        let chunk = chunk.compact();
+impl Executor for ProjectExecutor {
+    fn schema(&self) -> &Schema {
+        &self.inner.info.schema
+    }
+
+    fn pk_indices(&self) -> PkIndicesRef<'_> {
+        &self.inner.info.pk_indices
+    }
+
+    fn identity(&self) -> &str {
+        &self.inner.info.identity
+    }
+
+    fn execute(self: Box<Self>) -> BoxedMessageStream {
+        self.inner.execute(self.input).boxed()
+    }
+}
 
+impl Inner {
+    async fn map_filter_chunk(
+        &self,
+        chunk: StreamChunk,
+    ) -> StreamExecutorResult<Option<StreamChunk>> {
+        let chunk = if chunk.selectivity() <= self.materialize_selectivity_threshold {
+            chunk.compact()
+        } else {
+            chunk
+        };
         let (data_chunk, ops) = chunk.into_parts();
+        let mut projected_columns = Vec::new();
 
-        let projected_columns = self
-            .exprs
-            .iter()
-            .map(|expr| {
-                Column::new(expr.eval_infallible(&data_chunk, |err| {
+        for expr in &self.exprs {
+            let evaluated_expr = expr
+                .eval_infallible(&data_chunk, |err| {
                     self.ctx.on_compute_error(err, &self.info.identity)
-                }))
-            })
-            .collect();
-
-        let new_chunk = StreamChunk::new(ops, projected_columns, None);
+                })
+                .await;
+            let new_column = Column::new(evaluated_expr);
+            projected_columns.push(new_column);
+        }
+        let (_, vis) = data_chunk.into_parts();
+        let vis = vis.into_visibility();
+        let new_chunk = StreamChunk::new(ops, projected_columns, vis);
         Ok(Some(new_chunk))
     }
 
-    fn handle_watermark(&self, watermark: Watermark) -> StreamExecutorResult<Vec<Watermark>> {
+    async fn handle_watermark(&self, watermark: Watermark) -> StreamExecutorResult<Vec<Watermark>> {
         let out_col_indices = match self.watermark_derivations.get_vec(&watermark.col_idx) {
             Some(v) => v,
             None => return Ok(vec![]),
@@ -127,16 +147,15 @@ impl SimpleExecutor for SimpleProjectExecutor {
         let mut ret = vec![];
         for out_col_idx in out_col_indices {
             let out_col_idx = *out_col_idx;
-            let derived_watermark = watermark.clone().transform_with_expr(
-                &self.exprs[out_col_idx],
-                out_col_idx,
-                |err| {
+            let derived_watermark = watermark
+                .clone()
+                .transform_with_expr(&self.exprs[out_col_idx], out_col_idx, |err| {
                     self.ctx.on_compute_error(
                         err,
                         &(self.info.identity.to_string() + "(when
computing watermark)"), ) - }, - ); + }) + .await; if let Some(derived_watermark) = derived_watermark { ret.push(derived_watermark); } else { @@ -149,16 +168,25 @@ impl SimpleExecutor for SimpleProjectExecutor { Ok(ret) } - fn schema(&self) -> &Schema { - &self.info.schema - } - - fn pk_indices(&self) -> PkIndicesRef<'_> { - &self.info.pk_indices - } - - fn identity(&self) -> &str { - &self.info.identity + #[try_stream(ok = Message, error = StreamExecutorError)] + async fn execute(self, input: BoxedExecutor) { + #[for_await] + for msg in input.execute() { + let msg = msg?; + match msg { + Message::Watermark(w) => { + let watermarks = self.handle_watermark(w).await?; + for watermark in watermarks { + yield Message::Watermark(watermark) + } + } + Message::Chunk(chunk) => match self.map_filter_chunk(chunk).await? { + Some(new_chunk) => yield Message::Chunk(new_chunk), + None => continue, + }, + m => yield m, + } + } } } @@ -169,8 +197,8 @@ mod tests { use risingwave_common::array::StreamChunk; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::types::DataType; - use risingwave_expr::expr::{new_binary_expr, InputRefExpression, LiteralExpression}; - use risingwave_pb::expr::expr_node::Type; + use risingwave_expr::expr::{build, Expression, InputRefExpression, LiteralExpression}; + use risingwave_pb::expr::expr_node::PbType; use super::super::test_utils::MockSource; use super::super::*; @@ -197,13 +225,13 @@ mod tests { }; let source = MockSource::with_chunks(schema, PkIndices::new(), vec![chunk1, chunk2]); - let left_expr = InputRefExpression::new(DataType::Int64, 0); - let right_expr = InputRefExpression::new(DataType::Int64, 1); - let test_expr = new_binary_expr( - Type::Add, + let test_expr = build( + PbType::Add, DataType::Int64, - Box::new(left_expr), - Box::new(right_expr), + vec![ + InputRefExpression::new(DataType::Int64, 0).boxed(), + InputRefExpression::new(DataType::Int64, 1).boxed(), + ], ) .unwrap(); @@ -214,6 +242,7 @@ mod tests { vec![test_expr], 1, MultiMap::new(), + 0.0, )); let mut project = project.execute(); @@ -250,23 +279,26 @@ mod tests { }; let (mut tx, source) = MockSource::channel(schema, PkIndices::new()); - let a_left_expr = InputRefExpression::new(DataType::Int64, 0); - let a_right_expr = LiteralExpression::new(DataType::Int64, Some(ScalarImpl::Int64(1))); - let a_expr = new_binary_expr( - Type::Add, + let a_expr = build( + PbType::Add, DataType::Int64, - Box::new(a_left_expr), - Box::new(a_right_expr), + vec![ + InputRefExpression::new(DataType::Int64, 0).boxed(), + LiteralExpression::new(DataType::Int64, Some(ScalarImpl::Int64(1))).boxed(), + ], ) .unwrap(); - let b_left_expr = InputRefExpression::new(DataType::Int64, 0); - let b_right_expr = LiteralExpression::new(DataType::Int64, Some(ScalarImpl::Int64(1))); - let b_expr = new_binary_expr( - Type::Subtract, + let b_expr = build( + PbType::Subtract, DataType::Int64, - Box::new(b_left_expr), - Box::new(b_right_expr), + vec![ + Box::new(InputRefExpression::new(DataType::Int64, 0)), + Box::new(LiteralExpression::new( + DataType::Int64, + Some(ScalarImpl::Int64(1)), + )), + ], ) .unwrap(); @@ -277,6 +309,7 @@ mod tests { vec![a_expr, b_expr], 1, MultiMap::from_iter(vec![(0, 0), (0, 1)].into_iter()), + 0.0, )); let mut project = project.execute(); diff --git a/src/stream/src/executor/project_set.rs b/src/stream/src/executor/project_set.rs index 2d5c1643b9303..046db5003c56e 100644 --- a/src/stream/src/executor/project_set.rs +++ b/src/stream/src/executor/project_set.rs @@ -126,11 +126,12 @@ impl 
ProjectSetExecutor { .collect_vec(); let mut ret_ops = vec![]; - let results: Vec<_> = self - .select_list - .iter() - .map(|select_item| select_item.eval(&data_chunk)) - .try_collect()?; + let mut results = Vec::with_capacity(self.select_list.len()); + for select_item in &self.select_list { + let result = select_item.eval(&data_chunk).await?; + results.push(result); + } + assert!( results .iter() @@ -209,11 +210,9 @@ mod tests { use risingwave_common::array::StreamChunk; use risingwave_common::catalog::{Field, Schema}; use risingwave_common::types::DataType; - use risingwave_expr::expr::{ - new_binary_expr, Expression, InputRefExpression, LiteralExpression, - }; + use risingwave_expr::expr::{build, Expression, InputRefExpression, LiteralExpression}; use risingwave_expr::table_function::repeat_tf; - use risingwave_pb::expr::expr_node::Type; + use risingwave_pb::expr::expr_node::PbType; use super::super::test_utils::MockSource; use super::super::*; @@ -242,15 +241,16 @@ mod tests { }; let source = MockSource::with_chunks(schema, PkIndices::new(), vec![chunk1, chunk2]); - let left_expr = InputRefExpression::new(DataType::Int64, 0); - let right_expr = InputRefExpression::new(DataType::Int64, 1); - let test_expr = new_binary_expr( - Type::Add, + let test_expr = build( + PbType::Add, DataType::Int64, - Box::new(left_expr), - Box::new(right_expr), + vec![ + Box::new(InputRefExpression::new(DataType::Int64, 0)), + Box::new(InputRefExpression::new(DataType::Int64, 1)), + ], ) .unwrap(); + let tf1 = repeat_tf( LiteralExpression::new(DataType::Int32, Some(1_i32.into())).boxed(), 1, diff --git a/src/stream/src/executor/rearranged_chain.rs b/src/stream/src/executor/rearranged_chain.rs index fd3cf9591bab9..c713eb231189c 100644 --- a/src/stream/src/executor/rearranged_chain.rs +++ b/src/stream/src/executor/rearranged_chain.rs @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
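Before the deletion that follows: the hand-rolled `mapping`/`mapping_watermark` helpers in rearranged_chain.rs are superseded by `Watermark::transform_with_indices`, added to mod.rs earlier in this patch. The heart of that mapping is a single `position` lookup; a standalone sketch:

/// Returns the watermark column's new index under `output_indices`, or
/// `None` if the column is projected away (the watermark is then dropped).
fn transform_with_indices(col_idx: usize, output_indices: &[usize]) -> Option<usize> {
    output_indices.iter().position(|p| *p == col_idx)
}

fn main() {
    // Column 2 survives the projection `[2, 0]` and moves to index 0.
    assert_eq!(transform_with_indices(2, &[2, 0]), Some(0));
    // Column 1 is projected away, so no watermark can be forwarded for it.
    assert_eq!(transform_with_indices(1, &[2, 0]), None);
}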
-use std::sync::Arc; - use futures::channel::{mpsc, oneshot}; use futures::stream::select_with_strategy; use futures::{stream, StreamExt}; @@ -25,7 +23,7 @@ use super::error::StreamExecutorError; use super::{ expect_first_barrier, Barrier, BoxedExecutor, Executor, ExecutorInfo, Message, MessageStream, }; -use crate::executor::{BoxedMessageStream, PkIndices, Watermark}; +use crate::executor::PkIndices; use crate::task::{ActorId, CreateMviewProgress}; /// `ChainExecutor` is an executor that enables synchronization between the existing stream and @@ -40,8 +38,6 @@ pub struct RearrangedChainExecutor { upstream: BoxedExecutor, - upstream_indices: Arc<[usize]>, - progress: CreateMviewProgress, actor_id: ActorId, @@ -49,34 +45,6 @@ pub struct RearrangedChainExecutor { info: ExecutorInfo, } -fn mapping(upstream_indices: &[usize], msg: Message) -> Option { - match msg { - Message::Watermark(watermark) => { - mapping_watermark(watermark, upstream_indices).map(Message::Watermark) - } - Message::Chunk(chunk) => { - let (ops, columns, visibility) = chunk.into_inner(); - let mapped_columns = upstream_indices - .iter() - .map(|&i| columns[i].clone()) - .collect(); - Some(Message::Chunk(StreamChunk::new( - ops, - mapped_columns, - visibility, - ))) - } - Message::Barrier(_) => Some(msg), - } -} - -fn mapping_watermark(watermark: Watermark, upstream_indices: &[usize]) -> Option { - upstream_indices - .iter() - .position(|&idx| idx == watermark.col_idx) - .map(|idx| watermark.with_idx(idx)) -} - #[derive(Debug)] enum RearrangedMessage { RearrangedBarrier(Barrier), @@ -118,7 +86,6 @@ impl RearrangedChainExecutor { pub fn new( snapshot: BoxedExecutor, upstream: BoxedExecutor, - upstream_indices: Vec, progress: CreateMviewProgress, schema: Schema, pk_indices: PkIndices, @@ -131,7 +98,6 @@ impl RearrangedChainExecutor { }, snapshot, upstream, - upstream_indices: upstream_indices.into(), actor_id: progress.actor_id(), progress, } @@ -139,13 +105,7 @@ impl RearrangedChainExecutor { #[try_stream(ok = Message, error = StreamExecutorError)] async fn execute_inner(mut self) { - // 0. Project the upstream with `upstream_indices`. - let upstream_indices = self.upstream_indices.clone(); - - let mut upstream = Box::pin(Self::mapping_stream( - self.upstream.execute(), - &upstream_indices, - )); + let mut upstream = Box::pin(self.upstream.execute()); // 1. Poll the upstream to get the first barrier. let first_barrier = expect_first_barrier(&mut upstream).await?; @@ -327,17 +287,6 @@ impl RearrangedChainExecutor { } } } - - #[try_stream(ok = Message, error = StreamExecutorError)] - async fn mapping_stream(stream: BoxedMessageStream, upstream_indices: &[usize]) { - #[for_await] - for msg in stream { - match mapping(upstream_indices, msg?) 
{ - Some(msg) => yield msg, - None => continue, - } - } - } } impl Executor for RearrangedChainExecutor { diff --git a/src/stream/src/executor/row_id_gen.rs b/src/stream/src/executor/row_id_gen.rs index 8489893d435d6..d101fe485c5f0 100644 --- a/src/stream/src/executor/row_id_gen.rs +++ b/src/stream/src/executor/row_id_gen.rs @@ -15,13 +15,14 @@ use futures::StreamExt; use futures_async_stream::try_stream; use risingwave_common::array::column::Column; +use risingwave_common::array::serial_array::{Serial, SerialArrayBuilder}; use risingwave_common::array::stream_chunk::Ops; -use risingwave_common::array::{ArrayBuilder, I64ArrayBuilder, Op, StreamChunk}; +use risingwave_common::array::{ArrayBuilder, Op, StreamChunk}; use risingwave_common::buffer::Bitmap; use risingwave_common::catalog::Schema; use risingwave_common::util::epoch::UNIX_RISINGWAVE_DATE_EPOCH; use risingwave_common::util::iter_util::ZipEqFast; -use risingwave_source::row_id::RowIdGenerator; +use risingwave_common::util::row_id::RowIdGenerator; use super::{ expect_first_barrier, ActorContextRef, BoxedExecutor, Executor, PkIndices, PkIndicesRef, @@ -77,13 +78,13 @@ impl RowIdGenExecutor { /// Generate a row ID column according to ops. async fn gen_row_id_column_by_op(&mut self, column: &Column, ops: Ops<'_>) -> Column { let len = column.array_ref().len(); - let mut builder = I64ArrayBuilder::new(len); + let mut builder = SerialArrayBuilder::new(len); for (datum, op) in column.array_ref().iter().zip_eq_fast(ops) { // Only refill row_id for insert operation. match op { - Op::Insert => builder.append(Some(self.row_id_generator.next().await)), - _ => builder.append(Some(i64::try_from(datum.unwrap()).unwrap())), + Op::Insert => builder.append(Some(self.row_id_generator.next().await.into())), + _ => builder.append(Some(Serial::try_from(datum.unwrap()).unwrap())), } } @@ -159,7 +160,7 @@ mod tests { #[tokio::test] async fn test_row_id_gen_executor() { let schema = Schema::new(vec![ - Field::unnamed(DataType::Int64), + Field::unnamed(DataType::Serial), Field::unnamed(DataType::Int64), ]); let pk_indices = vec![0]; @@ -184,7 +185,7 @@ mod tests { // Insert operation let chunk1 = StreamChunk::from_pretty( - " I I + " SRL I + . 1 + . 2 + . 6 @@ -198,7 +199,7 @@ mod tests { .unwrap() .into_chunk() .unwrap(); - let row_id_col: &PrimitiveArray = chunk.column_at(row_id_index).array_ref().into(); + let row_id_col: &PrimitiveArray = chunk.column_at(row_id_index).array_ref().into(); row_id_col.iter().for_each(|row_id| { // Should generate row id for insert operations. assert!(row_id.is_some()); @@ -206,7 +207,7 @@ mod tests { // Update operation let chunk2 = StreamChunk::from_pretty( - " I I + " SRL I U- 32874283748 1 U+ 32874283748 999", ); @@ -218,14 +219,14 @@ mod tests { .unwrap() .into_chunk() .unwrap(); - let row_id_col: &PrimitiveArray = chunk.column_at(row_id_index).array_ref().into(); + let row_id_col: &PrimitiveArray = chunk.column_at(row_id_index).array_ref().into(); // Should not generate row id for update operations. 
- assert_eq!(row_id_col.value_at(0).unwrap(), 32874283748); - assert_eq!(row_id_col.value_at(1).unwrap(), 32874283748); + assert_eq!(row_id_col.value_at(0).unwrap(), Serial::from(32874283748)); + assert_eq!(row_id_col.value_at(1).unwrap(), Serial::from(32874283748)); // Delete operation let chunk3 = StreamChunk::from_pretty( - " I I + " SRL I - 84629409685 1", ); tx.push_chunk(chunk3); @@ -236,8 +237,8 @@ mod tests { .unwrap() .into_chunk() .unwrap(); - let row_id_col: &PrimitiveArray = chunk.column_at(row_id_index).array_ref().into(); + let row_id_col: &PrimitiveArray = chunk.column_at(row_id_index).array_ref().into(); // Should not generate row id for delete operations. - assert_eq!(row_id_col.value_at(0).unwrap(), 84629409685); + assert_eq!(row_id_col.value_at(0).unwrap(), Serial::from(84629409685)); } } diff --git a/src/stream/src/executor/simple.rs b/src/stream/src/executor/simple.rs deleted file mode 100644 index ccbdbabecba2f..0000000000000 --- a/src/stream/src/executor/simple.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2023 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use futures::StreamExt; -use futures_async_stream::try_stream; -use risingwave_common::catalog::Schema; - -use super::error::{StreamExecutorError, StreamExecutorResult}; -use super::{ - BoxedExecutor, BoxedMessageStream, Executor, Message, PkIndicesRef, StreamChunk, Watermark, -}; - -/// Executor which can handle [`StreamChunk`]s one by one. -pub trait SimpleExecutor: Send + Sync + 'static { - /// convert a single chunk to zero or one chunks. - fn map_filter_chunk(&self, chunk: StreamChunk) -> StreamExecutorResult>; - - /// convert a single chunk to zero or one chunks. - fn handle_watermark(&self, watermark: Watermark) -> StreamExecutorResult>; - - /// See [`super::Executor::schema`]. - fn schema(&self) -> &Schema; - - /// See [`super::Executor::pk_indices`]. - fn pk_indices(&self) -> PkIndicesRef<'_>; - - /// See [`super::Executor::identity`]. - fn identity(&self) -> &str; -} - -/// The struct wraps a [`SimpleExecutor`], and implements the interface of [`Executor`]. 
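For orientation while reading the removal of simple.rs here: the `SimpleExecutor` trait forced synchronous `map_filter_chunk`/`handle_watermark` hooks, which no longer fits now that expression evaluation is async, so Project and friends implement `Executor` directly. A skeletal stand-in for the replacement pattern, using the stable `async-stream` crate and toy message types rather than the codebase's `futures_async_stream` and real `Message`:

use async_stream::try_stream;
use futures::{Stream, StreamExt};

enum Msg {
    Chunk(Vec<i64>),
    Barrier(u64),
}

// Stands in for async expression evaluation on a chunk.
async fn map_chunk(chunk: Vec<i64>) -> anyhow::Result<Vec<i64>> {
    Ok(chunk.into_iter().map(|v| v + 1).collect())
}

fn execute(
    mut input: impl Stream<Item = anyhow::Result<Msg>> + Unpin,
) -> impl Stream<Item = anyhow::Result<Msg>> {
    try_stream! {
        while let Some(msg) = input.next().await {
            match msg? {
                // The per-chunk handler may `.await`, which the synchronous
                // `SimpleExecutor` hooks could not.
                Msg::Chunk(chunk) => yield Msg::Chunk(map_chunk(chunk).await?),
                barrier @ Msg::Barrier(_) => yield barrier,
            }
        }
    }
}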
-pub struct SimpleExecutorWrapper { - pub(super) input: BoxedExecutor, - pub(super) inner: E, -} - -impl Executor for SimpleExecutorWrapper -where - E: SimpleExecutor, -{ - fn schema(&self) -> &Schema { - self.inner.schema() - } - - fn pk_indices(&self) -> PkIndicesRef<'_> { - self.inner.pk_indices() - } - - fn identity(&self) -> &str { - self.inner.identity() - } - - fn execute(self: Box) -> BoxedMessageStream { - self.execute_inner().boxed() - } -} - -impl SimpleExecutorWrapper -where - E: SimpleExecutor, -{ - #[try_stream(ok = Message, error = StreamExecutorError)] - async fn execute_inner(self) { - let input = self.input.execute(); - let inner = self.inner; - #[for_await] - for msg in input { - let msg = msg?; - match msg { - Message::Watermark(w) => { - let watermarks = inner.handle_watermark(w)?; - for watermark in watermarks { - yield Message::Watermark(watermark) - } - } - Message::Chunk(chunk) => match inner.map_filter_chunk(chunk)? { - Some(new_chunk) => yield Message::Chunk(new_chunk), - None => continue, - }, - m => yield m, - } - } - } -} diff --git a/src/stream/src/executor/sink.rs b/src/stream/src/executor/sink.rs index f0c82b4e397ce..434fbe8c10ed2 100644 --- a/src/stream/src/executor/sink.rs +++ b/src/stream/src/executor/sink.rs @@ -85,7 +85,7 @@ impl SinkExecutor { input: materialize_executor, metrics, config, - identity: format!("SinkExecutor_{:?}", executor_id), + identity: format!("SinkExecutor {:X?}", executor_id), pk_indices, schema, connector_params, diff --git a/src/stream/src/executor/sort.rs b/src/stream/src/executor/sort.rs index 61dcf9d01b9d1..fb700b6ddabcf 100644 --- a/src/stream/src/executor/sort.rs +++ b/src/stream/src/executor/sort.rs @@ -217,7 +217,7 @@ impl SortExecutor { // Update the vnode bitmap for the state table if asked. Also update the buffer. 
if let Some(vnode_bitmap) = barrier.as_update_vnode_bitmap(self.context.id) { - let prev_vnode_bitmap = + let (prev_vnode_bitmap, _cache_may_stale) = self.state_table.update_vnode_bitmap(vnode_bitmap.clone()); self.fill_buffer(Some(&prev_vnode_bitmap), &vnode_bitmap) .await?; @@ -247,7 +247,7 @@ impl SortExecutor { let no_longer_owned_vnodes = Bitmap::bit_saturate_subtract(prev_vnode_bitmap, curr_vnode_bitmap); self.buffer.retain(|(_, pk), _| { - let vnode = self.state_table.compute_vnode(pk); + let vnode = self.state_table.compute_vnode_by_pk(pk); !no_longer_owned_vnodes.is_set(vnode.to_index()) }); } @@ -504,7 +504,7 @@ mod tests { ColumnDesc::unnamed(ColumnId::new(0), DataType::Int64), ColumnDesc::unnamed(ColumnId::new(1), DataType::Int64), ]; - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let pk_indices = create_pk_indices(); StateTable::new_without_distribution( memory_state_store, diff --git a/src/stream/src/executor/sort_buffer.rs b/src/stream/src/executor/sort_buffer.rs index 8d65eba6caf03..f5769660247cc 100644 --- a/src/stream/src/executor/sort_buffer.rs +++ b/src/stream/src/executor/sort_buffer.rs @@ -287,7 +287,7 @@ mod tests { let row_pretty = |s: &str| OwnedRow::from_pretty_with_tys(&tys, s); - let order_types = vec![OrderType::Ascending]; + let order_types = vec![OrderType::ascending()]; let mut state_table = StateTable::new_without_distribution( state_store.clone(), table_id, diff --git a/src/stream/src/executor/source/source_executor.rs b/src/stream/src/executor/source/source_executor.rs index a500354e98dc2..8d457c85b24bd 100644 --- a/src/stream/src/executor/source/source_executor.rs +++ b/src/stream/src/executor/source/source_executor.rs @@ -488,21 +488,21 @@ impl Debug for SourceExecutor { #[cfg(test)] mod tests { - use std::sync::atomic::AtomicU64; + use std::time::Duration; use maplit::{convert_args, hashmap}; use risingwave_common::array::StreamChunk; - use risingwave_common::catalog::{ColumnId, ConflictBehavior, Field, Schema, TableId}; + use risingwave_common::catalog::{ColumnId, Field, Schema, TableId}; use risingwave_common::test_prelude::StreamChunkTestExt; use risingwave_common::types::DataType; - use risingwave_common::util::sort_util::{OrderPair, OrderType}; use risingwave_connector::source::datagen::DatagenSplit; use risingwave_pb::catalog::StreamSourceInfo; - use risingwave_pb::plan_common::RowFormatType as ProstRowFormatType; + use risingwave_pb::plan_common::PbRowFormatType; use risingwave_source::connector_test_utils::create_source_desc_builder; use risingwave_storage::memory::MemoryStateStore; use tokio::sync::mpsc::unbounded_channel; + use tracing_test::traced_test; use super::*; use crate::executor::ActorContext; @@ -519,7 +519,7 @@ mod tests { let pk_column_ids = vec![0]; let pk_indices = vec![0]; let source_info = StreamSourceInfo { - row_format: ProstRowFormatType::Native as i32, + row_format: PbRowFormatType::Native as i32, ..Default::default() }; let (barrier_tx, barrier_rx) = unbounded_channel::(); @@ -600,6 +600,7 @@ mod tests { ); } + #[traced_test] #[tokio::test] async fn test_split_change_mutation() { let table_id = TableId::default(); @@ -610,14 +611,14 @@ mod tests { let pk_column_ids = vec![0]; let pk_indices = vec![0_usize]; let source_info = StreamSourceInfo { - row_format: ProstRowFormatType::Native as i32, + row_format: PbRowFormatType::Native as i32, ..Default::default() }; let properties = convert_args!(hashmap!( "connector" => "datagen", - "fields.v1.min" => "1", - "fields.v1.max" => 
"1000", - "fields.v1.seed" => "12345", + "fields.v1.kind" => "sequence", + "fields.v1.start" => "11", + "fields.v1.end" => "11111", )); let source_desc_builder = create_source_desc_builder( @@ -658,20 +659,7 @@ mod tests { u64::MAX, 1, ); - - let mut materialize = MaterializeExecutor::for_test( - Box::new(executor), - mem_state_store.clone(), - TableId::from(0x2333), - vec![OrderPair::new(0, OrderType::Ascending)], - column_ids, - 2, - Arc::new(AtomicU64::new(0)), - ConflictBehavior::NoCheck, - ) - .await - .boxed() - .execute(); + let mut handler = Box::new(executor).execute(); let init_barrier = Barrier::new_test_barrier(1).with_mutation(Mutation::Add { adds: HashMap::new(), @@ -687,11 +675,11 @@ mod tests { }); barrier_tx.send(init_barrier).unwrap(); - (materialize.next().await.unwrap().unwrap()) + (handler.next().await.unwrap().unwrap()) .into_barrier() .unwrap(); - let mut ready_chunks = materialize.ready_chunks(10); + let mut ready_chunks = handler.ready_chunks(10); let chunks = (ready_chunks.next().await.unwrap()) .into_iter() .map(|msg| msg.unwrap().into_chunk().unwrap()) @@ -701,10 +689,10 @@ mod tests { chunk_1, StreamChunk::from_pretty( " i - + 533 - + 833 - + 738 - + 344", + + 11 + + 14 + + 17 + + 20", ) ); @@ -719,6 +707,11 @@ mod tests { split_num: 3, start_offset: None, }), + SplitImpl::Datagen(DatagenSplit { + split_index: 2, + split_num: 3, + start_offset: None, + }), ]; let change_split_mutation = @@ -751,18 +744,22 @@ mod tests { let chunk_2 = StreamChunk::concat(chunks).sort_rows(); assert_eq!( chunk_2, - // mixed from datagen split 0 and 1 + // mixed from datagen split 0, 1 and 2 StreamChunk::from_pretty( " i + + 12 + + 13 + + 15 + + 16 + + 18 + + 19 + + 23 + + 26 + 29 - + 201 - + 344 - + 425 - + 525 - + 533 - + 833", + + 32", ) ); + tracing::debug!("chunk_2: {:?}", chunk_2.to_pretty_string()); let barrier = Barrier::new_test_barrier(3).with_mutation(Mutation::Pause); barrier_tx.send(barrier).unwrap(); diff --git a/src/stream/src/executor/source/state_table_handler.rs b/src/stream/src/executor/source/state_table_handler.rs index 0ceae9e36a6f0..f578db63221eb 100644 --- a/src/stream/src/executor/source/state_table_handler.rs +++ b/src/stream/src/executor/source/state_table_handler.rs @@ -15,20 +15,20 @@ use std::collections::HashSet; use std::ops::{Bound, Deref}; -use bytes::Bytes; use futures::{pin_mut, StreamExt}; +use risingwave_common::array::JsonbVal; use risingwave_common::catalog::{DatabaseId, SchemaId}; use risingwave_common::constants::hummock::PROPERTIES_RETENTION_SECOND_KEY; use risingwave_common::hash::VirtualNode; use risingwave_common::row::{OwnedRow, Row}; -use risingwave_common::types::{ScalarImpl, ScalarRefImpl}; +use risingwave_common::types::{ScalarImpl, ScalarRef, ScalarRefImpl}; use risingwave_common::util::epoch::EpochPair; use risingwave_common::{bail, row}; use risingwave_connector::source::{SplitId, SplitImpl, SplitMetaData}; use risingwave_hummock_sdk::key::next_key; use risingwave_pb::catalog::table::TableType; -use risingwave_pb::catalog::Table as ProstTable; -use risingwave_pb::common::{PbColumnOrder, PbDirection, PbOrderType}; +use risingwave_pb::catalog::PbTable; +use risingwave_pb::common::{PbColumnOrder, PbDirection, PbNullsAre, PbOrderType}; use risingwave_pb::data::data_type::TypeName; use risingwave_pb::data::DataType; use risingwave_pb::plan_common::{ColumnCatalog, ColumnDesc}; @@ -46,7 +46,7 @@ pub struct SourceStateTableHandler { } impl SourceStateTableHandler { - pub async fn from_table_catalog(table_catalog: &ProstTable, store: S) 
-> Self { + pub async fn from_table_catalog(table_catalog: &PbTable, store: S) -> Self { // The state of source should not be cleaned up by retention_seconds assert!(!table_catalog .properties @@ -96,8 +96,8 @@ impl SourceStateTableHandler { pin_mut!(iter); while let Some(row) = iter.next().await { let row = row?; - if let Some(ScalarRefImpl::Bytea(bytes)) = row.datum_at(1) { - let split = SplitImpl::restore_from_bytes(bytes)?; + if let Some(ScalarRefImpl::Jsonb(jsonb_ref)) = row.datum_at(1) { + let split = SplitImpl::restore_from_json(jsonb_ref.to_owned_scalar())?; let fs = split .as_fs() .unwrap_or_else(|| panic!("split {:?} is not fs", split)); @@ -110,14 +110,14 @@ impl SourceStateTableHandler { Ok(set) } - async fn set_complete(&mut self, key: SplitId, value: Bytes) -> StreamExecutorResult<()> { + async fn set_complete(&mut self, key: SplitId, value: JsonbVal) -> StreamExecutorResult<()> { let row = [ Some(Self::string_to_scalar(format!( "{}{}", COMPLETE_SPLIT_PREFIX, key.deref() ))), - Some(ScalarImpl::Bytea(Box::from(value.as_ref()))), + Some(ScalarImpl::Jsonb(value)), ]; if let Some(prev_row) = self.get(key).await? { self.state_store.delete(prev_row); @@ -137,17 +137,17 @@ impl SourceStateTableHandler { bail!("states require not null"); } else { for split in states { - self.set_complete(split.id(), split.encode_to_bytes()) + self.set_complete(split.id(), split.encode_to_json()) .await?; } } Ok(()) } - async fn set(&mut self, key: SplitId, value: Bytes) -> StreamExecutorResult<()> { + async fn set(&mut self, key: SplitId, value: JsonbVal) -> StreamExecutorResult<()> { let row = [ Some(Self::string_to_scalar(key.deref())), - Some(ScalarImpl::Bytea(Vec::from(value).into_boxed_slice())), + Some(ScalarImpl::Jsonb(value)), ]; match self.get(key).await? { Some(prev_row) => { @@ -173,7 +173,7 @@ impl SourceStateTableHandler { bail!("states require not null"); } else { for split_impl in states { - self.set(split_impl.id(), split_impl.encode_to_bytes()) + self.set(split_impl.id(), split_impl.encode_to_json()) .await?; } } @@ -188,7 +188,9 @@ impl SourceStateTableHandler { Ok(match self.get(stream_source_split.id()).await? { None => None, Some(row) => match row.datum_at(1) { - Some(ScalarRefImpl::Bytea(bytes)) => Some(SplitImpl::restore_from_bytes(bytes)?), + Some(ScalarRefImpl::Jsonb(jsonb_ref)) => { + Some(SplitImpl::restore_from_json(jsonb_ref.to_owned_scalar())?) + } _ => unreachable!(), }, }) @@ -197,7 +199,7 @@ impl SourceStateTableHandler { // align with schema defined in `LogicalSource::infer_internal_table_catalog`. The function is used // for test purpose and should not be used in production. 
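The hunks above replace the opaque `Bytea` encoding of split state with `Jsonb` (`encode_to_json` / `restore_from_json`), so the persisted state is inspectable with plain SQL and tolerant of additive schema changes; the `default_source_internal_table` below swaps the value column to `Jsonb` accordingly. A minimal sketch of that round trip with `serde_json`, using a hypothetical `KafkaSplitState` in place of the real `SplitImpl`:

```rust
use serde::{Deserialize, Serialize};
use serde_json::Value;

// Hypothetical stand-in for `SplitImpl`; the real type carries
// per-connector metadata and offsets.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct KafkaSplitState {
    partition: i32,
    start_offset: Option<i64>,
}

fn encode_to_json(split: &KafkaSplitState) -> Value {
    serde_json::to_value(split).expect("split state is serializable")
}

fn restore_from_json(value: Value) -> serde_json::Result<KafkaSplitState> {
    serde_json::from_value(value)
}

fn main() -> serde_json::Result<()> {
    let split = KafkaSplitState { partition: 0, start_offset: Some(42) };
    let json = encode_to_json(&split);
    // Unlike raw bytes, the stored value can be read back by humans and
    // round-trips losslessly even if new optional fields are added later.
    assert_eq!(restore_from_json(json)?, split);
    Ok(())
}
```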
-pub fn default_source_internal_table(id: u32) -> ProstTable { +pub fn default_source_internal_table(id: u32) -> PbTable { let make_column = |column_type: TypeName, column_id: i32| -> ColumnCatalog { ColumnCatalog { column_desc: Some(ColumnDesc { @@ -214,9 +216,9 @@ pub fn default_source_internal_table(id: u32) -> ProstTable { let columns = vec![ make_column(TypeName::Varchar, 0), - make_column(TypeName::Bytea, 1), + make_column(TypeName::Jsonb, 1), ]; - ProstTable { + PbTable { id, schema_id: SchemaId::placeholder().schema_id, database_id: DatabaseId::placeholder().database_id, @@ -228,6 +230,7 @@ pub fn default_source_internal_table(id: u32) -> ProstTable { column_index: 0, order_type: Some(PbOrderType { direction: PbDirection::Ascending as _, + nulls_are: PbNullsAre::Largest as _, }), }], ..Default::default() @@ -243,6 +246,7 @@ pub(crate) mod tests { use risingwave_common::util::epoch::EpochPair; use risingwave_connector::source::kafka::KafkaSplit; use risingwave_storage::memory::MemoryStateStore; + use serde_json::Value; use super::*; @@ -254,8 +258,10 @@ pub(crate) mod tests { .await; let a: Arc = String::from("a").into(); let a: Datum = Some(ScalarImpl::Utf8(a.as_ref().into())); - let b: Arc = String::from("b").into(); - let b: Datum = Some(ScalarImpl::Utf8(b.as_ref().into())); + let b: JsonbVal = serde_json::from_str::("{\"k1\": \"v1\", \"k2\": 11}") + .unwrap() + .into(); + let b: Datum = Some(ScalarImpl::Jsonb(b)); let init_epoch_num = 100100; let init_epoch = EpochPair::new_test_epoch(init_epoch_num); @@ -280,6 +286,7 @@ pub(crate) mod tests { .await; let split_impl = SplitImpl::Kafka(KafkaSplit::new(0, Some(0), None, "test".into())); let serialized = split_impl.encode_to_bytes(); + let serialized_json = split_impl.encode_to_json(); let epoch_1 = EpochPair::new_test_epoch(1); let epoch_2 = EpochPair::new_test_epoch(2); @@ -299,6 +306,7 @@ pub(crate) mod tests { { Some(s) => { assert_eq!(s.encode_to_bytes(), serialized); + assert_eq!(s.encode_to_json(), serialized_json); } None => unreachable!(), } diff --git a/src/stream/src/executor/subtask.rs b/src/stream/src/executor/subtask.rs index c4873087c568a..4cc5cc512d821 100644 --- a/src/stream/src/executor/subtask.rs +++ b/src/stream/src/executor/subtask.rs @@ -18,7 +18,9 @@ use tokio::sync::mpsc; use tokio::sync::mpsc::error::SendError; use tokio_stream::wrappers::ReceiverStream; -use super::{BoxedExecutor, Executor, ExecutorInfo, MessageStreamItem}; +use super::actor::spawn_blocking_drop_stream; +use super::{BoxedExecutor, Executor, ExecutorInfo, Message, MessageStreamItem}; +use crate::task::ActorId; /// Handle used to drive the subtask. pub type SubtaskHandle = impl Future + Send + 'static; @@ -59,7 +61,7 @@ impl Executor for SubtaskRxExecutor { /// Used when there're multiple stateful executors in an actor. These subtasks can be concurrently /// executed to improve the I/O performance, while the computing resource can be still bounded to a /// single thread. -pub fn wrap(input: BoxedExecutor) -> (SubtaskHandle, SubtaskRxExecutor) { +pub fn wrap(input: BoxedExecutor, actor_id: ActorId) -> (SubtaskHandle, SubtaskRxExecutor) { let (tx, rx) = mpsc::channel(1); let rx_executor = SubtaskRxExecutor { info: ExecutorInfo { @@ -71,7 +73,18 @@ pub fn wrap(input: BoxedExecutor) -> (SubtaskHandle, SubtaskRxExecutor) { let handle = async move { let mut input = input.execute(); + while let Some(item) = input.next().await { + // Decide whether to stop the subtask. 
We explicitly do this instead of relying on the + // termination of the input stream, because we don't want to exhaust the stream, which + // causes the stream dropped in the scope of the current async task and blocks other + // actors. See `spawn_blocking_drop_stream` for more details. + let to_stop = match &item { + Ok(Message::Barrier(barrier)) => barrier.is_stop_or_update_drop_actor(actor_id), + Ok(_) => false, + Err(_) => true, + }; + // It's possible that the downstream itself yields an error (e.g. from remote input) and // finishes, so we may fail to send the message. In this case, we can simply ignore the // send error and exit as well. If the message itself is another error, log it. @@ -84,7 +97,13 @@ pub fn wrap(input: BoxedExecutor) -> (SubtaskHandle, SubtaskRxExecutor) { } break; } + + if to_stop { + break; + } } + + spawn_blocking_drop_stream(input).await; } .instrument_await("Subtask"); diff --git a/src/stream/src/executor/temporal_join.rs b/src/stream/src/executor/temporal_join.rs new file mode 100644 index 0000000000000..6b119047447cb --- /dev/null +++ b/src/stream/src/executor/temporal_join.rs @@ -0,0 +1,306 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::alloc::Global; +use std::sync::Arc; + +use either::Either; +use futures::stream::{self, PollNext}; +use futures::{StreamExt, TryStreamExt}; +use futures_async_stream::try_stream; +use local_stats_alloc::{SharedStatsAlloc, StatsAlloc}; +use lru::DefaultHasher; +use risingwave_common::array::{Op, StreamChunk}; +use risingwave_common::catalog::Schema; +use risingwave_common::row::{OwnedRow, Row, RowExt}; +use risingwave_common::util::iter_util::ZipEqFast; +use risingwave_expr::expr::BoxedExpression; +use risingwave_hummock_sdk::{HummockEpoch, HummockReadEpoch}; +use risingwave_storage::table::batch_table::storage_table::StorageTable; +use risingwave_storage::StateStore; + +use super::{Barrier, Executor, Message, MessageStream, StreamExecutorError, StreamExecutorResult}; +use crate::cache::{new_with_hasher_in, ManagedLruCache}; +use crate::common::StreamChunkBuilder; +use crate::executor::monitor::StreamingMetrics; +use crate::executor::{ActorContextRef, BoxedExecutor, JoinType, JoinTypePrimitive, PkIndices}; +use crate::task::AtomicU64Ref; + +pub struct TemporalJoinExecutor { + ctx: ActorContextRef, + left: BoxedExecutor, + right: BoxedExecutor, + right_table: TemporalSide, + left_join_keys: Vec, + right_join_keys: Vec, + null_safe: Vec, + condition: Option, + output_indices: Vec, + pk_indices: PkIndices, + schema: Schema, + chunk_size: usize, + identity: String, + // TODO: update metrics + #[allow(dead_code)] + metrics: Arc, +} + +struct TemporalSide { + source: StorageTable, + table_output_indices: Vec, + cache: ManagedLruCache, DefaultHasher, SharedStatsAlloc>, +} + +impl TemporalSide { + async fn lookup( + &mut self, + key: impl Row, + epoch: HummockEpoch, + ) -> StreamExecutorResult> { + let key = key.into_owned_row(); + Ok(match self.cache.get(&key) { + 
Some(res) => res.clone(), + None => { + let res = self + .source + .get_row(key.clone(), HummockReadEpoch::NoWait(epoch)) + .await? + .map(|row| row.project(&self.table_output_indices).into_owned_row()); + self.cache.put(key, res.clone()); + res + } + }) + } + + fn update(&mut self, payload: Vec, join_keys: &[usize], epoch: u64) { + payload.iter().flat_map(|c| c.rows()).for_each(|(op, row)| { + let key = row.project(join_keys).into_owned_row(); + if let Some(value) = self.cache.get_mut(&key) { + match op { + Op::Insert | Op::UpdateInsert => *value = Some(row.into_owned_row()), + Op::Delete | Op::UpdateDelete => *value = None, + }; + } + }); + self.cache.update_epoch(epoch); + } +} + +enum InternalMessage { + Chunk(StreamChunk), + Barrier(Vec, Barrier), +} + +#[try_stream(ok = StreamChunk, error = StreamExecutorError)] +pub async fn chunks_until_barrier(stream: impl MessageStream, expected_barrier: Barrier) { + #[for_await] + for item in stream { + match item? { + Message::Watermark(_) => { + todo!("https://github.com/risingwavelabs/risingwave/issues/6042") + } + Message::Chunk(c) => yield c, + Message::Barrier(b) if b.epoch != expected_barrier.epoch => { + return Err(StreamExecutorError::align_barrier(expected_barrier, b)); + } + Message::Barrier(_) => return Ok(()), + } + } +} + +// Align the left and right inputs according to their barriers, +// such that in the produced stream, an aligned interval starts with +// any number of `InternalMessage::Chunk(left_chunk)` and followed by +// `InternalMessage::Barrier(right_chunks, barrier)`. +#[try_stream(ok = InternalMessage, error = StreamExecutorError)] +async fn align_input(left: Box, right: Box) { + let mut left = Box::pin(left.execute()); + let mut right = Box::pin(right.execute()); + // Keep producing intervals until stream exhaustion or errors. + loop { + let mut right_chunks = vec![]; + // Produce an aligned interval. 
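The `TemporalSide` above is a lookup-through cache: point-gets consult the LRU first and fall back to `StorageTable::get_row`, caching even `None` results so repeated probes for absent keys stay cheap. A self-contained sketch of that pattern with hypothetical types:

```rust
use std::collections::HashMap;

use futures::executor::block_on;

// Hypothetical async point-get against the right table's storage.
async fn storage_get(key: i64) -> Option<String> {
    (key % 2 == 0).then(|| format!("row-{key}"))
}

/// Look up through the cache: serve hits locally, fill on miss.
/// Negative results (`None`) are cached too, so a repeated miss
/// does not hit storage again.
async fn lookup(cache: &mut HashMap<i64, Option<String>>, key: i64) -> Option<String> {
    if let Some(hit) = cache.get(&key) {
        return hit.clone();
    }
    let fetched = storage_get(key).await;
    cache.insert(key, fetched.clone());
    fetched
}

fn main() {
    block_on(async {
        let mut cache = HashMap::new();
        assert_eq!(lookup(&mut cache, 4).await, Some("row-4".into()));
        assert_eq!(lookup(&mut cache, 3).await, None);
        assert_eq!(cache.len(), 2); // the miss for key 3 is cached as None
    });
}
```

The `'inner` loop below then merges the two inputs with `futures::stream::select_with_strategy`, polling the left (stream) side first so left chunks flow out eagerly while right (table-change) chunks are buffered until the barrier. The left-biased merge in isolation:

```rust
use futures::executor::block_on;
use futures::stream::{self, PollNext, StreamExt};

fn main() {
    block_on(async {
        let left = stream::iter(vec!["L1", "L2", "L3"]);
        let right = stream::iter(vec!["R1", "R2"]);

        // Always poll the left stream first; the right side only makes
        // progress when the left is pending or exhausted.
        let merged = stream::select_with_strategy(left, right, |_: &mut ()| PollNext::Left);

        let out: Vec<_> = merged.collect().await;
        // With two always-ready inputs, left-bias drains the left side first.
        assert_eq!(out, vec!["L1", "L2", "L3", "R1", "R2"]);
    });
}
```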
+ 'inner: loop { + let mut combined = stream::select_with_strategy( + left.by_ref().map(Either::Left), + right.by_ref().map(Either::Right), + |_: &mut ()| PollNext::Left, + ); + match combined.next().await { + Some(Either::Left(Ok(Message::Chunk(c)))) => yield InternalMessage::Chunk(c), + Some(Either::Right(Ok(Message::Chunk(c)))) => right_chunks.push(c), + Some(Either::Left(Ok(Message::Barrier(b)))) => { + let mut remain = chunks_until_barrier(right.by_ref(), b.clone()) + .try_collect() + .await?; + right_chunks.append(&mut remain); + yield InternalMessage::Barrier(right_chunks, b); + break 'inner; + } + Some(Either::Right(Ok(Message::Barrier(b)))) => { + #[for_await] + for chunk in chunks_until_barrier(left.by_ref(), b.clone()) { + yield InternalMessage::Chunk(chunk?); + } + yield InternalMessage::Barrier(right_chunks, b); + break 'inner; + } + Some(Either::Left(Err(e)) | Either::Right(Err(e))) => return Err(e), + Some( + Either::Left(Ok(Message::Watermark(_))) + | Either::Right(Ok(Message::Watermark(_))), + ) => todo!("https://github.com/risingwavelabs/risingwave/issues/6042"), + None => return Ok(()), + } + } + } +} + +impl TemporalJoinExecutor { + #[allow(clippy::too_many_arguments)] + pub fn new( + ctx: ActorContextRef, + left: BoxedExecutor, + right: BoxedExecutor, + table: StorageTable, + left_join_keys: Vec, + right_join_keys: Vec, + null_safe: Vec, + condition: Option, + pk_indices: PkIndices, + output_indices: Vec, + table_output_indices: Vec, + executor_id: u64, + watermark_epoch: AtomicU64Ref, + metrics: Arc, + chunk_size: usize, + ) -> Self { + let schema_fields = [left.schema().fields.clone(), right.schema().fields.clone()].concat(); + + let schema: Schema = output_indices + .iter() + .map(|&idx| schema_fields[idx].clone()) + .collect(); + + let alloc = StatsAlloc::new(Global).shared(); + + let cache = new_with_hasher_in(watermark_epoch, DefaultHasher::default(), alloc); + + Self { + ctx, + left, + right, + right_table: TemporalSide { + source: table, + table_output_indices, + cache, + }, + left_join_keys, + right_join_keys, + null_safe, + condition, + output_indices, + schema, + chunk_size, + pk_indices, + identity: format!("TemporalJoinExecutor {:X}", executor_id), + metrics, + } + } + + #[try_stream(ok = Message, error = StreamExecutorError)] + async fn into_stream(mut self) { + let (left_map, right_map) = StreamChunkBuilder::get_i2o_mapping( + self.output_indices.iter().cloned(), + self.left.schema().len(), + self.right.schema().len(), + ); + + let mut prev_epoch = None; + #[for_await] + for msg in align_input(self.left, self.right) { + match msg? { + InternalMessage::Chunk(chunk) => { + let mut builder = StreamChunkBuilder::new( + self.chunk_size, + &self.schema.data_types(), + left_map.clone(), + right_map.clone(), + ); + let epoch = prev_epoch.expect("Chunk data should come after some barrier."); + for (op, left_row) in chunk.rows() { + let key = left_row.project(&self.left_join_keys); + if key + .iter() + .zip_eq_fast(self.null_safe.iter()) + .any(|(datum, can_null)| datum.is_none() && !*can_null) + { + continue; + } + if let Some(right_row) = self.right_table.lookup(key, epoch).await? 
{ + // check join condition + let ok = if let Some(ref mut cond) = self.condition { + let concat_row = left_row.chain(&right_row).into_owned_row(); + cond.eval_row_infallible(&concat_row, |err| { + self.ctx.on_compute_error(err, self.identity.as_str()) + }) + .await + .map(|s| *s.as_bool()) + .unwrap_or(false) + } else { + true + }; + if ok { + if let Some(chunk) = builder.append_row(op, left_row, &right_row) { + yield Message::Chunk(chunk); + } + } + } else if T == JoinType::LeftOuter { + if let Some(chunk) = builder.append_row_update(op, left_row) { + yield Message::Chunk(chunk); + } + } + } + if let Some(chunk) = builder.take() { + yield Message::Chunk(chunk); + } + } + InternalMessage::Barrier(updates, barrier) => { + prev_epoch = Some(barrier.epoch.curr); + self.right_table + .update(updates, &self.right_join_keys, barrier.epoch.curr); + yield Message::Barrier(barrier) + } + } + } + } +} + +impl Executor for TemporalJoinExecutor { + fn execute(self: Box) -> super::BoxedMessageStream { + self.into_stream().boxed() + } + + fn schema(&self) -> &Schema { + &self.schema + } + + fn pk_indices(&self) -> super::PkIndicesRef<'_> { + &self.pk_indices + } + + fn identity(&self) -> &str { + self.identity.as_str() + } +} diff --git a/src/stream/src/executor/test_utils.rs b/src/stream/src/executor/test_utils.rs index 2a4b325964349..8d86ead85473b 100644 --- a/src/stream/src/executor/test_utils.rs +++ b/src/stream/src/executor/test_utils.rs @@ -254,17 +254,17 @@ pub mod agg_executor { }; for idx in group_key_indices { - add_column(*idx, input_fields[*idx].data_type(), OrderType::Ascending); + add_column(*idx, input_fields[*idx].data_type(), OrderType::ascending()); } add_column(agg_call.args.val_indices()[0], agg_call.args.arg_types()[0].clone(), if agg_call.kind == AggKind::Max { - OrderType::Descending + OrderType::descending() } else { - OrderType::Ascending + OrderType::ascending() }); for idx in pk_indices { - add_column(*idx, input_fields[*idx].data_type(), OrderType::Ascending); + add_column(*idx, input_fields[*idx].data_type(), OrderType::ascending()); } let state_table = StateTable::new_without_distribution( @@ -316,7 +316,7 @@ pub mod agg_executor { group_key_indices.iter().for_each(|idx| { add_column_desc(input_fields[*idx].data_type()); - order_types.push(OrderType::Ascending); + order_types.push(OrderType::ascending()); }); agg_calls.iter().for_each(|agg_call| { diff --git a/src/stream/src/executor/top_n/group_top_n.rs b/src/stream/src/executor/top_n/group_top_n.rs index 2aeffa4fff1f3..9cc03f2e66a71 100644 --- a/src/stream/src/executor/top_n/group_top_n.rs +++ b/src/stream/src/executor/top_n/group_top_n.rs @@ -22,13 +22,13 @@ use risingwave_common::hash::HashKey; use risingwave_common::row::RowExt; use risingwave_common::util::epoch::EpochPair; use risingwave_common::util::iter_util::ZipEqDebug; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_storage::StateStore; use super::top_n_cache::TopNCacheTrait; use super::utils::*; use super::TopNCache; -use crate::cache::{cache_may_stale, new_unbounded, ExecutorCache}; +use crate::cache::{new_unbounded, ExecutorCache}; use crate::common::table::state_table::StateTable; use crate::error::StreamResult; use crate::executor::error::StreamExecutorResult; @@ -37,16 +37,16 @@ use crate::executor::{ActorContextRef, Executor, ExecutorInfo, PkIndices, Waterm use crate::task::AtomicU64Ref; pub type GroupTopNExecutor = - TopNExecutorWrapper>; + TopNExecutorWrapper>; impl 
GroupTopNExecutor { #[allow(clippy::too_many_arguments)] pub fn new( input: Box, ctx: ActorContextRef, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, group_by: Vec, state_table: StateTable, @@ -56,7 +56,7 @@ impl GroupTopNExecutor GroupTopNExecutor { +pub struct InnerGroupTopNExecutor { info: ExecutorInfo, /// `LIMIT XXX`. None means no limit. @@ -94,13 +94,13 @@ pub struct InnerGroupTopNExecutorNew InnerGroupTopNExecutorNew { +impl InnerGroupTopNExecutor { #[allow(clippy::too_many_arguments)] pub fn new( input_info: ExecutorInfo, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, group_by: Vec, state_table: StateTable, @@ -123,7 +123,7 @@ impl InnerGroupTopNExecutorNew offset: offset_and_limit.0, limit: offset_and_limit.1, managed_state, - storage_key_indices: storage_key.into_iter().map(|op| op.column_idx).collect(), + storage_key_indices: storage_key.into_iter().map(|op| op.column_index).collect(), group_by, caches: GroupTopNCache::new(lru_manager), cache_key_serde, @@ -158,7 +158,7 @@ impl DerefMut for GroupTopNCache TopNExecutorBase - for InnerGroupTopNExecutorNew + for InnerGroupTopNExecutor where TopNCache: TopNCacheTrait, { @@ -178,7 +178,8 @@ where // If 'self.caches' does not already have a cache for the current group, create a new // cache for it and insert it into `self.caches` if !self.caches.contains(group_cache_key) { - let mut topn_cache = TopNCache::new(self.offset, self.limit); + let mut topn_cache = + TopNCache::new(self.offset, self.limit, self.schema().data_types()); self.managed_state .init_topn_cache(Some(group_key), &mut topn_cache) .await?; @@ -221,12 +222,12 @@ where } fn update_vnode_bitmap(&mut self, vnode_bitmap: Arc) { - let previous_vnode_bitmap = self + let (_previous_vnode_bitmap, cache_may_stale) = self .managed_state .state_table - .update_vnode_bitmap(vnode_bitmap.clone()); + .update_vnode_bitmap(vnode_bitmap); - if cache_may_stale(&previous_vnode_bitmap, &vnode_bitmap) { + if cache_may_stale { self.caches.clear(); } } @@ -280,22 +281,22 @@ mod tests { } } - fn storage_key() -> Vec { + fn storage_key() -> Vec { vec![ - OrderPair::new(1, OrderType::Ascending), - OrderPair::new(2, OrderType::Ascending), - OrderPair::new(0, OrderType::Ascending), + ColumnOrder::new(1, OrderType::ascending()), + ColumnOrder::new(2, OrderType::ascending()), + ColumnOrder::new(0, OrderType::ascending()), ] } /// group by 1, order by 2 - fn order_by_1() -> Vec { - vec![OrderPair::new(2, OrderType::Ascending)] + fn order_by_1() -> Vec { + vec![ColumnOrder::new(2, OrderType::ascending())] } /// group by 1,2, order by 0 - fn order_by_2() -> Vec { - vec![OrderPair::new(0, OrderType::Ascending)] + fn order_by_2() -> Vec { + vec![ColumnOrder::new(0, OrderType::ascending())] } fn pk_indices() -> PkIndices { @@ -360,9 +361,9 @@ mod tests { let state_table = create_in_memory_state_table( &[DataType::Int64, DataType::Int64, DataType::Int64], &[ - OrderType::Ascending, - OrderType::Ascending, - OrderType::Ascending, + OrderType::ascending(), + OrderType::ascending(), + OrderType::ascending(), ], &pk_indices(), ) @@ -456,9 +457,9 @@ mod tests { let state_table = create_in_memory_state_table( &[DataType::Int64, DataType::Int64, DataType::Int64], &[ - OrderType::Ascending, - OrderType::Ascending, - OrderType::Ascending, + OrderType::ascending(), + OrderType::ascending(), + OrderType::ascending(), ], &pk_indices(), ) @@ -546,9 +547,9 @@ mod 
tests { let state_table = create_in_memory_state_table( &[DataType::Int64, DataType::Int64, DataType::Int64], &[ - OrderType::Ascending, - OrderType::Ascending, - OrderType::Ascending, + OrderType::ascending(), + OrderType::ascending(), + OrderType::ascending(), ], &pk_indices(), ) diff --git a/src/stream/src/executor/top_n/group_top_n_appendonly.rs b/src/stream/src/executor/top_n/group_top_n_appendonly.rs index ea2141af2d03a..c173f44759e8f 100644 --- a/src/stream/src/executor/top_n/group_top_n_appendonly.rs +++ b/src/stream/src/executor/top_n/group_top_n_appendonly.rs @@ -35,14 +35,13 @@ use risingwave_common::hash::HashKey; use risingwave_common::row::{RowDeserializer, RowExt}; use risingwave_common::util::epoch::EpochPair; use risingwave_common::util::iter_util::ZipEqDebug; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_storage::StateStore; use super::group_top_n::GroupTopNCache; use super::top_n_cache::AppendOnlyTopNCacheTrait; use super::utils::*; use super::TopNCache; -use crate::cache::cache_may_stale; use crate::common::table::state_table::StateTable; use crate::error::StreamResult; use crate::executor::error::StreamExecutorResult; @@ -54,7 +53,7 @@ use crate::task::AtomicU64Ref; /// to keep all the data records/rows that have been seen. As long as a record /// is no longer being in the result set, it can be deleted. pub type AppendOnlyGroupTopNExecutor = - TopNExecutorWrapper>; + TopNExecutorWrapper>; impl AppendOnlyGroupTopNExecutor @@ -63,9 +62,9 @@ impl pub fn new( input: Box, ctx: ActorContextRef, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, group_by: Vec, state_table: StateTable, @@ -75,7 +74,7 @@ impl Ok(TopNExecutorWrapper { input, ctx, - inner: InnerAppendOnlyGroupTopNExecutorNew::new( + inner: InnerAppendOnlyGroupTopNExecutor::new( info, storage_key, offset_and_limit, @@ -89,7 +88,7 @@ impl } } -pub struct InnerAppendOnlyGroupTopNExecutorNew { +pub struct InnerAppendOnlyGroupTopNExecutor { info: ExecutorInfo, /// `LIMIT XXX`. None means no limit. 
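Both group-top-n variants now take the staleness flag directly from `update_vnode_bitmap` instead of recomputing it from the old and new bitmaps (the append-only variant follows in the next hunks). A std-only sketch of the calling pattern, with a hypothetical per-group cache:

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical executor state: per-group caches over a vnode-partitioned table.
struct GroupCaches {
    owned_vnodes: HashSet<u32>,
    caches: HashMap<String, Vec<i64>>,
}

impl GroupCaches {
    /// Mirrors the new contract: report whether caches may be stale,
    /// which is the case iff we now own vnodes we did not own before.
    fn update_vnode_bitmap(&mut self, new: HashSet<u32>) -> bool {
        let cache_may_stale = !new.is_subset(&self.owned_vnodes);
        self.owned_vnodes = new;
        if cache_may_stale {
            // Cached per-group results may miss rows from newly-owned
            // vnodes, so drop them and rebuild lazily on next access.
            self.caches.clear();
        }
        cache_may_stale
    }
}

fn main() {
    let mut state = GroupCaches {
        owned_vnodes: HashSet::from([1, 2]),
        caches: HashMap::from([("g1".to_string(), vec![10, 20])]),
    };
    // Gaining vnode 3 invalidates the caches; shrinking would not.
    assert!(state.update_vnode_bitmap(HashSet::from([1, 2, 3])));
    assert!(state.caches.is_empty());
}
```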
@@ -114,14 +113,14 @@ pub struct InnerAppendOnlyGroupTopNExecutorNew - InnerAppendOnlyGroupTopNExecutorNew + InnerAppendOnlyGroupTopNExecutor { #[allow(clippy::too_many_arguments)] pub fn new( input_info: ExecutorInfo, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, group_by: Vec, state_table: StateTable, @@ -144,7 +143,7 @@ impl offset: offset_and_limit.0, limit: offset_and_limit.1, managed_state, - storage_key_indices: storage_key.into_iter().map(|op| op.column_idx).collect(), + storage_key_indices: storage_key.into_iter().map(|op| op.column_index).collect(), group_by, caches: GroupTopNCache::new(lru_manager), cache_key_serde, @@ -153,7 +152,7 @@ impl } #[async_trait] impl TopNExecutorBase - for InnerAppendOnlyGroupTopNExecutorNew + for InnerAppendOnlyGroupTopNExecutor where TopNCache: AppendOnlyTopNCacheTrait, { @@ -164,7 +163,7 @@ where let keys = K::build(&self.group_by, chunk.data_chunk())?; let data_types = self.schema().data_types(); - let row_deserializer = RowDeserializer::new(data_types); + let row_deserializer = RowDeserializer::new(data_types.clone()); for ((op, row_ref), group_cache_key) in chunk.rows().zip_eq_debug(keys.iter()) { // The pk without group by @@ -176,7 +175,7 @@ where // If 'self.caches' does not already have a cache for the current group, create a new // cache for it and insert it into `self.caches` if !self.caches.contains(group_cache_key) { - let mut topn_cache = TopNCache::new(self.offset, self.limit); + let mut topn_cache = TopNCache::new(self.offset, self.limit, data_types.clone()); self.managed_state .init_topn_cache(Some(group_key), &mut topn_cache) .await?; @@ -207,12 +206,12 @@ where } fn update_vnode_bitmap(&mut self, vnode_bitmap: Arc) { - let previous_vnode_bitmap = self + let (_previous_vnode_bitmap, cache_may_stale) = self .managed_state .state_table - .update_vnode_bitmap(vnode_bitmap.clone()); + .update_vnode_bitmap(vnode_bitmap); - if cache_may_stale(&previous_vnode_bitmap, &vnode_bitmap) { + if cache_may_stale { self.caches.clear(); } } diff --git a/src/stream/src/executor/top_n/top_n_appendonly.rs b/src/stream/src/executor/top_n/top_n_appendonly.rs index 0397a286036fb..fcdab8550f2a3 100644 --- a/src/stream/src/executor/top_n/top_n_appendonly.rs +++ b/src/stream/src/executor/top_n/top_n_appendonly.rs @@ -16,7 +16,7 @@ use async_trait::async_trait; use risingwave_common::array::{Op, StreamChunk}; use risingwave_common::row::{RowDeserializer, RowExt}; use risingwave_common::util::epoch::EpochPair; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_storage::StateStore; use super::top_n_cache::AppendOnlyTopNCacheTrait; @@ -41,9 +41,9 @@ impl AppendOnlyTopNExecutor { pub fn new_without_ties( input: Box, ctx: ActorContextRef, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, state_table: StateTable, ) -> StreamResult { @@ -69,9 +69,9 @@ impl AppendOnlyTopNExecutor { pub fn new_with_ties( input: Box, ctx: ActorContextRef, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, state_table: StateTable, ) -> StreamResult { @@ -113,9 +113,9 @@ impl InnerAppendOnlyTopNExecutor, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, state_table: StateTable, ) -> StreamResult { @@ -129,6 +129,7 @@ impl 
InnerAppendOnlyTopNExecutor::new(state_table, cache_key_serde.clone()); + let data_types = schema.data_types(); Ok(Self { info: ExecutorInfo { @@ -137,8 +138,8 @@ impl InnerAppendOnlyTopNExecutor Vec { + fn storage_key() -> Vec { order_by() } - fn order_by() -> Vec { + fn order_by() -> Vec { vec![ - OrderPair::new(0, OrderType::Ascending), - OrderPair::new(1, OrderType::Ascending), + ColumnOrder::new(0, OrderType::ascending()), + ColumnOrder::new(1, OrderType::ascending()), ] } @@ -284,7 +285,7 @@ mod tests { let source = create_source(); let state_table = create_in_memory_state_table( &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), ) .await; @@ -366,7 +367,7 @@ mod tests { let source = create_source(); let state_table = create_in_memory_state_table( &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), ) .await; diff --git a/src/stream/src/executor/top_n/top_n_cache.rs b/src/stream/src/executor/top_n/top_n_cache.rs index b500cd8eff90f..e99f3bffba29c 100644 --- a/src/stream/src/executor/top_n/top_n_cache.rs +++ b/src/stream/src/executor/top_n/top_n_cache.rs @@ -14,10 +14,13 @@ use std::cmp::Ordering; use std::collections::BTreeMap; +use std::fmt::Debug; use async_trait::async_trait; +use itertools::Itertools; use risingwave_common::array::{Op, RowRef}; -use risingwave_common::row::{CompactedRow, Row, RowDeserializer}; +use risingwave_common::row::{CompactedRow, Row, RowDeserializer, RowExt}; +use risingwave_common::types::DataType; use risingwave_storage::StateStore; use crate::executor::error::StreamExecutorResult; @@ -53,6 +56,52 @@ pub struct TopNCache { pub offset: usize, /// Assumption: `limit != 0` pub limit: usize, + + /// Data types for the full row. + /// + /// For debug formatting only. + data_types: Vec, +} + +impl Debug for TopNCache { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "TopNCache {{\n offset: {}, limit: {}, high_capacity: {},\n", + self.offset, self.limit, self.high_capacity + )?; + + fn format_cache( + f: &mut std::fmt::Formatter<'_>, + cache: &BTreeMap, + data_types: &[DataType], + ) -> std::fmt::Result { + if cache.is_empty() { + return write!(f, " "); + } + write!( + f, + " {}", + cache + .iter() + .format_with("\n ", |item, f| f(&format_args!( + "{:?}, {}", + item.0, + item.1.deserialize(data_types).unwrap().display(), + ))) + ) + } + + writeln!(f, " low:")?; + format_cache(f, &self.low, &self.data_types)?; + writeln!(f, "\n middle:")?; + format_cache(f, &self.middle, &self.data_types)?; + writeln!(f, "\n high:")?; + format_cache(f, &self.high, &self.data_types)?; + + write!(f, "\n}}")?; + Ok(()) + } } /// `CacheKey` is composed of `(order_by, remaining columns of pk)`. @@ -67,10 +116,11 @@ pub trait TopNCacheTrait { /// /// Changes in `self.middle` is recorded to `res_ops` and `res_rows`, which will be /// used to generate messages to be sent to downstream operators. + #[allow(clippy::too_many_arguments)] fn insert( &mut self, cache_key: CacheKey, - row: impl Row, + row: impl Row + Send, res_ops: &mut Vec, res_rows: &mut Vec, ); @@ -96,7 +146,8 @@ pub trait TopNCacheTrait { } impl TopNCache { - pub fn new(offset: usize, limit: usize) -> Self { + /// `data_types` -- Data types for the full row. 
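The constructor below threads `data_types` through solely so the new `Debug` impl above can decode and print cached `CompactedRow`s in the assertion messages. A standalone sketch of the same lazy formatting trick, using `itertools`' `format_with` over a `BTreeMap`:

```rust
use std::collections::BTreeMap;
use std::fmt;

use itertools::Itertools;

// Hypothetical cache; plain strings stand in for rows that would
// otherwise need `data_types` to be deserialized for display.
struct Cache {
    middle: BTreeMap<i64, String>,
}

impl fmt::Debug for Cache {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "Cache {{")?;
        // `format_with` renders each entry lazily, without building an
        // intermediate Vec<String>.
        writeln!(
            f,
            "  {}",
            self.middle
                .iter()
                .format_with("\n  ", |(k, v), f| f(&format_args!("{k:?} => {v}")))
        )?;
        write!(f, "}}")
    }
}

fn main() {
    let cache = Cache {
        middle: BTreeMap::from([(1, "row-a".to_string()), (2, "row-b".to_string())]),
    };
    println!("{cache:?}");
}
```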
+ pub fn new(offset: usize, limit: usize, data_types: Vec) -> Self { assert!(limit != 0); if WITH_TIES { // It's trickier to support. @@ -113,6 +164,7 @@ impl TopNCache { .unwrap_or(usize::MAX), offset, limit, + data_types, } } @@ -137,13 +189,19 @@ impl TopNCache { pub fn is_middle_cache_full(&self) -> bool { // For WITH_TIES, the middle cache can exceed the capacity. if !WITH_TIES { - assert!(self.middle.len() <= self.limit); + assert!( + self.middle.len() <= self.limit, + "the middle cache exceeds the capacity\n{self:?}" + ); } let full = self.middle.len() >= self.limit; if full { assert!(self.is_low_cache_full()); } else { - assert!(self.high.is_empty()); + assert!( + self.high.is_empty(), + "the high cache is not empty when middle cache is not full:\n{self:?}" + ); } full } @@ -153,11 +211,33 @@ impl TopNCache { if !WITH_TIES { assert!(self.high.len() <= self.high_capacity); } - let full = self.high.len() >= self.high_capacity; - if full { - assert!(self.is_middle_cache_full()); + self.high.len() >= self.high_capacity + } + + /// Use this method instead of `self.high.insert` directly when possible. + /// + /// It only inserts into high cache if the key is smaller than the largest key in the high + /// cache. Otherwise, we simply ignore the row. We will wait until the high cache becomes + /// empty and fill it at that time. + fn insert_high_cache(&mut self, cache_key: CacheKey, row: CompactedRow, is_from_middle: bool) { + if !self.is_high_cache_full() { + if is_from_middle { + self.high.insert(cache_key, row); + return; + } + // For direct insert, we need to check if the key is smaller than the largest key + if let Some(high_last) = self.high.last_key_value() && cache_key <= *high_last.0 { + debug_assert!(cache_key != *high_last.0, "cache_key should be unique"); + self.high.insert(cache_key, row); + } + } else { + let high_last = self.high.last_entry().unwrap(); + if cache_key <= *high_last.key() { + debug_assert!(cache_key != *high_last.key(), "cache_key should be unique"); + high_last.remove_entry(); + self.high.insert(cache_key, row); + } } - full } } @@ -166,7 +246,7 @@ impl TopNCacheTrait for TopNCache { fn insert( &mut self, cache_key: CacheKey, - row: impl Row, + row: impl Row + Send, res_ops: &mut Vec, res_rows: &mut Vec, ) { @@ -195,6 +275,7 @@ impl TopNCacheTrait for TopNCache { return; } + let mut is_from_middle = false; let elem_to_compare_with_high = { let middle_last = self.middle.last_entry().unwrap(); if elem_to_compare_with_middle.0 <= *middle_last.key() { @@ -207,23 +288,18 @@ impl TopNCacheTrait for TopNCache { res_rows.push(elem_to_compare_with_middle.1.clone()); self.middle .insert(elem_to_compare_with_middle.0, elem_to_compare_with_middle.1); + is_from_middle = true; res } else { elem_to_compare_with_middle } }; - if !self.is_high_cache_full() { - self.high - .insert(elem_to_compare_with_high.0, elem_to_compare_with_high.1); - } else { - let high_last = self.high.last_entry().unwrap(); - if elem_to_compare_with_high.0 <= *high_last.key() { - high_last.remove_entry(); - self.high - .insert(elem_to_compare_with_high.0, elem_to_compare_with_high.1); - } - } + self.insert_high_cache( + elem_to_compare_with_high.0, + elem_to_compare_with_high.1, + is_from_middle, + ); } #[allow(clippy::too_many_arguments)] @@ -308,7 +384,7 @@ impl TopNCacheTrait for TopNCache { fn insert( &mut self, cache_key: CacheKey, - row: impl Row, + row: impl Row + Send, res_ops: &mut Vec, res_rows: &mut Vec, ) { @@ -381,21 +457,11 @@ impl TopNCacheTrait for TopNCache { Ordering::Greater => { // 
The row is in high. let elem_to_compare_with_high = elem_to_compare_with_middle; - if !self.is_high_cache_full() { - self.high.insert( - elem_to_compare_with_high.0, - (&elem_to_compare_with_high.1).into(), - ); - } else { - let high_last = self.high.last_entry().unwrap(); - if elem_to_compare_with_high.0 <= *high_last.key() { - high_last.remove_entry(); - self.high.insert( - elem_to_compare_with_high.0, - (&elem_to_compare_with_high.1).into(), - ); - } - } + self.insert_high_cache( + elem_to_compare_with_high.0, + elem_to_compare_with_high.1.into(), + false, + ); } } } diff --git a/src/stream/src/executor/top_n/top_n_plain.rs b/src/stream/src/executor/top_n/top_n_plain.rs index fc552d8c20d43..faba24ef9d35f 100644 --- a/src/stream/src/executor/top_n/top_n_plain.rs +++ b/src/stream/src/executor/top_n/top_n_plain.rs @@ -16,7 +16,7 @@ use async_trait::async_trait; use risingwave_common::array::{Op, StreamChunk}; use risingwave_common::row::RowExt; use risingwave_common::util::epoch::EpochPair; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_storage::StateStore; use super::utils::*; @@ -30,16 +30,16 @@ use crate::executor::{ActorContextRef, Executor, ExecutorInfo, PkIndices, Waterm /// `TopNExecutor` works with input with modification, it keeps all the data /// records/rows that have been seen, and returns topN records overall. pub type TopNExecutor = - TopNExecutorWrapper>; + TopNExecutorWrapper>; impl TopNExecutor { #[allow(clippy::too_many_arguments)] pub fn new_without_ties( input: Box, ctx: ActorContextRef, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, state_table: StateTable, ) -> StreamResult { @@ -48,7 +48,7 @@ impl TopNExecutor { Ok(TopNExecutorWrapper { input, ctx, - inner: InnerTopNExecutorNew::new( + inner: InnerTopNExecutor::new( info, storage_key, offset_and_limit, @@ -65,9 +65,9 @@ impl TopNExecutor { pub fn new_with_ties( input: Box, ctx: ActorContextRef, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, state_table: StateTable, ) -> StreamResult { @@ -76,7 +76,7 @@ impl TopNExecutor { Ok(TopNExecutorWrapper { input, ctx, - inner: InnerTopNExecutorNew::new( + inner: InnerTopNExecutor::new( info, storage_key, offset_and_limit, @@ -94,15 +94,15 @@ impl TopNExecutor { pub fn new_with_ties_for_test( input: Box, ctx: ActorContextRef, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, state_table: StateTable, ) -> StreamResult { let info = input.info(); - let mut inner = InnerTopNExecutorNew::new( + let mut inner = InnerTopNExecutor::new( info, storage_key, offset_and_limit, @@ -111,13 +111,13 @@ impl TopNExecutor { state_table, )?; - inner.cache.high_capacity = 1; + inner.cache.high_capacity = 2; Ok(TopNExecutorWrapper { input, ctx, inner }) } } -pub struct InnerTopNExecutorNew { +pub struct InnerTopNExecutor { info: ExecutorInfo, /// The storage key indices of the `TopNExecutor` @@ -132,7 +132,7 @@ pub struct InnerTopNExecutorNew { cache_key_serde: CacheKeySerde, } -impl InnerTopNExecutorNew { +impl InnerTopNExecutor { /// # Arguments /// /// `storage_key` -- the storage pk. 
It's composed of the ORDER BY columns and the missing @@ -143,9 +143,9 @@ impl InnerTopNExecutorNew { #[allow(clippy::too_many_arguments)] pub fn new( input_info: ExecutorInfo, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, state_table: StateTable, ) -> StreamResult { @@ -158,6 +158,7 @@ impl InnerTopNExecutorNew { let cache_key_serde = create_cache_key_serde(&storage_key, &pk_indices, &schema, &order_by, &[]); let managed_state = ManagedTopNState::::new(state_table, cache_key_serde.clone()); + let data_types = schema.data_types(); Ok(Self { info: ExecutorInfo { @@ -166,15 +167,15 @@ impl InnerTopNExecutorNew { identity: format!("TopNExecutor {:X}", executor_id), }, managed_state, - storage_key_indices: storage_key.into_iter().map(|op| op.column_idx).collect(), - cache: TopNCache::new(num_offset, num_limit), + storage_key_indices: storage_key.into_iter().map(|op| op.column_index).collect(), + cache: TopNCache::new(num_offset, num_limit, data_types), cache_key_serde, }) } } #[async_trait] -impl TopNExecutorBase for InnerTopNExecutorNew +impl TopNExecutorBase for InnerTopNExecutor where TopNCache: TopNCacheTrait, { @@ -295,14 +296,14 @@ mod tests { } } - fn storage_key() -> Vec { + fn storage_key() -> Vec { let mut v = order_by(); - v.extend([OrderPair::new(1, OrderType::Ascending)]); + v.extend([ColumnOrder::new(1, OrderType::ascending())]); v } - fn order_by() -> Vec { - vec![OrderPair::new(0, OrderType::Ascending)] + fn order_by() -> Vec { + vec![ColumnOrder::new(0, OrderType::ascending())] } fn pk_indices() -> PkIndices { @@ -334,7 +335,7 @@ mod tests { let source = create_source(); let state_table = create_in_memory_state_table( &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), ) .await; @@ -430,7 +431,7 @@ mod tests { let source = create_source(); let state_table = create_in_memory_state_table( &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), ) .await; @@ -538,7 +539,7 @@ mod tests { let source = create_source(); let state_table = create_in_memory_state_table( &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), ) .await; @@ -645,7 +646,7 @@ mod tests { let source = create_source(); let state_table = create_in_memory_state_table( &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), ) .await; @@ -847,14 +848,14 @@ mod tests { )) } - fn storage_key() -> Vec { + fn storage_key() -> Vec { order_by() } - fn order_by() -> Vec { + fn order_by() -> Vec { vec![ - OrderPair::new(0, OrderType::Ascending), - OrderPair::new(3, OrderType::Ascending), + ColumnOrder::new(0, OrderType::ascending()), + ColumnOrder::new(3, OrderType::ascending()), ] } @@ -872,7 +873,7 @@ mod tests { DataType::Int64, DataType::Int64, ], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), ) .await; @@ -949,7 +950,7 @@ mod tests { DataType::Int64, DataType::Int64, ], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), state_store.clone(), ) @@ -1001,7 +1002,7 @@ mod tests { DataType::Int64, DataType::Int64, ], - 
&[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), state_store, ) @@ -1079,10 +1080,11 @@ mod tests { ), StreamChunk::from_pretty( " I I - + 3 8 - + 1 6 - + 2 7 - + 10 9", + + 3 6 + + 3 7 + + 1 8 + + 2 9 + + 10 10", ), StreamChunk::from_pretty( " I I @@ -1090,7 +1092,7 @@ mod tests { ), StreamChunk::from_pretty( " I I - - 1 6", + - 1 8", ), ]; let schema = Schema { @@ -1113,14 +1115,14 @@ mod tests { )) } - fn storage_key() -> Vec { + fn storage_key() -> Vec { let mut v = order_by(); - v.push(OrderPair::new(1, OrderType::Ascending)); + v.push(ColumnOrder::new(1, OrderType::ascending())); v } - fn order_by() -> Vec { - vec![OrderPair::new(0, OrderType::Ascending)] + fn order_by() -> Vec { + vec![ColumnOrder::new(0, OrderType::ascending())] } fn pk_indices() -> PkIndices { @@ -1132,7 +1134,7 @@ mod tests { let source = create_source(); let state_table = create_in_memory_state_table( &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), ) .await; @@ -1168,11 +1170,13 @@ mod tests { *res.as_chunk().unwrap(), StreamChunk::from_pretty( " I I - + 3 8 - - 3 8 + + 3 6 + + 3 7 + - 3 7 + - 3 6 - 3 2 - + 1 6 - + 2 7" + + 1 8 + + 2 9" ) ); @@ -1185,15 +1189,16 @@ mod tests { ) ); - // High cache has only one capacity, but we need to trigger 2 inserts here! + // High cache has only 2 capacity, but we need to trigger 3 inserts here! let res = top_n_executor.next().await.unwrap().unwrap(); assert_eq!( *res.as_chunk().unwrap(), StreamChunk::from_pretty( " I I - - 1 6 + - 1 8 + 3 2 - + 3 8 + + 3 6 + + 3 7 " ) ); @@ -1219,10 +1224,11 @@ mod tests { ), StreamChunk::from_pretty( " I I - + 3 8 - + 1 6 - + 2 7 - + 10 9", + + 3 6 + + 3 7 + + 1 8 + + 2 9 + + 10 10", ), ]; let schema = Schema { @@ -1251,7 +1257,7 @@ mod tests { ), StreamChunk::from_pretty( " I I - - 1 6", + - 1 8", ), ]; let schema = Schema { @@ -1277,7 +1283,7 @@ mod tests { let state_store = MemoryStateStore::new(); let state_table = create_in_memory_state_table_from_state_store( &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), state_store.clone(), ) @@ -1314,11 +1320,13 @@ mod tests { *res.as_chunk().unwrap(), StreamChunk::from_pretty( " I I - + 3 8 - - 3 8 + + 3 6 + + 3 7 + - 3 7 + - 3 6 - 3 2 - + 1 6 - + 2 7" + + 1 8 + + 2 9" ) ); @@ -1330,7 +1338,7 @@ mod tests { let state_table = create_in_memory_state_table_from_state_store( &[DataType::Int64, DataType::Int64], - &[OrderType::Ascending, OrderType::Ascending], + &[OrderType::ascending(), OrderType::ascending()], &pk_indices(), state_store, ) @@ -1366,19 +1374,20 @@ mod tests { ) ); - // High cache has only one capacity, but we need to trigger 2 inserts here! + // High cache has only 2 capacity, but we need to trigger 3 inserts here! 
let res = top_n_executor.next().await.unwrap().unwrap(); assert_eq!( *res.as_chunk().unwrap(), StreamChunk::from_pretty( " I I - - 1 6 + - 1 8 + 3 2 - + 3 8 + + 3 6 + + 3 7 " ) ); // barrier assert_matches!( top_n_executor.next().await.unwrap().unwrap(), diff --git a/src/stream/src/executor/top_n/utils.rs b/src/stream/src/executor/top_n/utils.rs index 0b5676c90c23f..e062b72e8f463 100644 --- a/src/stream/src/executor/top_n/utils.rs +++ b/src/stream/src/executor/top_n/utils.rs @@ -26,7 +26,7 @@ use risingwave_common::row::{CompactedRow, Row, RowDeserializer}; use risingwave_common::util::chunk_coalesce::DataChunkBuilder; use risingwave_common::util::epoch::EpochPair; use risingwave_common::util::ordered::OrderedRowSerde; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use super::top_n_cache::CacheKey; use crate::executor::error::{StreamExecutorError, StreamExecutorResult}; @@ -191,16 +191,16 @@ pub fn serialize_pk_to_cache_key(pk: impl Row, cache_key_serde: &CacheKeySerde) pub type CacheKeySerde = (OrderedRowSerde, OrderedRowSerde, usize); pub fn create_cache_key_serde( - storage_key: &[OrderPair], + storage_key: &[ColumnOrder], pk_indices: PkIndicesRef<'_>, schema: &Schema, - order_by: &[OrderPair], + order_by: &[ColumnOrder], group_by: &[usize], ) -> CacheKeySerde { { // validate storage_key = group_by + order_by + additional_pk for i in 0..group_by.len() { - assert_eq!(storage_key[i].column_idx, group_by[i]); + assert_eq!(storage_key[i].column_index, group_by[i]); } for i in group_by.len()..(group_by.len() + order_by.len()) { assert_eq!(storage_key[i], order_by[i - group_by.len()]); @@ -208,7 +208,7 @@ pub fn create_cache_key_serde( let pk_indices = pk_indices.iter().copied().collect::>(); for i in (group_by.len() + order_by.len())..storage_key.len() { assert!( - pk_indices.contains(&storage_key[i].column_idx), + pk_indices.contains(&storage_key[i].column_index), "storage_key = {:?}, pk_indices = {:?}", storage_key, pk_indices @@ -219,7 +219,7 @@ pub fn create_cache_key_serde( let (cache_key_data_types, cache_key_order_types): (Vec<_>, Vec<_>) = storage_key [group_by.len()..] .iter() - .map(|o| (schema[o.column_idx].data_type(), o.order_type)) + .map(|o| (schema[o.column_index].data_type(), o.order_type)) .unzip(); let order_by_len = order_by.len(); diff --git a/src/stream/src/executor/watermark_filter.rs b/src/stream/src/executor/watermark_filter.rs index a9f6f67b42fa1..515d3acb97754 100644 --- a/src/stream/src/executor/watermark_filter.rs +++ b/src/stream/src/executor/watermark_filter.rs @@ -23,19 +23,18 @@ use risingwave_common::row::{OwnedRow, Row}; use risingwave_common::types::{DataType, ScalarImpl}; use risingwave_common::{bail, row}; use risingwave_expr::expr::{ - new_binary_expr, BoxedExpression, Expression, InputRefExpression, LiteralExpression, + build, BoxedExpression, Expression, InputRefExpression, LiteralExpression, }; use risingwave_expr::Result as ExprResult; use risingwave_pb::expr::expr_node::Type; use risingwave_storage::StateStore; use super::error::StreamExecutorError; -use super::filter::SimpleFilterExecutor; +use super::filter::FilterExecutor; use super::{ ActorContextRef, BoxedExecutor, Executor, ExecutorInfo, Message, StreamExecutorResult, }; use crate::common::table::state_table::StateTable; -use crate::common::InfallibleExpression; use crate::executor::{expect_first_barrier, Watermark}; /// The executor will generate a `Watermark` after each chunk.
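In the hunks that follow, `eval_infallible` becomes asynchronous and must be awaited, since expression evaluation may now suspend internally. A minimal sketch of the pattern — awaiting a predicate over a batch, then filtering by the resulting boolean mask (helper names hypothetical):

```rust
// Hypothetical async predicate: the real expression evaluates a whole
// chunk to an array of booleans.
async fn eval_ge(col: &[i64], watermark: i64) -> Vec<bool> {
    // An async expression could await here; this one is pure.
    col.iter().map(|v| *v >= watermark).collect()
}

fn filter_by_mask<T: Copy>(rows: &[T], mask: &[bool]) -> Vec<T> {
    rows.iter()
        .zip(mask)
        .filter_map(|(row, keep)| keep.then_some(*row))
        .collect()
}

#[tokio::main]
async fn main() {
    let event_times = vec![5, 9, 12, 3];
    // The caller must now `.await` evaluation, mirroring the
    // `eval_infallible(...).await` change below.
    let mask = eval_ge(&event_times, 9).await;
    assert_eq!(filter_by_mask(&event_times, &mask), vec![9, 12]);
}
```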
@@ -146,7 +145,8 @@ impl WatermarkFilterExecutor { let watermark_array = watermark_expr .eval_infallible(chunk.data_chunk(), |err| { ctx.on_compute_error(err, &info.identity) - }); + }) + .await; // Build the expression to calculate watermark filter. let watermark_filter_expr = Self::build_watermark_filter_expr( @@ -167,9 +167,10 @@ impl WatermarkFilterExecutor { let pred_output = watermark_filter_expr .eval_infallible(chunk.data_chunk(), |err| { ctx.on_compute_error(err, &info.identity) - }); + }) + .await; - if let Some(output_chunk) = SimpleFilterExecutor::filter(chunk, pred_output)? { + if let Some(output_chunk) = FilterExecutor::filter(chunk, pred_output)? { yield Message::Chunk(output_chunk); }; @@ -198,7 +199,8 @@ impl WatermarkFilterExecutor { Message::Barrier(barrier) => { // Update the vnode bitmap for state tables of all agg calls if asked. if let Some(vnode_bitmap) = barrier.as_update_vnode_bitmap(ctx.id) { - let previous_vnode_bitmap = table.update_vnode_bitmap(vnode_bitmap.clone()); + let (previous_vnode_bitmap, _cache_may_stale) = + table.update_vnode_bitmap(vnode_bitmap.clone()); // Take the global max watermark when scaling happens. if previous_vnode_bitmap != vnode_bitmap { @@ -234,11 +236,13 @@ impl WatermarkFilterExecutor { event_time_col_idx: usize, watermark: ScalarImpl, ) -> ExprResult { - new_binary_expr( + build( Type::GreaterThanOrEqual, DataType::Boolean, - InputRefExpression::new(watermark_type.clone(), event_time_col_idx).boxed(), - LiteralExpression::new(watermark_type, Some(watermark)).boxed(), + vec![ + InputRefExpression::new(watermark_type.clone(), event_time_col_idx).boxed(), + LiteralExpression::new(watermark_type, Some(watermark)).boxed(), + ], ) } @@ -331,22 +335,26 @@ mod tests { ], }; - let watermark_expr = new_binary_expr( + let watermark_expr = build( Type::Subtract, WATERMARK_TYPE.clone(), - InputRefExpression::new(WATERMARK_TYPE.clone(), 1).boxed(), - LiteralExpression::new( - interval_type, - Some(ScalarImpl::Interval(IntervalUnit::new(0, 1, 0))), - ) - .boxed(), + vec![ + InputRefExpression::new(WATERMARK_TYPE.clone(), 1).boxed(), + LiteralExpression::new( + interval_type, + Some(ScalarImpl::Interval(IntervalUnit::from_month_day_usec( + 0, 1, 0, + ))), + ) + .boxed(), + ], ) .unwrap(); let table = create_in_memory_state_table( mem_state, &[DataType::Int16, WATERMARK_TYPE], - &[OrderType::Ascending], + &[OrderType::ascending()], &[0], &[1], 0, diff --git a/src/stream/src/executor/wrapper.rs b/src/stream/src/executor/wrapper.rs index 158508d3dc5c6..b2e574b095599 100644 --- a/src/stream/src/executor/wrapper.rs +++ b/src/stream/src/executor/wrapper.rs @@ -118,6 +118,8 @@ impl WrapperExecutor { let stream = trace::instrument_await_tree(info.clone(), extra.actor_id, extra.executor_id, stream); + // Schema check + let stream = schema_check::schema_check(info.clone(), stream); // Epoch check let stream = epoch_check::epoch_check(info, stream); diff --git a/src/stream/src/executor/wrapper/schema_check.rs b/src/stream/src/executor/wrapper/schema_check.rs index 4bc99d3b3c8bb..d23eca2b455c6 100644 --- a/src/stream/src/executor/wrapper/schema_check.rs +++ b/src/stream/src/executor/wrapper/schema_check.rs @@ -27,13 +27,25 @@ pub async fn schema_check(info: Arc, input: impl MessageStream) { for message in input { let message = message?; - if let Message::Chunk(chunk) = &message { - risingwave_common::util::schema_check::schema_check( + match &message { + Message::Chunk(chunk) => risingwave_common::util::schema_check::schema_check( 
info.schema.fields().iter().map(|f| &f.data_type), chunk.columns(), - ) - .unwrap_or_else(|e| panic!("schema check failed on {}: {}", info.identity, e)); + ), + Message::Watermark(watermark) => { + let expected = info.schema.fields()[watermark.col_idx].data_type(); + let found = &watermark.data_type; + if &expected != found { + Err(format!( + "watermark type mismatched: expected {expected}, found {found}" + )) + } else { + Ok(()) + } + } + Message::Barrier(_) => Ok(()), } + .unwrap_or_else(|e| panic!("schema check failed on {}: {}", info.identity, e)); yield message; } diff --git a/src/stream/src/from_proto/agg_common.rs b/src/stream/src/from_proto/agg_common.rs index bf409a699a410..9a527df245133 100644 --- a/src/stream/src/from_proto/agg_common.rs +++ b/src/stream/src/from_proto/agg_common.rs @@ -19,7 +19,7 @@ use std::sync::Arc; use risingwave_common::bail; use risingwave_common::buffer::Bitmap; use risingwave_common::types::DataType; -use risingwave_common::util::sort_util::{OrderPair, OrderType}; +use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; use risingwave_expr::expr::{build_from_prost, AggKind}; use super::*; @@ -46,16 +46,13 @@ pub fn build_agg_call_from_prost( ), _ => bail!("Too many/few arguments for {:?}", agg_kind), }; - let order_pairs = agg_call_proto + let column_orders = agg_call_proto .get_order_by() .iter() .map(|col_order| { let col_idx = col_order.get_column_index() as usize; - let order_type = - OrderType::from_protobuf(&col_order.get_order_type().unwrap().direction()); - // TODO(yuchao): `nulls first/last` is not supported yet, so it's ignore here, - // see also `risingwave_common::util::sort_util::compare_values` - OrderPair::new(col_idx, order_type) + let order_type = OrderType::from_protobuf(col_order.get_order_type().unwrap()); + ColumnOrder::new(col_idx, order_type) }) .collect(); let filter = match agg_call_proto.filter { @@ -66,7 +63,7 @@ pub fn build_agg_call_from_prost( kind: agg_kind, args, return_type: DataType::from(agg_call_proto.get_return_type()?), - order_pairs, + column_orders, append_only, filter, distinct: agg_call_proto.distinct, diff --git a/src/stream/src/from_proto/barrier_recv.rs b/src/stream/src/from_proto/barrier_recv.rs new file mode 100644 index 0000000000000..d4e164a38e456 --- /dev/null +++ b/src/stream/src/from_proto/barrier_recv.rs @@ -0,0 +1,49 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
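The builder that follows creates a fresh unbounded channel and registers its sender with the shared barrier manager, keeping the receiver for the executor. A tokio sketch of that registration pattern, with a hypothetical registry standing in for `lock_barrier_manager()`:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};

type ActorId = u32;

// Hypothetical stand-in for the barrier manager: one sender per actor,
// each barrier is broadcast to all of them.
#[derive(Default)]
struct BarrierManager {
    senders: Mutex<HashMap<ActorId, UnboundedSender<u64>>>,
}

impl BarrierManager {
    fn register_sender(&self, actor_id: ActorId, tx: UnboundedSender<u64>) {
        self.senders.lock().unwrap().insert(actor_id, tx);
    }

    fn broadcast(&self, barrier_epoch: u64) {
        for tx in self.senders.lock().unwrap().values() {
            let _ = tx.send(barrier_epoch);
        }
    }
}

#[tokio::main]
async fn main() {
    let manager = Arc::new(BarrierManager::default());

    // What the builder does: create a channel, hand the sender to the
    // manager, keep the receiver for the executor.
    let (tx, mut rx) = unbounded_channel();
    manager.register_sender(1, tx);

    manager.broadcast(100);
    assert_eq!(rx.recv().await, Some(100));
}
```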
+ +use risingwave_pb::stream_plan::BarrierRecvNode; +use tokio::sync::mpsc::unbounded_channel; + +use super::*; +use crate::executor::BarrierRecvExecutor; + +pub struct BarrierRecvExecutorBuilder; + +#[async_trait::async_trait] +impl ExecutorBuilder for BarrierRecvExecutorBuilder { + type Node = BarrierRecvNode; + + async fn new_boxed_executor( + params: ExecutorParams, + _node: &Self::Node, + _store: impl StateStore, + stream: &mut LocalStreamManagerCore, + ) -> StreamResult { + assert!( + params.input.is_empty(), + "barrier receiver should not have input" + ); + + let (sender, barrier_receiver) = unbounded_channel(); + stream + .context + .lock_barrier_manager() + .register_sender(params.actor_context.id, sender); + + Ok( + BarrierRecvExecutor::new(params.actor_context, barrier_receiver, params.executor_id) + .boxed(), + ) + } +} diff --git a/src/stream/src/from_proto/batch_query.rs b/src/stream/src/from_proto/batch_query.rs index 24ee256da0d08..7d24f04bed1fc 100644 --- a/src/stream/src/from_proto/batch_query.rs +++ b/src/stream/src/from_proto/batch_query.rs @@ -44,7 +44,7 @@ impl ExecutorBuilder for BatchQueryExecutorBuilder { let order_types = table_desc .pk .iter() - .map(|desc| OrderType::from_protobuf(&desc.get_order_type().unwrap().direction())) + .map(|desc| OrderType::from_protobuf(desc.get_order_type().unwrap())) .collect_vec(); let column_descs = table_desc @@ -66,14 +66,14 @@ impl ExecutorBuilder for BatchQueryExecutorBuilder { .map(|k| k.column_index as usize) .collect_vec(); - let dist_key_indices = table_desc - .dist_key_indices + let dist_key_in_pk_indices = table_desc + .dist_key_in_pk_indices .iter() .map(|&k| k as usize) .collect_vec(); let distribution = match params.vnode_bitmap { Some(vnodes) => Distribution { - dist_key_indices, + dist_key_in_pk_indices, vnodes: vnodes.into(), }, None => Distribution::fallback(), diff --git a/src/stream/src/from_proto/chain.rs b/src/stream/src/from_proto/chain.rs index 241c93aaedf19..2cf464acb9522 100644 --- a/src/stream/src/from_proto/chain.rs +++ b/src/stream/src/from_proto/chain.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use risingwave_common::catalog::{ColumnDesc, TableId, TableOption}; +use risingwave_common::catalog::{ColumnDesc, ColumnId, TableId, TableOption}; use risingwave_common::util::sort_util::OrderType; use risingwave_pb::plan_common::StorageTableDesc; use risingwave_pb::stream_plan::{ChainNode, ChainType}; @@ -36,12 +36,6 @@ impl ExecutorBuilder for ChainExecutorBuilder { ) -> StreamResult { let [mview, snapshot]: [_; 2] = params.input.try_into().unwrap(); - let upstream_indices: Vec = node - .upstream_column_indices - .iter() - .map(|&i| i as usize) - .collect(); - // For reporting the progress. let progress = stream .context @@ -51,36 +45,36 @@ impl ExecutorBuilder for ChainExecutorBuilder { // its schema. let schema = snapshot.schema().clone(); + let output_indices = node + .output_indices + .iter() + .map(|&i| i as usize) + .collect_vec(); + + // For `Chain`s other than `Backfill`, there should be no extra mapping required. We can + // directly output the columns received from the upstream or snapshot. 
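+        // In other words, `output_indices` must be the identity mapping
+        // `0..schema.len()`; the assertion below enforces this invariant.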
+ if !matches!(node.chain_type(), ChainType::Backfill) { + let all_indices = (0..schema.len()).collect_vec(); + assert_eq!(output_indices, all_indices); + } + let executor = match node.chain_type() { - ChainType::Chain => ChainExecutor::new( - snapshot, - mview, - upstream_indices, - progress, - schema, - params.pk_indices, - false, - ) - .boxed(), - ChainType::UpstreamOnly => ChainExecutor::new( - snapshot, - mview, - upstream_indices, - progress, - schema, - params.pk_indices, - true, - ) - .boxed(), - ChainType::Rearrange => RearrangedChainExecutor::new( - snapshot, - mview, - upstream_indices, - progress, - schema, - params.pk_indices, - ) - .boxed(), + ChainType::Chain | ChainType::UpstreamOnly => { + let upstream_only = matches!(node.chain_type(), ChainType::UpstreamOnly); + ChainExecutor::new( + snapshot, + mview, + progress, + schema, + params.pk_indices, + upstream_only, + ) + .boxed() + } + ChainType::Rearrange => { + RearrangedChainExecutor::new(snapshot, mview, progress, schema, params.pk_indices) + .boxed() + } ChainType::Backfill => { let table_desc: &StorageTableDesc = node.get_table_desc()?; let table_id = TableId { @@ -90,9 +84,7 @@ impl ExecutorBuilder for ChainExecutorBuilder { let order_types = table_desc .pk .iter() - .map(|desc| { - OrderType::from_protobuf(&desc.get_order_type().unwrap().direction()) - }) + .map(|desc| OrderType::from_protobuf(desc.get_order_type().unwrap())) .collect_vec(); let column_descs = table_desc @@ -100,7 +92,11 @@ impl ExecutorBuilder for ChainExecutorBuilder { .iter() .map(ColumnDesc::from) .collect_vec(); - let column_ids = column_descs.iter().map(|x| x.column_id).collect_vec(); + let column_ids = node + .upstream_column_ids + .iter() + .map(ColumnId::from) + .collect_vec(); // Use indices based on full table instead of streaming executor output. 
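                // (Here, `pk_indices` and the distribution key refer to positions in
                // the storage table's full column list, not in this executor's output.)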
let pk_indices = table_desc @@ -109,14 +105,14 @@ impl ExecutorBuilder for ChainExecutorBuilder { .map(|k| k.column_index as usize) .collect_vec(); - let dist_key_indices = table_desc - .dist_key_indices + let dist_key_in_pk_indices = table_desc + .dist_key_in_pk_indices .iter() .map(|&k| k as usize) .collect_vec(); let distribution = match params.vnode_bitmap { Some(vnodes) => Distribution { - dist_key_indices, + dist_key_in_pk_indices, vnodes: vnodes.into(), }, None => Distribution::fallback(), @@ -154,7 +150,7 @@ impl ExecutorBuilder for ChainExecutorBuilder { BackfillExecutor::new( table, mview, - upstream_indices, + output_indices, progress, schema, params.pk_indices, diff --git a/src/stream/src/from_proto/group_top_n.rs b/src/stream/src/from_proto/group_top_n.rs index 3284da6f32c99..9ca78c9a0ba7e 100644 --- a/src/stream/src/from_proto/group_top_n.rs +++ b/src/stream/src/from_proto/group_top_n.rs @@ -16,7 +16,7 @@ use std::sync::Arc; use risingwave_common::hash::{HashKey, HashKeyDispatcher}; use risingwave_common::types::DataType; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_pb::stream_plan::GroupTopNNode; use super::*; @@ -47,14 +47,18 @@ impl ExecutorBuilder for GroupTopNExecutorBuilder { let storage_key = table .get_pk() .iter() - .map(OrderPair::from_protobuf) + .map(ColumnOrder::from_protobuf) .collect(); let [input]: [_; 1] = params.input.try_into().unwrap(); let group_key_types = group_by .iter() .map(|i| input.schema()[*i].data_type()) .collect(); - let order_by = node.order_by.iter().map(OrderPair::from_protobuf).collect(); + let order_by = node + .order_by + .iter() + .map(ColumnOrder::from_protobuf) + .collect(); assert_eq!(¶ms.pk_indices, input.pk_indices()); let args = GroupTopNExecutorDispatcherArgs { @@ -77,9 +81,9 @@ impl ExecutorBuilder for GroupTopNExecutorBuilder { struct GroupTopNExecutorDispatcherArgs { input: BoxedExecutor, ctx: ActorContextRef, - storage_key: Vec, + storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, group_by: Vec, state_table: StateTable, diff --git a/src/stream/src/from_proto/group_top_n_appendonly.rs b/src/stream/src/from_proto/group_top_n_appendonly.rs index 1d8d4137cd9a9..4312a4484ba0c 100644 --- a/src/stream/src/from_proto/group_top_n_appendonly.rs +++ b/src/stream/src/from_proto/group_top_n_appendonly.rs @@ -30,7 +30,7 @@ use std::sync::Arc; use risingwave_common::hash::{HashKey, HashKeyDispatcher}; use risingwave_common::types::DataType; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_pb::stream_plan::GroupTopNNode; use super::*; @@ -61,14 +61,18 @@ impl ExecutorBuilder for AppendOnlyGroupTopNExecutorBuilder { let storage_key = table .get_pk() .iter() - .map(OrderPair::from_protobuf) + .map(ColumnOrder::from_protobuf) .collect(); let [input]: [_; 1] = params.input.try_into().unwrap(); let group_key_types = group_by .iter() .map(|i| input.schema()[*i].data_type()) .collect(); - let order_by = node.order_by.iter().map(OrderPair::from_protobuf).collect(); + let order_by = node + .order_by + .iter() + .map(ColumnOrder::from_protobuf) + .collect(); assert_eq!(¶ms.pk_indices, input.pk_indices()); let args = AppendOnlyGroupTopNExecutorDispatcherArgs { @@ -91,9 +95,9 @@ impl ExecutorBuilder for AppendOnlyGroupTopNExecutorBuilder { struct AppendOnlyGroupTopNExecutorDispatcherArgs { input: BoxedExecutor, ctx: ActorContextRef, - storage_key: Vec, + 
storage_key: Vec, offset_and_limit: (usize, usize), - order_by: Vec, + order_by: Vec, executor_id: u64, group_by: Vec, state_table: StateTable, diff --git a/src/stream/src/from_proto/lookup.rs b/src/stream/src/from_proto/lookup.rs index 166c9c1eff9a9..df574e5587214 100644 --- a/src/stream/src/from_proto/lookup.rs +++ b/src/stream/src/from_proto/lookup.rs @@ -13,7 +13,7 @@ // limitations under the License. use risingwave_common::catalog::{ColumnDesc, TableId, TableOption}; -use risingwave_common::util::sort_util::{OrderPair, OrderType}; +use risingwave_common::util::sort_util::{ColumnOrder, OrderType}; use risingwave_pb::plan_common::StorageTableDesc; use risingwave_pb::stream_plan::LookupNode; use risingwave_storage::table::batch_table::storage_table::StorageTable; @@ -42,7 +42,7 @@ impl ExecutorBuilder for LookupExecutorBuilder { .get_arrangement_table_info()? .arrange_key_orders .iter() - .map(OrderPair::from_protobuf) + .map(ColumnOrder::from_protobuf) .collect(); let arrangement_col_descs = lookup @@ -65,7 +65,7 @@ impl ExecutorBuilder for LookupExecutorBuilder { let order_types = table_desc .pk .iter() - .map(|desc| OrderType::from_protobuf(&desc.get_order_type().unwrap().direction())) + .map(|desc| OrderType::from_protobuf(desc.get_order_type().unwrap())) .collect_vec(); let column_descs = table_desc @@ -82,14 +82,14 @@ impl ExecutorBuilder for LookupExecutorBuilder { .map(|k| k.column_index as usize) .collect_vec(); - let dist_key_indices = table_desc - .dist_key_indices + let dist_key_in_pk_indices = table_desc + .dist_key_in_pk_indices .iter() .map(|&k| k as usize) .collect_vec(); let distribution = match params.vnode_bitmap { Some(vnodes) => Distribution { - dist_key_indices, + dist_key_in_pk_indices, vnodes: vnodes.into(), }, None => Distribution::fallback(), @@ -125,6 +125,7 @@ impl ExecutorBuilder for LookupExecutorBuilder { ); Ok(Box::new(LookupExecutor::new(LookupExecutorParams { + ctx: params.actor_context, schema: params.schema, arrangement, stream, diff --git a/src/stream/src/from_proto/mod.rs b/src/stream/src/from_proto/mod.rs index 2b800417430a1..d292a39644479 100644 --- a/src/stream/src/from_proto/mod.rs +++ b/src/stream/src/from_proto/mod.rs @@ -15,6 +15,7 @@ //! Build executor from protobuf. 
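+//!
+//! Dispatch overview: `create_executor` below matches each `NodeBody` variant of a
+//! plan node to its `ExecutorBuilder` implementation, e.g. `NodeBody::TopN =>
+//! TopNExecutorBuilder`, or the newly added `NodeBody::BarrierRecv =>
+//! BarrierRecvExecutorBuilder`.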
mod agg_common; +mod barrier_recv; mod batch_query; mod chain; mod dml; @@ -39,6 +40,7 @@ mod row_id_gen; mod sink; mod sort; mod source; +mod temporal_join; mod top_n; mod top_n_appendonly; mod union; @@ -47,9 +49,10 @@ mod watermark_filter; // import for submodules use itertools::Itertools; use risingwave_pb::stream_plan::stream_node::NodeBody; -use risingwave_pb::stream_plan::StreamNode; +use risingwave_pb::stream_plan::{StreamNode, TemporalJoinNode}; use risingwave_storage::StateStore; +use self::barrier_recv::*; use self::batch_query::*; use self::chain::*; use self::dml::*; @@ -74,6 +77,7 @@ use self::row_id_gen::RowIdGenExecutorBuilder; use self::sink::*; use self::sort::*; use self::source::*; +use self::temporal_join::*; use self::top_n::*; use self::top_n_appendonly::*; use self::union::*; @@ -123,7 +127,7 @@ pub async fn create_executor( NodeBody::Source => SourceExecutorBuilder, NodeBody::Sink => SinkExecutorBuilder, NodeBody::Project => ProjectExecutorBuilder, - NodeBody::TopN => TopNExecutorNewBuilder, + NodeBody::TopN => TopNExecutorBuilder, NodeBody::AppendOnlyTopN => AppendOnlyTopNExecutorBuilder, NodeBody::LocalSimpleAgg => LocalSimpleAggExecutorBuilder, NodeBody::GlobalSimpleAgg => GlobalSimpleAggExecutorBuilder, @@ -149,5 +153,7 @@ pub async fn create_executor( NodeBody::Dml => DmlExecutorBuilder, NodeBody::RowIdGen => RowIdGenExecutorBuilder, NodeBody::Now => NowExecutorBuilder, + NodeBody::TemporalJoin => TemporalJoinExecutorBuilder, + NodeBody::BarrierRecv => BarrierRecvExecutorBuilder, } } diff --git a/src/stream/src/from_proto/mview.rs b/src/stream/src/from_proto/mview.rs index 00ba4c4bd5cee..d61bc7ccfe30c 100644 --- a/src/stream/src/from_proto/mview.rs +++ b/src/stream/src/from_proto/mview.rs @@ -15,7 +15,7 @@ use std::sync::Arc; use risingwave_common::catalog::ConflictBehavior; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_common::util::value_encoding::column_aware_row_encoding::ColumnAwareSerde; use risingwave_common::util::value_encoding::BasicSerde; use risingwave_pb::stream_plan::{ArrangeNode, MaterializeNode}; @@ -40,7 +40,7 @@ impl ExecutorBuilder for MaterializeExecutorBuilder { let order_key = node .column_orders .iter() - .map(OrderPair::from_protobuf) + .map(ColumnOrder::from_protobuf) .collect(); let table = node.get_table()?; @@ -104,7 +104,7 @@ impl ExecutorBuilder for ArrangeExecutorBuilder { .get_table_info()? 
.arrange_key_orders .iter() - .map(OrderPair::from_protobuf) + .map(ColumnOrder::from_protobuf) .collect(); let table = node.get_table()?; diff --git a/src/stream/src/from_proto/project.rs b/src/stream/src/from_proto/project.rs index e9c0808921c48..a148e39f458bc 100644 --- a/src/stream/src/from_proto/project.rs +++ b/src/stream/src/from_proto/project.rs @@ -15,6 +15,7 @@ use multimap::MultiMap; use risingwave_common::util::iter_util::ZipEqFast; use risingwave_expr::expr::build_from_prost; +use risingwave_pb::expr::expr_node; use risingwave_pb::stream_plan::ProjectNode; use super::*; @@ -49,7 +50,11 @@ impl ExecutorBuilder for ProjectExecutorBuilder { .map(|key| *key as usize), ), ); - + let extremely_light = node.get_select_list().iter().all(|expr| { + let expr_type = expr.get_expr_type().unwrap(); + expr_type == expr_node::Type::InputRef || expr_type == expr_node::Type::ConstantValue + }); + let materialize_selectivity_threshold = if extremely_light { 0.0 } else { 0.5 }; Ok(ProjectExecutor::new( params.actor_context, input, @@ -57,6 +62,7 @@ impl ExecutorBuilder for ProjectExecutorBuilder { project_exprs, params.executor_id, watermark_derivations, + materialize_selectivity_threshold, ) .boxed()) } diff --git a/src/stream/src/from_proto/sink.rs b/src/stream/src/from_proto/sink.rs index ee7dc0628ba60..f200d7211dc1d 100644 --- a/src/stream/src/from_proto/sink.rs +++ b/src/stream/src/from_proto/sink.rs @@ -37,10 +37,10 @@ impl ExecutorBuilder for SinkExecutorBuilder { let sink_type = SinkType::from_proto(sink_desc.get_sink_type().unwrap()); let mut properties = sink_desc.get_properties().clone(); let pk_indices = sink_desc - .pk + .downstream_pk .iter() - .map(|pk| pk.column_index as usize) - .collect::>(); + .map(|i| *i as usize) + .collect_vec(); let schema = sink_desc.columns.iter().map(Into::into).collect(); // This field can be used to distinguish a specific actor in parallelism to prevent // transaction execution errors diff --git a/src/stream/src/from_proto/source.rs b/src/stream/src/from_proto/source.rs index 6f34d1107b192..2fed46c13e671 100644 --- a/src/stream/src/from_proto/source.rs +++ b/src/stream/src/from_proto/source.rs @@ -43,6 +43,12 @@ impl ExecutorBuilder for SourceExecutorBuilder { .context .lock_barrier_manager() .register_sender(params.actor_context.id, sender); + let barrier_interval_ms = params + .env + .system_params_manager_ref() + .get_params() + .load() + .barrier_interval_ms() as u64; if let Some(source) = &node.source_inner { let source_id = TableId::new(source.source_id); @@ -108,7 +114,7 @@ impl ExecutorBuilder for SourceExecutorBuilder { stream_source_core, params.executor_stats, barrier_receiver, - stream.config.barrier_interval_ms as u64, + barrier_interval_ms, params.executor_id, )?)) } else { @@ -119,7 +125,7 @@ impl ExecutorBuilder for SourceExecutorBuilder { Some(stream_source_core), params.executor_stats, barrier_receiver, - stream.config.barrier_interval_ms as u64, + barrier_interval_ms, params.executor_id, ))) } @@ -133,7 +139,7 @@ impl ExecutorBuilder for SourceExecutorBuilder { None, params.executor_stats, barrier_receiver, - stream.config.barrier_interval_ms as u64, + barrier_interval_ms, params.executor_id, ))) } diff --git a/src/stream/src/from_proto/temporal_join.rs b/src/stream/src/from_proto/temporal_join.rs new file mode 100644 index 0000000000000..7344aebd6c551 --- /dev/null +++ b/src/stream/src/from_proto/temporal_join.rs @@ -0,0 +1,217 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use risingwave_common::catalog::{ColumnDesc, TableId, TableOption}; +use risingwave_common::util::sort_util::OrderType; +use risingwave_expr::expr::{build_from_prost, BoxedExpression}; +use risingwave_pb::plan_common::{JoinType as JoinTypeProto, StorageTableDesc}; +use risingwave_storage::table::batch_table::storage_table::StorageTable; +use risingwave_storage::table::Distribution; + +use super::*; +use crate::executor::monitor::StreamingMetrics; +use crate::executor::{ActorContextRef, JoinType, PkIndices, TemporalJoinExecutor}; +use crate::task::AtomicU64Ref; + +pub struct TemporalJoinExecutorBuilder; + +#[async_trait::async_trait] +impl ExecutorBuilder for TemporalJoinExecutorBuilder { + type Node = TemporalJoinNode; + + async fn new_boxed_executor( + params: ExecutorParams, + node: &Self::Node, + store: impl StateStore, + stream: &mut LocalStreamManagerCore, + ) -> StreamResult { + let table = { + let table_desc: &StorageTableDesc = node.get_table_desc()?; + let table_id = TableId { + table_id: table_desc.table_id, + }; + + let order_types = table_desc + .pk + .iter() + .map(|desc| OrderType::from_protobuf(desc.get_order_type().unwrap())) + .collect_vec(); + + let column_descs = table_desc + .columns + .iter() + .map(ColumnDesc::from) + .collect_vec(); + let column_ids = column_descs.iter().map(|x| x.column_id).collect_vec(); + + // Use indices based on full table instead of streaming executor output. 
+ let pk_indices = table_desc + .pk + .iter() + .map(|k| k.column_index as usize) + .collect_vec(); + + let dist_key_in_pk_indices = table_desc + .dist_key_in_pk_indices + .iter() + .map(|&k| k as usize) + .collect_vec(); + let distribution = match params.vnode_bitmap.clone() { + Some(vnodes) => Distribution { + dist_key_in_pk_indices, + vnodes: vnodes.into(), + }, + None => Distribution::fallback(), + }; + + let table_option = TableOption { + retention_seconds: if table_desc.retention_seconds > 0 { + Some(table_desc.retention_seconds) + } else { + None + }, + }; + + let value_indices = table_desc + .get_value_indices() + .iter() + .map(|&k| k as usize) + .collect_vec(); + + let prefix_hint_len = table_desc.get_read_prefix_len_hint() as usize; + + StorageTable::new_partial( + store, + table_id, + column_descs, + column_ids, + order_types, + pk_indices, + distribution, + table_option, + value_indices, + prefix_hint_len, + table_desc.versioned, + ) + }; + + let [source_l, source_r]: [_; 2] = params.input.try_into().unwrap(); + + let left_join_keys = node + .get_left_key() + .iter() + .map(|key| *key as usize) + .collect_vec(); + + let right_join_keys = node + .get_right_key() + .iter() + .map(|key| *key as usize) + .collect_vec(); + + let null_safe = node.get_null_safe().to_vec(); + + let condition = match node.get_condition() { + Ok(cond_prost) => Some(build_from_prost(cond_prost)?), + Err(_) => None, + }; + + let table_output_indices = node + .get_table_output_indices() + .iter() + .map(|&x| x as usize) + .collect_vec(); + + let output_indices = node + .get_output_indices() + .iter() + .map(|&x| x as usize) + .collect_vec(); + + let dispatcher_args = TemporalJoinExecutorDispatcherArgs { + ctx: params.actor_context, + left: source_l, + right: source_r, + right_table: table, + left_join_keys, + right_join_keys, + null_safe, + condition, + pk_indices: params.pk_indices, + output_indices, + table_output_indices, + executor_id: params.executor_id, + watermark_epoch: stream.get_watermark_epoch(), + chunk_size: params.env.config().developer.stream_chunk_size, + metrics: params.executor_stats, + join_type_proto: node.get_join_type()?, + }; + + dispatcher_args.dispatch() + } +} + +struct TemporalJoinExecutorDispatcherArgs { + ctx: ActorContextRef, + left: BoxedExecutor, + right: BoxedExecutor, + right_table: StorageTable, + left_join_keys: Vec, + right_join_keys: Vec, + null_safe: Vec, + condition: Option, + pk_indices: PkIndices, + output_indices: Vec, + table_output_indices: Vec, + executor_id: u64, + watermark_epoch: AtomicU64Ref, + chunk_size: usize, + metrics: Arc, + join_type_proto: JoinTypeProto, +} + +impl TemporalJoinExecutorDispatcherArgs { + pub fn dispatch(self) -> StreamResult { + macro_rules! 
build { + ($join_type:ident) => { + Ok(Box::new( + TemporalJoinExecutor::::new( + self.ctx, + self.left, + self.right, + self.right_table, + self.left_join_keys, + self.right_join_keys, + self.null_safe, + self.condition, + self.pk_indices, + self.output_indices, + self.table_output_indices, + self.executor_id, + self.watermark_epoch, + self.metrics, + self.chunk_size, + ), + )) + }; + } + match self.join_type_proto { + JoinTypeProto::Inner => build!(Inner), + JoinTypeProto::LeftOuter => build!(LeftOuter), + _ => unreachable!(), + } + } +} diff --git a/src/stream/src/from_proto/top_n.rs b/src/stream/src/from_proto/top_n.rs index 757fa2b7b6493..930db956c2b21 100644 --- a/src/stream/src/from_proto/top_n.rs +++ b/src/stream/src/from_proto/top_n.rs @@ -14,17 +14,17 @@ use std::sync::Arc; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_pb::stream_plan::TopNNode; use super::*; use crate::common::table::state_table::StateTable; use crate::executor::TopNExecutor; -pub struct TopNExecutorNewBuilder; +pub struct TopNExecutorBuilder; #[async_trait::async_trait] -impl ExecutorBuilder for TopNExecutorNewBuilder { +impl ExecutorBuilder for TopNExecutorBuilder { type Node = TopNNode; async fn new_boxed_executor( @@ -41,9 +41,13 @@ impl ExecutorBuilder for TopNExecutorNewBuilder { let storage_key = table .get_pk() .iter() - .map(OrderPair::from_protobuf) + .map(ColumnOrder::from_protobuf) + .collect(); + let order_by = node + .order_by + .iter() + .map(ColumnOrder::from_protobuf) .collect(); - let order_by = node.order_by.iter().map(OrderPair::from_protobuf).collect(); assert_eq!(¶ms.pk_indices, input.pk_indices()); if node.with_ties { diff --git a/src/stream/src/from_proto/top_n_appendonly.rs b/src/stream/src/from_proto/top_n_appendonly.rs index f4d06b8b97601..933cf0e5d5071 100644 --- a/src/stream/src/from_proto/top_n_appendonly.rs +++ b/src/stream/src/from_proto/top_n_appendonly.rs @@ -14,7 +14,7 @@ use std::sync::Arc; -use risingwave_common::util::sort_util::OrderPair; +use risingwave_common::util::sort_util::ColumnOrder; use risingwave_pb::stream_plan::TopNNode; use super::*; @@ -41,9 +41,13 @@ impl ExecutorBuilder for AppendOnlyTopNExecutorBuilder { let storage_key = table .get_pk() .iter() - .map(OrderPair::from_protobuf) + .map(ColumnOrder::from_protobuf) + .collect(); + let order_by = node + .order_by + .iter() + .map(ColumnOrder::from_protobuf) .collect(); - let order_by = node.order_by.iter().map(OrderPair::from_protobuf).collect(); assert_eq!(¶ms.pk_indices, input.pk_indices()); if node.with_ties { diff --git a/src/stream/src/lib.rs b/src/stream/src/lib.rs index 20885d8311975..aea2f732e72d3 100644 --- a/src/stream/src/lib.rs +++ b/src/stream/src/lib.rs @@ -49,13 +49,3 @@ pub mod error; pub mod executor; mod from_proto; pub mod task; - -/// Controls the behavior when a compute error happens. -/// -/// - If set to `false`, `NULL` will be inserted. -/// - TODO: If set to `true`, The MV will be suspended and removed from further checkpoints. It can -/// still be used to serve outdated data without corruption. -/// -/// See also . 
-#[expect(dead_code)] -const STRICT_MODE: bool = false; diff --git a/src/stream/src/task/barrier_manager.rs b/src/stream/src/task/barrier_manager.rs index 3116aba68164f..548117bc8f12c 100644 --- a/src/stream/src/task/barrier_manager.rs +++ b/src/stream/src/task/barrier_manager.rs @@ -16,7 +16,7 @@ use std::collections::{HashMap, HashSet}; use anyhow::anyhow; use prometheus::HistogramTimer; -use risingwave_pb::stream_service::barrier_complete_response::CreateMviewProgress as ProstCreateMviewProgress; +use risingwave_pb::stream_service::barrier_complete_response::PbCreateMviewProgress; use tokio::sync::mpsc::UnboundedSender; use tokio::sync::oneshot; use tokio::sync::oneshot::Receiver; @@ -42,7 +42,7 @@ pub const ENABLE_BARRIER_AGGREGATION: bool = false; /// Collect result of some barrier on current compute node. Will be reported to the meta service. #[derive(Debug)] pub struct CollectResult { - pub create_mview_progress: Vec, + pub create_mview_progress: Vec, } enum BarrierState { diff --git a/src/stream/src/task/env.rs b/src/stream/src/task/env.rs index 32c468ffaddc2..e4499cd7c2336 100644 --- a/src/stream/src/task/env.rs +++ b/src/stream/src/task/env.rs @@ -16,9 +16,12 @@ use std::sync::Arc; use hytra::TrAdder; use risingwave_common::config::StreamingConfig; +use risingwave_common::system_param::local_manager::LocalSystemParamsManagerRef; use risingwave_common::util::addr::HostAddr; use risingwave_connector::source::monitor::SourceMetrics; use risingwave_connector::ConnectorParams; +#[cfg(test)] +use risingwave_pb::connector_service::SinkPayloadFormat; use risingwave_source::dml_manager::DmlManagerRef; use risingwave_storage::StateStoreImpl; @@ -46,6 +49,9 @@ pub struct StreamEnvironment { /// Manages dml information. dml_manager: DmlManagerRef, + /// Read the latest system parameters. + system_params_manager: LocalSystemParamsManagerRef, + /// Metrics for source. source_metrics: Arc, @@ -54,6 +60,7 @@ pub struct StreamEnvironment { } impl StreamEnvironment { + #[allow(clippy::too_many_arguments)] pub fn new( server_addr: HostAddr, connector_params: ConnectorParams, @@ -61,6 +68,7 @@ impl StreamEnvironment { worker_id: WorkerNodeId, state_store: StateStoreImpl, dml_manager: DmlManagerRef, + system_params_manager: LocalSystemParamsManagerRef, source_metrics: Arc, ) -> Self { StreamEnvironment { @@ -70,6 +78,7 @@ impl StreamEnvironment { worker_id, state_store, dml_manager, + system_params_manager, source_metrics, total_mem_val: Arc::new(TrAdder::new()), } @@ -78,17 +87,19 @@ impl StreamEnvironment { // Create an instance for testing purpose. 
#[cfg(test)] pub fn for_test() -> Self { + use risingwave_common::system_param::local_manager::LocalSystemParamsManager; use risingwave_source::dml_manager::DmlManager; use risingwave_storage::monitor::MonitoredStorageMetrics; StreamEnvironment { server_addr: "127.0.0.1:5688".parse().unwrap(), - connector_params: ConnectorParams::new(None), + connector_params: ConnectorParams::new(None, SinkPayloadFormat::Json), config: Arc::new(StreamingConfig::default()), worker_id: WorkerNodeId::default(), state_store: StateStoreImpl::shared_in_memory_store(Arc::new( MonitoredStorageMetrics::unused(), )), dml_manager: Arc::new(DmlManager::default()), + system_params_manager: Arc::new(LocalSystemParamsManager::for_test()), source_metrics: Arc::new(SourceMetrics::default()), total_mem_val: Arc::new(TrAdder::new()), } @@ -118,6 +129,10 @@ impl StreamEnvironment { self.dml_manager.clone() } + pub fn system_params_manager_ref(&self) -> LocalSystemParamsManagerRef { + self.system_params_manager.clone() + } + pub fn source_metrics(&self) -> Arc { self.source_metrics.clone() } diff --git a/src/stream/src/task/stream_manager.rs b/src/stream/src/task/stream_manager.rs index c9e26b70b4e61..3edac01990869 100644 --- a/src/stream/src/task/stream_manager.rs +++ b/src/stream/src/task/stream_manager.rs @@ -310,18 +310,19 @@ impl LocalStreamManager { Ok(()) } - pub async fn drop_actor(&self, actors: &[ActorId]) -> StreamResult<()> { + /// Drop the resources of the given actors. + pub async fn drop_actors(&self, actors: &[ActorId]) -> StreamResult<()> { let mut core = self.core.lock().await; - for id in actors { - core.drop_actor(*id); + for &id in actors { + core.drop_actor(id); } tracing::debug!(actors = ?actors, "drop actors"); Ok(()) } - /// Force stop all actors on this worker. + /// Force stop all actors on this worker, and then drop their resources. pub async fn stop_all_actors(&self) -> StreamResult<()> { - self.core.lock().await.drop_all_actors().await; + self.core.lock().await.stop_all_actors().await; // Clear shared buffer in storage to release memory self.clear_storage_buffer().await; self.clear_all_senders_and_collect_rx(); @@ -557,7 +558,7 @@ impl LocalStreamManagerCore { // If there're multiple stateful executors in this actor, we will wrap it into a subtask. let executor = if has_stateful && is_stateful { - let (subtask, executor) = subtask::wrap(executor); + let (subtask, executor) = subtask::wrap(executor, actor_context.id); subtasks.push(subtask); executor.boxed() } else { @@ -781,14 +782,16 @@ impl LocalStreamManagerCore { .inspect(|handle| handle.abort()); self.context.actor_infos.write().remove(&actor_id); self.actors.remove(&actor_id); - // Task should have already stopped when this method is invoked. - self.handles - .remove(&actor_id) - .inspect(|handle| handle.abort()); + + // Task should have already stopped when this method is invoked. There might be some + // clean-up work left (like dropping in-memory data structures), but we don't have to wait + // for them to finish, in order to make this request non-blocking. + self.handles.remove(&actor_id); } - /// `drop_all_actors` is invoked by meta node via RPC for recovery purpose. - async fn drop_all_actors(&mut self) { + /// `stop_all_actors` is invoked by meta node via RPC for recovery purpose. Different from the + /// `drop_actor`, the execution of the actors will be aborted. 
+ async fn stop_all_actors(&mut self) { for (actor_id, handle) in &self.handles { tracing::debug!("force stopping actor {}", actor_id); handle.abort(); diff --git a/src/tests/compaction_test/src/compaction_test_runner.rs b/src/tests/compaction_test/src/compaction_test_runner.rs index c266eef5096a6..1a84d4406872a 100644 --- a/src/tests/compaction_test/src/compaction_test_runner.rs +++ b/src/tests/compaction_test/src/compaction_test_runner.rs @@ -43,7 +43,7 @@ use risingwave_storage::store::{ReadOptions, StateStoreRead}; use risingwave_storage::{StateStore, StateStoreImpl}; const SST_ID_SHIFT_COUNT: u32 = 1000000; -const CHECKPOINT_FREQ_FOR_REPLAY: usize = 99999999; +const CHECKPOINT_FREQ_FOR_REPLAY: u64 = 99999999; use crate::CompactionTestOpts; @@ -82,6 +82,7 @@ pub async fn compaction_test_main( let _meta_handle = tokio::spawn(start_meta_node( meta_listen_addr.clone(), + opts.state_store.clone(), opts.config_path_for_meta.clone(), )); @@ -92,7 +93,6 @@ pub async fn compaction_test_main( let (compactor_thrd, compactor_shutdown_tx) = start_compactor_thread( opts.meta_address.clone(), advertise_addr.to_string(), - opts.state_store.clone(), opts.config_path.clone(), ); @@ -124,13 +124,17 @@ pub async fn compaction_test_main( Ok(()) } -pub async fn start_meta_node(listen_addr: String, config_path: String) { +pub async fn start_meta_node(listen_addr: String, state_store: String, config_path: String) { let meta_opts = risingwave_meta::MetaNodeOpts::parse_from([ "meta-node", "--listen-addr", &listen_addr, + "--advertise-addr", + &listen_addr, "--backend", "mem", + "--state-store", + &state_store, "--config-path", &config_path, ]); @@ -138,36 +142,33 @@ pub async fn start_meta_node(listen_addr: String, config_path: String) { &meta_opts.config_path, Some(meta_opts.override_opts.clone()), ); - assert!( - config.meta.enable_compaction_deterministic, - "enable_compaction_deterministic should be set" - ); - // We set a large checkpoint frequency to prevent the embedded meta node // to commit new epochs to avoid bumping the hummock version during version log replay. 
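    // (Checkpoint frequency is now a system parameter, which is why the assertion
    // below reads `config.system.checkpoint_frequency` rather than the old
    // `config.streaming.checkpoint_frequency`.)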
assert_eq!( CHECKPOINT_FREQ_FOR_REPLAY, - config.streaming.checkpoint_frequency + config.system.checkpoint_frequency + ); + assert!( + config.meta.enable_compaction_deterministic, + "enable_compaction_deterministic should be set" ); + risingwave_meta::start(meta_opts).await } async fn start_compactor_node( meta_rpc_endpoint: String, advertise_addr: String, - state_store: String, config_path: String, ) { let opts = risingwave_compactor::CompactorOpts::parse_from([ "compactor-node", - "--host", + "--listen-addr", "127.0.0.1:5550", "--advertise-addr", &advertise_addr, "--meta-address", &meta_rpc_endpoint, - "--state-store", - &state_store, "--config-path", &config_path, ]); @@ -177,7 +178,6 @@ async fn start_compactor_node( pub fn start_compactor_thread( meta_endpoint: String, advertise_addr: String, - state_store: String, config_path: String, ) -> (JoinHandle<()>, std::sync::mpsc::Sender<()>) { let (tx, rx) = std::sync::mpsc::channel(); @@ -189,7 +189,7 @@ pub fn start_compactor_thread( runtime.block_on(async { tokio::spawn(async { tracing::info!("Starting compactor node"); - start_compactor_node(meta_endpoint, advertise_addr, state_store, config_path).await + start_compactor_node(meta_endpoint, advertise_addr, config_path).await }); rx.recv().unwrap(); }); diff --git a/src/tests/compaction_test/src/delete_range_runner.rs b/src/tests/compaction_test/src/delete_range_runner.rs index d7dd8eb0f724d..7f8f6e275e715 100644 --- a/src/tests/compaction_test/src/delete_range_runner.rs +++ b/src/tests/compaction_test/src/delete_range_runner.rs @@ -37,15 +37,14 @@ use risingwave_meta::hummock::test_utils::setup_compute_env_with_config; use risingwave_meta::hummock::MockHummockMetaClient; use risingwave_object_store::object::object_metrics::ObjectStoreMetrics; use risingwave_object_store::object::parse_remote_object_store; -use risingwave_pb::catalog::Table as ProstTable; +use risingwave_pb::catalog::PbTable; use risingwave_pb::hummock::{CompactionConfig, CompactionGroupInfo}; use risingwave_pb::meta::SystemParams; use risingwave_rpc_client::HummockMetaClient; -use risingwave_storage::hummock::backup_reader::BackupReader; use risingwave_storage::hummock::compactor::{CompactionExecutor, CompactorContext}; use risingwave_storage::hummock::sstable_store::SstableStoreRef; use risingwave_storage::hummock::{ - HummockStorage, MemoryLimiter, SstableIdManager, SstableStore, TieredCache, + HummockStorage, MemoryLimiter, SstableObjectIdManager, SstableStore, TieredCache, }; use risingwave_storage::monitor::{CompactorMetrics, HummockStateStoreMetrics}; use risingwave_storage::opts::StorageOpts; @@ -103,7 +102,7 @@ async fn compaction_test( worker_node.id, )); - let delete_key_table = ProstTable { + let delete_key_table = PbTable { id: 1, schema_id: 1, database_id: 1, @@ -186,7 +185,6 @@ async fn compaction_test( let store = HummockStorage::new( storage_opts.clone(), sstable_store.clone(), - BackupReader::unused(), meta_client.clone(), get_notification_client_for_test(env, hummock_manager_ref.clone(), worker_node), state_store_metrics.clone(), @@ -194,7 +192,7 @@ async fn compaction_test( compactor_metrics.clone(), ) .await?; - let sstable_id_manager = store.sstable_id_manager().clone(); + let sstable_object_id_manager = store.sstable_object_id_manager().clone(); let filter_key_extractor_manager = store.filter_key_extractor_manager().clone(); filter_key_extractor_manager.update( 1, @@ -214,7 +212,7 @@ async fn compaction_test( sstable_store, meta_client.clone(), filter_key_extractor_manager, - sstable_id_manager, + 
sstable_object_id_manager,
        compactor_metrics,
    );
    run_compare_result(&store, meta_client.clone(), test_range, test_count)
@@ -533,7 +531,7 @@ fn run_compactor_thread(
     sstable_store: SstableStoreRef,
     meta_client: Arc<MockHummockMetaClient>,
     filter_key_extractor_manager: Arc<FilterKeyExtractorManager>,
-    sstable_id_manager: Arc<SstableIdManager>,
+    sstable_object_id_manager: Arc<SstableObjectIdManager>,
     compactor_metrics: Arc<CompactorMetrics>,
 ) -> (
     tokio::task::JoinHandle<()>,
@@ -548,7 +546,7 @@ fn run_compactor_thread(
         compaction_executor: Arc::new(CompactionExecutor::new(None)),
         filter_key_extractor_manager,
         read_memory_limiter: MemoryLimiter::unlimit(),
-        sstable_id_manager,
+        sstable_object_id_manager,
         task_progress_manager: Default::default(),
         compactor_runtime_config: Arc::new(tokio::sync::Mutex::new(CompactorRuntimeConfig {
             max_concurrent_task_number: 4,
diff --git a/src/tests/e2e_extended_mode/Cargo.toml b/src/tests/e2e_extended_mode/Cargo.toml
new file mode 100644
index 0000000000000..831c7ba35a636
--- /dev/null
+++ b/src/tests/e2e_extended_mode/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "risingwave_e2e_extended_mode_test"
+version = { workspace = true }
+edition = { workspace = true }
+homepage = { workspace = true }
+keywords = { workspace = true }
+license = { workspace = true }
+repository = { workspace = true }
+
+[package.metadata.cargo-machete]
+ignored = ["workspace-hack"]
+
+[package.metadata.cargo-udeps.ignore]
+normal = ["workspace-hack"]
+
+[dependencies]
+anyhow = { version = "1", features = ["backtrace"] }
+chrono = { version = "0.4", features = ["serde"] }
+clap = { version = "4", features = ["derive"] }
+pg_interval = "0.4"
+rust_decimal = { version = "1.25", features = ["db-postgres", "db-tokio-postgres"] }
+tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] }
+tokio-postgres = { version = "0.7", features = ["with-chrono-0_4"] }
+tracing = "0.1"
+tracing-subscriber = "0.3.16"
+
+[[bin]]
+name = "risingwave_e2e_extended_mode_test"
+path = "src/main.rs"
diff --git a/src/tests/e2e_extended_mode/README.md b/src/tests/e2e_extended_mode/README.md
new file mode 100644
index 0000000000000..5c0a1fbc96871
--- /dev/null
+++ b/src/tests/e2e_extended_mode/README.md
@@ -0,0 +1,21 @@
+This is a program used for e2e testing in the extended query mode.
+
+## What is the difference between this and extended_mode/*.slt in e2e_test
+
+For e2e tests in the extended query mode, there are two things we can't test in sqllogictest:
+1. bind parameters
+2. the max row number
+See the [details](https://www.postgresql.org/docs/15/protocol-flow.html#PROTOCOL-FLOW-PIPELINING:~:text=Once%20a%20portal,count%20is%20ignored).
+
+So until sqllogictest supports these, we test them in this program.
+
+In the future, we may merge it into e2e_test/extended_query.
+
+## How to run
+
+```shell
+RUST_BACKTRACE=1 target/debug/risingwave_e2e_extended_mode_test --host 127.0.0.1 \
+    -p 4566 \
+    -u root \
+    --database dev
+```
\ No newline at end of file
diff --git a/src/tests/e2e_extended_mode/src/main.rs b/src/tests/e2e_extended_mode/src/main.rs
new file mode 100644
index 0000000000000..32f5419048492
--- /dev/null
+++ b/src/tests/e2e_extended_mode/src/main.rs
@@ -0,0 +1,48 @@
+// Copyright 2023 RisingWave Labs
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod opts;
+mod test;
+
+use std::process::exit;
+
+use clap::Parser;
+use tracing::{error, info};
+
+use crate::opts::Opts;
+use crate::test::TestSuite;
+
+#[tokio::main(flavor = "multi_thread", worker_threads = 5)]
+async fn main() {
+    exit(run_test().await)
+}
+
+async fn run_test() -> i32 {
+    let opts = Opts::parse();
+
+    tracing_subscriber::fmt::init();
+
+    let test_suite = TestSuite::new(opts);
+
+    match test_suite.test().await {
+        Ok(_) => {
+            info!("Risingwave e2e extended mode test completed successfully!");
+            0
+        }
+        Err(e) => {
+            error!("Risingwave e2e extended mode test failed: {:?}. Please ensure that your psql version is 14.1 or later", e);
+            1
+        }
+    }
+}
diff --git a/src/tests/e2e_extended_mode/src/opts.rs b/src/tests/e2e_extended_mode/src/opts.rs
new file mode 100644
index 0000000000000..bd83c54e3e8ff
--- /dev/null
+++ b/src/tests/e2e_extended_mode/src/opts.rs
@@ -0,0 +1,33 @@
+// Copyright 2023 RisingWave Labs
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use clap::{Parser, ValueHint};
+
+#[derive(Parser, Debug, Clone)]
+pub struct Opts {
+    /// Database name used to connect to the PostgreSQL server.
+    #[clap(name = "DB", long = "database", default_value = "dev")]
+    pub pg_db_name: String,
+    /// Username used to connect to the PostgreSQL server.
+    #[clap(name = "PG_USERNAME", short = 'u', long = "user", default_value = "postgres", value_hint = ValueHint::Username)]
+    pub pg_user_name: String,
+    /// PostgreSQL server address to test against.
+    #[clap(name = "PG_SERVER_ADDRESS", long = "host", default_value = "localhost")]
+    pub pg_server_host: String,
+    /// PostgreSQL server port to test against.
+    #[clap(name = "PG_SERVER_PORT", short = 'p', long = "port")]
+    pub pg_server_port: u16,
+    #[clap(name = "PG_PASSWORD", long = "password", default_value = "")]
+    pub pg_password: String,
+}
diff --git a/src/tests/e2e_extended_mode/src/test.rs b/src/tests/e2e_extended_mode/src/test.rs
new file mode 100644
index 0000000000000..abf701af763ce
--- /dev/null
+++ b/src/tests/e2e_extended_mode/src/test.rs
@@ -0,0 +1,334 @@
+// Copyright 2023 RisingWave Labs
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::anyhow; +use chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc}; +use pg_interval::Interval; +use rust_decimal::prelude::FromPrimitive; +use rust_decimal::Decimal; +use tokio_postgres::types::Type; +use tokio_postgres::NoTls; + +use crate::opts::Opts; + +pub struct TestSuite { + config: String, +} + +macro_rules! test_eq { + ($left:expr, $right:expr $(,)?) => { + match (&$left, &$right) { + (left_val, right_val) => { + if !(*left_val == *right_val) { + return Err(anyhow!( + "assertion failed: `(left == right)` \ + (left: `{:?}`, right: `{:?}`)", + left_val, + right_val + )); + } + } + } + }; +} + +impl TestSuite { + pub fn new(opts: Opts) -> Self { + let config = if !opts.pg_password.is_empty() { + format!( + "dbname={} user={} host={} port={} password={}", + opts.pg_db_name, + opts.pg_user_name, + opts.pg_server_host, + opts.pg_server_port, + opts.pg_password + ) + } else { + format!( + "dbname={} user={} host={} port={}", + opts.pg_db_name, opts.pg_user_name, opts.pg_server_host, opts.pg_server_port + ) + }; + Self { config } + } + + pub async fn test(&self) -> anyhow::Result<()> { + self.binary_param_and_result().await?; + self.dql_dml_with_param().await?; + self.max_row().await?; + Ok(()) + } + + pub async fn binary_param_and_result(&self) -> anyhow::Result<()> { + // Connect to the database. + let (client, connection) = tokio_postgres::connect(&self.config, NoTls).await?; + + // The connection object performs the actual communication with the database, + // so spawn it off to run on its own. + tokio::spawn(async move { + if let Err(e) = connection.await { + eprintln!("connection error: {}", e); + } + }); + + for row in client.query("select $1::SMALLINT;", &[&1024_i16]).await? { + let data: i16 = row.try_get(0)?; + test_eq!(data, 1024); + } + + for row in client.query("select $1::INT;", &[&144232_i32]).await? { + let data: i32 = row.try_get(0)?; + test_eq!(data, 144232); + } + + for row in client.query("select $1::BIGINT;", &[&99999999_i64]).await? { + let data: i64 = row.try_get(0)?; + test_eq!(data, 99999999); + } + + for row in client + .query("select $1::DECIMAL;", &[&Decimal::from_f32(2.33454_f32)]) + .await? + { + let data: Decimal = row.try_get(0)?; + test_eq!(data, Decimal::from_f32(2.33454_f32).unwrap()); + } + + for row in client.query("select $1::BOOL;", &[&true]).await? { + let data: bool = row.try_get(0)?; + assert!(data); + } + + for row in client.query("select $1::REAL;", &[&1.234234_f32]).await? { + let data: f32 = row.try_get(0)?; + test_eq!(data, 1.234234); + } + + // TODO(ZENOTME): After #8112, risingwave should support this case. (DOUBLE PRECISION TYPE) + // for row in client + // .query("select $1::DOUBLE PRECISION;", &[&234234.23490238483_f64]) + // .await? + // { + // let data: f64 = row.try_get(0)?; + // test_eq!(data, 234234.23490238483); + // } + for row in client + .query("select $1::FLOAT8;", &[&234234.23490238483_f64]) + .await? + { + let data: f64 = row.try_get(0)?; + test_eq!(data, 234234.23490238483); + } + + for row in client + .query( + "select $1::date;", + &[&NaiveDate::from_ymd_opt(2022, 1, 1).unwrap()], + ) + .await? + { + let data: NaiveDate = row.try_get(0)?; + test_eq!(data, NaiveDate::from_ymd_opt(2022, 1, 1).unwrap()); + } + + for row in client + .query( + "select $1::time", + &[&NaiveTime::from_hms_opt(10, 0, 0).unwrap()], + ) + .await? 
+        {
+            let data: NaiveTime = row.try_get(0)?;
+            test_eq!(data, NaiveTime::from_hms_opt(10, 0, 0).unwrap());
+        }
+
+        for row in client
+            .query(
+                "select $1::timestamp",
+                &[&NaiveDate::from_ymd_opt(2022, 1, 1)
+                    .unwrap()
+                    .and_hms_opt(10, 0, 0)
+                    .unwrap()],
+            )
+            .await?
+        {
+            let data: NaiveDateTime = row.try_get(0)?;
+            test_eq!(
+                data,
+                NaiveDate::from_ymd_opt(2022, 1, 1)
+                    .unwrap()
+                    .and_hms_opt(10, 0, 0)
+                    .unwrap()
+            );
+        }
+
+        let timestamptz = DateTime::<Utc>::from_utc(
+            NaiveDate::from_ymd_opt(2022, 1, 1)
+                .unwrap()
+                .and_hms_opt(10, 0, 0)
+                .unwrap(),
+            Utc,
+        );
+        for row in client
+            .query("select $1::timestamptz", &[&timestamptz])
+            .await?
+        {
+            let data: DateTime<Utc> = row.try_get(0)?;
+            test_eq!(data, timestamptz);
+        }
+
+        for row in client
+            .query("select $1::interval", &[&Interval::new(1, 1, 24000000)])
+            .await?
+        {
+            let data: Interval = row.try_get(0)?;
+            test_eq!(data, Interval::new(1, 1, 24000000));
+        }
+
+        Ok(())
+    }
+
+    /// TODO(ZENOTME): After #8112, risingwave should support changing all `prepare_typed` back to
+    /// `prepare`, so we won't need to provide the types explicitly.
+    async fn dql_dml_with_param(&self) -> anyhow::Result<()> {
+        let (client, connection) = tokio_postgres::connect(&self.config, NoTls).await?;
+
+        // The connection object performs the actual communication with the database,
+        // so spawn it off to run on its own.
+        tokio::spawn(async move {
+            if let Err(e) = connection.await {
+                eprintln!("connection error: {}", e);
+            }
+        });
+
+        client.query("create table t(id int)", &[]).await?;
+
+        let insert_statement = client
+            .prepare_typed("insert INTO t (id) VALUES ($1)", &[Type::INT4])
+            .await?;
+
+        for i in 0..20 {
+            client.execute(&insert_statement, &[&i]).await?;
+        }
+        client.execute("flush", &[]).await?;
+
+        let update_statement = client
+            .prepare_typed(
+                "update t set id = $1 where id < $2",
+                &[Type::INT4, Type::INT4],
+            )
+            .await?;
+        let query_statement = client
+            .prepare_typed(
+                "select * FROM t where id < $1 order by id ASC",
+                &[Type::INT4],
+            )
+            .await?;
+        let delete_statement = client
+            .prepare_typed("delete FROM t where id < $1", &[Type::INT4])
+            .await?;
+
+        let mut i = 0;
+        for row in client.query(&query_statement, &[&10_i32]).await? {
+            let id: i32 = row.try_get(0)?;
+            test_eq!(id, i);
+            i += 1;
+        }
+        test_eq!(i, 10);
+
+        client
+            .execute(&update_statement, &[&100_i32, &10_i32])
+            .await?;
+        client.execute("flush", &[]).await?;
+
+        let mut i = 0;
+        for _ in client.query(&query_statement, &[&10_i32]).await? {
+            i += 1;
+        }
+        test_eq!(i, 0);
+
+        client.execute(&delete_statement, &[&20_i32]).await?;
+        client.execute("flush", &[]).await?;
+
+        let mut i = 0;
+        for row in client.query(&query_statement, &[&101_i32]).await? {
+            let id: i32 = row.try_get(0)?;
+            test_eq!(id, 100);
+            i += 1;
+        }
+        test_eq!(i, 10);
+
+        client.execute("drop table t", &[]).await?;
+
+        Ok(())
+    }
+
+    async fn max_row(&self) -> anyhow::Result<()> {
+        let (mut client, connection) = tokio_postgres::connect(&self.config, NoTls).await?;
+
+        // The connection object performs the actual communication with the database,
+        // so spawn it off to run on its own.
+ tokio::spawn(async move { + if let Err(e) = connection.await { + eprintln!("connection error: {}", e); + } + }); + + client.query("create table t(id int)", &[]).await?; + + let insert_statement = client + .prepare_typed("insert INTO t (id) VALUES ($1)", &[Type::INT4]) + .await?; + + for i in 0..10 { + client.execute(&insert_statement, &[&i]).await?; + } + client.execute("flush", &[]).await?; + + let transaction = client.transaction().await?; + let statement = transaction + .prepare_typed("SELECT * FROM t order by id", &[]) + .await?; + let portal = transaction.bind(&statement, &[]).await?; + + for t in 0..5 { + let rows = transaction.query_portal(&portal, 1).await?; + test_eq!(rows.len(), 1); + let row = rows.get(0).unwrap(); + let id: i32 = row.get(0); + test_eq!(id, t); + } + + let mut i = 5; + for row in transaction.query_portal(&portal, 3).await? { + let id: i32 = row.get(0); + test_eq!(id, i); + i += 1; + } + test_eq!(i, 8); + + for row in transaction.query_portal(&portal, 5).await? { + let id: i32 = row.get(0); + test_eq!(id, i); + i += 1; + } + test_eq!(i, 10); + + transaction.rollback().await?; + + client.execute("drop table t", &[]).await?; + + Ok(()) + } +} diff --git a/src/tests/regress/data/sql/interval.sql b/src/tests/regress/data/sql/interval.sql index fdec559bac59d..fd8fbcf14c502 100644 --- a/src/tests/regress/data/sql/interval.sql +++ b/src/tests/regress/data/sql/interval.sql @@ -7,13 +7,13 @@ SET IntervalStyle to postgres; -- check acceptance of "time zone style" SELECT INTERVAL '01:00' AS "One hour"; ---@ SELECT INTERVAL '+02:00' AS "Two hours"; +SELECT INTERVAL '+02:00' AS "Two hours"; SELECT INTERVAL '-08:00' AS "Eight hours"; --@ SELECT INTERVAL '-1 +02:03' AS "22 hours ago..."; ---@ SELECT INTERVAL '-1 days +02:03' AS "22 hours ago..."; +SELECT INTERVAL '-1 days +02:03' AS "22 hours ago..."; --@ SELECT INTERVAL '1.5 weeks' AS "Ten days twelve hours"; --@ SELECT INTERVAL '1.5 months' AS "One month 15 days"; ---@ SELECT INTERVAL '10 years -11 month -12 days +13:14' AS "9 years..."; +SELECT INTERVAL '10 years -11 month -12 days +13:14' AS "9 years..."; CREATE TABLE INTERVAL_TBL (f1 interval); @@ -69,24 +69,24 @@ INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 days 2147483647 months'), ('-2147483648 days -2147483648 months'); -- these should fail as out-of-range ---@ INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483648 days'); ---@ INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483649 days'); ---@ INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483647 years'); ---@ INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 years'); +INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483648 days'); +INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483649 days'); +INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483647 years'); +INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 years'); -- Test edge-case overflow detection in interval multiplication --@ select extract(epoch from '256 microseconds'::interval * (2^55)::float8); ---@ SELECT r1.*, r2.* ---@ FROM INTERVAL_TBL_OF r1, INTERVAL_TBL_OF r2 ---@ WHERE r1.f1 > r2.f1 ---@ ORDER BY r1.f1, r2.f1; +SELECT r1.*, r2.* + FROM INTERVAL_TBL_OF r1, INTERVAL_TBL_OF r2 + WHERE r1.f1 > r2.f1 + ORDER BY r1.f1, r2.f1; --@ CREATE INDEX ON INTERVAL_TBL_OF USING btree (f1); --@ SET enable_seqscan TO false; --@ EXPLAIN (COSTS OFF) --@ SELECT f1 FROM INTERVAL_TBL_OF r1 ORDER BY f1; ---@ SELECT f1 FROM INTERVAL_TBL_OF r1 ORDER BY f1; +SELECT f1 FROM INTERVAL_TBL_OF r1 ORDER BY f1; --@ RESET enable_seqscan; DROP TABLE INTERVAL_TBL_OF; @@ -101,27 
+101,27 @@ DROP TABLE INTERVAL_TBL_OF; CREATE TABLE INTERVAL_MULDIV_TBL (span interval); -- COPY INTERVAL_MULDIV_TBL FROM STDIN; ---@ INSERT INTO INTERVAL_MULDIV_TBL VALUES ---@ ('41 mon 12 days 360:00'), ---@ ('-41 mon -12 days +360:00'), ---@ ('-12 days'), ---@ ('9 mon -27 days 12:34:56'), ---@ ('-3 years 482 days 76:54:32.189'), ---@ ('4 mon'), ---@ ('14 mon'), ---@ ('999 mon 999 days'); - ---@ SELECT span * 0.3 AS product ---@ FROM INTERVAL_MULDIV_TBL; ---@ ---@ SELECT span * 8.2 AS product ---@ FROM INTERVAL_MULDIV_TBL; ---@ ---@ SELECT span / 10 AS quotient ---@ FROM INTERVAL_MULDIV_TBL; ---@ ---@ SELECT span / 100 AS quotient ---@ FROM INTERVAL_MULDIV_TBL; +INSERT INTO INTERVAL_MULDIV_TBL VALUES +('41 mon 12 days 360:00'), +('-41 mon -12 days +360:00'), +('-12 days'), +('9 mon -27 days 12:34:56'), +('-3 years 482 days 76:54:32.189'), +('4 mon'), +('14 mon'), +('999 mon 999 days'); + +SELECT span * 0.3 AS product +FROM INTERVAL_MULDIV_TBL; + +SELECT span * 8.2 AS product +FROM INTERVAL_MULDIV_TBL; + +SELECT span / 10 AS quotient +FROM INTERVAL_MULDIV_TBL; + +SELECT span / 100 AS quotient +FROM INTERVAL_MULDIV_TBL; DROP TABLE INTERVAL_MULDIV_TBL; @@ -284,8 +284,8 @@ SET IntervalStyle to postgres; --@ interval 'PT10:30' AS "hour minute"; -- test a couple rounding cases that changed since 8.3 w/ HAVE_INT64_TIMESTAMP. ---@ select interval '-10 mons -3 days +03:55:06.70'; ---@ select interval '1 year 2 mons 3 days 04:05:06.699999'; +select interval '-10 mons -3 days +03:55:06.70'; +select interval '1 year 2 mons 3 days 04:05:06.699999'; --@ select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds'; -- check that '30 days' equals '1 month' according to the hash function diff --git a/src/tests/simulation/Cargo.toml b/src/tests/simulation/Cargo.toml index 0cd3764d8066c..6fd353dfb56c5 100644 --- a/src/tests/simulation/Cargo.toml +++ b/src/tests/simulation/Cargo.toml @@ -20,8 +20,10 @@ etcd-client = { version = "0.2.17", package = "madsim-etcd-client" } futures = { version = "0.3", default-features = false, features = ["alloc"] } glob = "0.3" itertools = "0.10" +lru = { git = "https://github.com/risingwavelabs/lru-rs.git", branch = "evict_by_timestamp" } madsim = "0.2.17" paste = "1" +pin-project = "1.0" pretty_assertions = "1" rand = "0.8" rdkafka = { package = "madsim-rdkafka", version = "=0.2.14-alpha", features = ["cmake-build"] } @@ -32,6 +34,7 @@ risingwave_ctl = { path = "../../ctl" } risingwave_frontend = { path = "../../frontend" } risingwave_meta = { path = "../../meta" } risingwave_pb = { path = "../../prost" } +risingwave_sqlparser = { path = "../../sqlparser" } risingwave_sqlsmith = { path = "../sqlsmith" } serde = "1.0.152" serde_derive = "1.0.152" diff --git a/src/tests/simulation/src/client.rs b/src/tests/simulation/src/client.rs index 551378a6cf8f2..19cd248b44864 100644 --- a/src/tests/simulation/src/client.rs +++ b/src/tests/simulation/src/client.rs @@ -14,6 +14,11 @@ use std::time::Duration; +use itertools::Itertools; +use lru::{Iter, LruCache}; +use risingwave_sqlparser::ast::Statement; +use risingwave_sqlparser::parser::Parser; + /// A RisingWave client. pub struct RisingWave { client: tokio_postgres::Client, @@ -22,7 +27,70 @@ pub struct RisingWave { dbname: String, /// The `SET` statements that have been executed on this client. /// We need to replay them when reconnecting. - set_stmts: Vec, + set_stmts: SetStmts, +} + +/// `SetStmts` stores and compacts all `SET` statements that have been executed in the client +/// history. 
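+///
+/// For example, after `SET a TO 1`, `SET b TO 2` and then `SET a TO 3`, the cache
+/// keeps one entry per variable, and reconnection replays `SET b TO 2` followed by
+/// `SET a TO 3` (least-recently-set first, via the reversed LRU iterator below).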
+pub struct SetStmts {
+    stmts_cache: LruCache<String, String>,
+}
+
+impl Default for SetStmts {
+    fn default() -> Self {
+        Self {
+            stmts_cache: LruCache::unbounded(),
+        }
+    }
+}
+
+struct SetStmtsIterator<'a, 'b>
+where
+    'a: 'b,
+{
+    _stmts: &'a SetStmts,
+    stmts_iter: core::iter::Rev<Iter<'b, String, String>>,
+}
+
+impl<'a, 'b> SetStmtsIterator<'a, 'b> {
+    fn new(stmts: &'a SetStmts) -> Self {
+        Self {
+            _stmts: stmts,
+            stmts_iter: stmts.stmts_cache.iter().rev(),
+        }
+    }
+}
+
+impl SetStmts {
+    fn push(&mut self, sql: &str) {
+        let ast = Parser::parse_sql(sql).expect("a set statement should be parsed successfully");
+        match ast
+            .into_iter()
+            .exactly_one()
+            .expect("should contain only one statement")
+        {
+            // Record `local` for variables and `SetTransaction` if supported in the future.
+            Statement::SetVariable {
+                local: _,
+                variable,
+                value: _,
+            } => {
+                let key = variable.real_value().to_lowercase();
+                // Store the complete SQL statement as the value.
+                self.stmts_cache.put(key, sql.to_string());
+            }
+            _ => unreachable!(),
+        }
+    }
+}
+
+impl Iterator for SetStmtsIterator<'_, '_> {
+    type Item = String;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let (_, stmt) = self.stmts_iter.next()?;
+        Some(stmt.clone())
+    }
+}
 
 impl RisingWave {
@@ -30,18 +98,27 @@
         host: String,
         dbname: String,
     ) -> Result<Self, tokio_postgres::error::Error> {
-        Self::reconnect(host, dbname, vec![]).await
+        let set_stmts = SetStmts::default();
+        let (client, task) = Self::connect_inner(&host, &dbname, &set_stmts).await?;
+        Ok(Self {
+            client,
+            task,
+            host,
+            dbname,
+            set_stmts,
+        })
     }
 
-    pub async fn reconnect(
-        host: String,
-        dbname: String,
-        set_stmts: Vec<String>,
-    ) -> Result<Self, tokio_postgres::error::Error> {
+    pub async fn connect_inner(
+        host: &str,
+        dbname: &str,
+        set_stmts: &SetStmts,
+    ) -> Result<(tokio_postgres::Client, tokio::task::JoinHandle<()>), tokio_postgres::error::Error>
+    {
         let (client, connection) = tokio_postgres::Config::new()
-            .host(&host)
+            .host(host)
             .port(4566)
-            .dbname(&dbname)
+            .dbname(dbname)
             .user("root")
             .connect_timeout(Duration::from_secs(5))
             .connect(tokio_postgres::NoTls)
@@ -64,16 +141,17 @@
             .simple_query("SET VISIBILITY_MODE TO checkpoint;")
             .await?;
         // replay all SET statements
-        for stmt in &set_stmts {
-            client.simple_query(stmt).await?;
+        for stmt in SetStmtsIterator::new(&set_stmts) {
+            client.simple_query(&stmt).await?;
         }
-        Ok(RisingWave {
-            client,
-            task,
-            host,
-            dbname,
-            set_stmts,
-        })
+        Ok((client, task))
+    }
+
+    pub async fn reconnect(&mut self) -> Result<(), tokio_postgres::error::Error> {
+        let (client, task) = Self::connect_inner(&self.host, &self.dbname, &self.set_stmts).await?;
+        self.client = client;
+        self.task = task;
+        Ok(())
+    }
 
     /// Returns a reference to the inner Postgres client.
@@ -97,16 +175,11 @@ impl sqllogictest::AsyncDB for RisingWave {
 
         if self.client.is_closed() {
             // connection error, reset the client
-            *self = Self::reconnect(
-                self.host.clone(),
-                self.dbname.clone(),
-                self.set_stmts.clone(),
-            )
-            .await?;
+            self.reconnect().await?;
         }
 
         if sql.trim_start().to_lowercase().starts_with("set") {
-            self.set_stmts.push(sql.to_string());
+            self.set_stmts.push(sql);
         }
 
         let mut output = vec![];
diff --git a/src/tests/simulation/src/cluster.rs b/src/tests/simulation/src/cluster.rs
index 121dfae36862a..bd034c38f865d 100644
--- a/src/tests/simulation/src/cluster.rs
+++ b/src/tests/simulation/src/cluster.rs
@@ -16,14 +16,17 @@
 use std::collections::HashMap;
 use std::future::Future;
 use std::io::Write;
 use std::path::PathBuf;
-use std::sync::LazyLock;
+use std::sync::{Arc, LazyLock};
 use std::time::Duration;
 
 use anyhow::{bail, Result};
 use clap::Parser;
+use futures::channel::{mpsc, oneshot};
 use futures::future::join_all;
+use futures::{SinkExt, StreamExt};
 use madsim::net::ipvs::*;
 use madsim::runtime::{Handle, NodeHandle};
+use madsim::task::JoinHandle;
 use rand::Rng;
 use sqllogictest::AsyncDB;
 
@@ -109,6 +112,9 @@ pub struct Cluster {
 }
 
 impl Cluster {
+    /// Start a RisingWave cluster for testing.
+    ///
+    /// This function should be called exactly once in a test.
     pub async fn start(conf: Configuration) -> Result<Self> {
         let handle = madsim::runtime::Handle::current();
         println!("seed = {}", handle.seed());
@@ -204,6 +210,8 @@ impl Cluster {
             "etcd",
             "--etcd-endpoints",
             "etcd:2388",
+            "--state-store",
+            "hummock+minio://hummockadmin:hummockadmin@192.168.12.1:9301/hummock001",
         ]);
         handle
             .create_node()
@@ -245,8 +253,6 @@ impl Cluster {
                 "0.0.0.0:5688",
                 "--advertise-addr",
                 &format!("192.168.3.{i}:5688"),
-                "--state-store",
-                "hummock+minio://hummockadmin:hummockadmin@192.168.12.1:9301/hummock001",
                 "--parallelism",
                 &conf.compute_node_cores.to_string(),
             ]);
@@ -269,8 +275,6 @@ impl Cluster {
                 "0.0.0.0:6660",
                 "--advertise-addr",
                 &format!("192.168.4.{i}:6660"),
-                "--state-store",
-                "hummock+minio://hummockadmin:hummockadmin@192.168.12.1:9301/hummock001",
             ]);
         handle
             .create_node()
@@ -305,35 +309,47 @@ impl Cluster {
         })
     }
 
-    /// Run a SQL query from the client.
-    pub async fn run(&mut self, sql: impl Into<String>) -> Result<String> {
-        let sql = sql.into();
+    /// Start a SQL session on the client node.
+    pub fn start_session(&mut self) -> Session {
+        let (query_tx, mut query_rx) = mpsc::channel::<SessionRequest>(0);
+
+        self.client.spawn(async move {
+            let mut client = RisingWave::connect("frontend".into(), "dev".into()).await?;
 
-        let result = self
-            .client
-            .spawn(async move {
-                // TODO: reuse session
-                let mut session = RisingWave::connect("frontend".into(), "dev".into())
+            while let Some((sql, tx)) = query_rx.next().await {
+                let result = client
+                    .run(&sql)
                    .await
-                    .expect("failed to connect to RisingWave");
-                let result = session.run(&sql).await?;
-                Ok::<_, anyhow::Error>(result)
-            })
-            .await??;
-
-        match result {
-            sqllogictest::DBOutput::Rows { rows, .. } => Ok(rows
-                .into_iter()
-                .map(|row| {
-                    row.into_iter()
-                        .map(|v| v.to_string())
-                        .collect::<Vec<_>>()
-                        .join(" ")
-                })
-                .collect::<Vec<_>>()
-                .join("\n")),
-            _ => Ok("".to_string()),
-        }
+                    .map(|output| match output {
+                        sqllogictest::DBOutput::Rows { rows, .. } => rows
+                            .into_iter()
+                            .map(|row| {
+                                row.into_iter()
+                                    .map(|v| v.to_string())
+                                    .collect::<Vec<_>>()
+                                    .join(" ")
+                            })
+                            .collect::<Vec<_>>()
+                            .join("\n"),
+                        _ => "".to_string(),
+                    })
+                    .map_err(Into::into);
+
+                let _ = tx.send(result);
+            }
+
+            Ok::<_, anyhow::Error>(())
+        });
+
+        Session { query_tx }
+    }
+
+    /// Run a SQL query on a **new** session of the client node.
+    ///
+    /// This is a convenience method that creates a new session and runs the query on it. If you
+    /// want to run multiple queries on the same session, use `start_session` and `Session::run`.
+    pub async fn run(&mut self, sql: impl Into<String>) -> Result<String> {
+        self.start_session().run(sql).await
     }
 
     /// Run a future on the client node.
@@ -514,6 +530,27 @@ impl Cluster {
     }
 }
 
+type SessionRequest = (
+    String,                          // query sql
+    oneshot::Sender<Result<String>>, // channel to send result back
+);
+
+/// A SQL session on the simulated client node.
+#[derive(Debug, Clone)]
+pub struct Session {
+    query_tx: mpsc::Sender<SessionRequest>,
+}
+
+impl Session {
+    /// Run the given SQL query on the session.
+    pub async fn run(&mut self, sql: impl Into<String>) -> Result<String> {
+        let (tx, rx) = oneshot::channel();
+        self.query_tx.send((sql.into(), tx)).await?;
+        rx.await?
+    }
+}
+
+/// Options for killing nodes.
 #[derive(Debug, Clone, Copy, PartialEq)]
 pub struct KillOpts {
     pub kill_rate: f32,
@@ -522,3 +559,14 @@ pub struct KillOpts {
     pub kill_compute: bool,
     pub kill_compactor: bool,
 }
+
+impl KillOpts {
+    /// Kill all kinds of nodes.
+    pub const ALL: Self = KillOpts {
+        kill_rate: 1.0,
+        kill_meta: true,
+        kill_frontend: true,
+        kill_compute: true,
+        kill_compactor: true,
+    };
+}
diff --git a/src/tests/simulation/src/ctl_ext.rs b/src/tests/simulation/src/ctl_ext.rs
index 562ee1345737b..769a331f5f1b6 100644
--- a/src/tests/simulation/src/ctl_ext.rs
+++ b/src/tests/simulation/src/ctl_ext.rs
@@ -24,7 +24,7 @@
 use rand::seq::{IteratorRandom, SliceRandom};
 use rand::Rng;
 use risingwave_common::hash::ParallelUnitId;
 use risingwave_pb::meta::table_fragments::fragment::FragmentDistributionType;
-use risingwave_pb::meta::table_fragments::Fragment as ProstFragment;
+use risingwave_pb::meta::table_fragments::PbFragment;
 use risingwave_pb::meta::GetClusterInfoResponse;
 use risingwave_pb::stream_plan::StreamNode;
 
@@ -35,10 +35,10 @@ use crate::cluster::Cluster;
 pub mod predicate {
     use super::*;
 
-    trait Predicate = Fn(&ProstFragment) -> bool + Send + 'static;
+    trait Predicate = Fn(&PbFragment) -> bool + Send + 'static;
     pub type BoxedPredicate = Box<dyn Predicate>;
 
-    fn root(fragment: &ProstFragment) -> &StreamNode {
+    fn root(fragment: &PbFragment) -> &StreamNode {
         fragment.actors.first().unwrap().nodes.as_ref().unwrap()
     }
 
@@ -58,7 +58,7 @@ pub mod predicate {
     /// There're exactly `n` operators whose identity contains `s` in the fragment.
     pub fn identity_contains_n(n: usize, s: impl Into<String>) -> BoxedPredicate {
         let s: String = s.into();
-        let p = move |f: &ProstFragment| {
+        let p = move |f: &PbFragment| {
             count(root(f), &|n| {
                 n.identity.to_lowercase().contains(&s.to_lowercase())
             }) == n
@@ -69,7 +69,7 @@ pub mod predicate {
     /// There exist operators whose identity contains `s` in the fragment.
     pub fn identity_contains(s: impl Into<String>) -> BoxedPredicate {
         let s: String = s.into();
-        let p = move |f: &ProstFragment| {
+        let p = move |f: &PbFragment| {
             any(root(f), &|n| {
                 n.identity.to_lowercase().contains(&s.to_lowercase())
             })
@@ -80,7 +80,7 @@ pub mod predicate {
     /// There does not exist any operator whose identity contains `s` in the fragment.
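 ///
 /// For instance, a hypothetical combination mirroring how these predicates are used
 /// in the integration tests below:
 ///
 /// ```ignore
 /// let fragment = cluster
 ///     .locate_one_fragment([
 ///         identity_contains("materialize"),
 ///         no_identity_contains("chain"),
 ///     ])
 ///     .await?;
 /// ```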
pub fn no_identity_contains(s: impl Into) -> BoxedPredicate { let s: String = s.into(); - let p = move |f: &ProstFragment| { + let p = move |f: &PbFragment| { all(root(f), &|n| { !n.identity.to_lowercase().contains(&s.to_lowercase()) }) @@ -90,20 +90,22 @@ pub mod predicate { /// There're `n` upstream fragments of the fragment. pub fn upstream_fragment_count(n: usize) -> BoxedPredicate { - let p = move |f: &ProstFragment| f.upstream_fragment_ids.len() == n; + let p = move |f: &PbFragment| f.upstream_fragment_ids.len() == n; Box::new(p) } /// The fragment is able to be rescheduled. Used for locating random fragment. pub fn can_reschedule() -> BoxedPredicate { - // The rescheduling of `Chain` must be derived from the upstream `Materialize`, not - // specified by the user. - no_identity_contains("StreamTableScan") + // The rescheduling of no-shuffle downstreams must be derived from the upstream + // `Materialize`, not specified by the user. + let p = + |f: &PbFragment| no_identity_contains("Chain")(f) && no_identity_contains("Lookup")(f); + Box::new(p) } /// The fragment with the given id. pub fn id(id: u32) -> BoxedPredicate { - let p = move |f: &ProstFragment| f.fragment_id == id; + let p = move |f: &PbFragment| f.fragment_id == id; Box::new(p) } } diff --git a/src/tests/simulation/src/kafka.rs b/src/tests/simulation/src/kafka.rs index fccf93f686054..93abf1cd98b56 100644 --- a/src/tests/simulation/src/kafka.rs +++ b/src/tests/simulation/src/kafka.rs @@ -82,7 +82,11 @@ pub async fn producer(broker_addr: &str, datadir: String) { // binary message data, a file is a message Box::new(std::iter::once(content.as_slice())) } else { - Box::new(content.split(|&b| b == b'\n')) + Box::new( + content + .split(|&b| b == b'\n') + .filter(|line| !line.is_empty()), + ) }; for msg in msgs { loop { diff --git a/src/tests/simulation/src/nexmark/create_source.sql b/src/tests/simulation/src/nexmark/create_source.sql index b2676f0c597df..e2624e6a716c6 100644 --- a/src/tests/simulation/src/nexmark/create_source.sql +++ b/src/tests/simulation/src/nexmark/create_source.sql @@ -1,14 +1,14 @@ create source auction ( id BIGINT, - "item_name" VARCHAR, + item_name VARCHAR, description VARCHAR, - "initial_bid" BIGINT, + initial_bid BIGINT, reserve BIGINT, - "date_time" TIMESTAMP, + date_time TIMESTAMP, expires TIMESTAMP, seller BIGINT, category BIGINT, - "extra" VARCHAR + extra VARCHAR {watermark_column}) with ( connector = 'nexmark', @@ -20,10 +20,10 @@ create source bid ( auction BIGINT, bidder BIGINT, price BIGINT, - "channel" VARCHAR, - "url" VARCHAR, - "date_time" TIMESTAMP, - "extra" VARCHAR + channel VARCHAR, + url VARCHAR, + date_time TIMESTAMP, + extra VARCHAR {watermark_column}) with ( connector = 'nexmark', @@ -34,12 +34,12 @@ with ( create source person ( id BIGINT, name VARCHAR, - "email_address" VARCHAR, - "credit_card" VARCHAR, + email_address VARCHAR, + credit_card VARCHAR, city VARCHAR, state VARCHAR, - "date_time" TIMESTAMP, - "extra" VARCHAR + date_time TIMESTAMP, + extra VARCHAR {watermark_column}) with ( connector = 'nexmark', diff --git a/src/tests/simulation/src/risingwave.toml b/src/tests/simulation/src/risingwave.toml index 8a4e81637021f..b88e8a0644f7c 100644 --- a/src/tests/simulation/src/risingwave.toml +++ b/src/tests/simulation/src/risingwave.toml @@ -2,6 +2,9 @@ # # Note: this file is embedded in the binary and cannot be changed without recompiling. 
-[streaming] +[system] barrier_interval_ms = 250 checkpoint_frequency = 4 + +[server] +telemetry_enabled = false diff --git a/src/tests/simulation/src/slt.rs b/src/tests/simulation/src/slt.rs index d85d1d6bf7b3e..9dc83472b98e6 100644 --- a/src/tests/simulation/src/slt.rs +++ b/src/tests/simulation/src/slt.rs @@ -21,6 +21,7 @@ use sqllogictest::ParallelTestError; use crate::client::RisingWave; use crate::cluster::{Cluster, KillOpts}; +use crate::utils::TimedExt; fn is_create_table_as(sql: &str) -> bool { let parts: Vec = sql @@ -112,7 +113,13 @@ pub async fn run_slt_task(cluster: Arc, glob: &str, opts: &KillOpts) { // For normal records. if !kill { - match tester.run_async(record).await { + match tester + .run_async(record.clone()) + .timed(|_res, elapsed| { + tracing::debug!("Record {:?} finished in {:?}", record, elapsed) + }) + .await + { Ok(_) => continue, Err(e) => panic!("{}", e), } @@ -128,7 +135,13 @@ pub async fn run_slt_task(cluster: Arc, glob: &str, opts: &KillOpts) { if cmd.ignore_kill() { for i in 0usize.. { let delay = Duration::from_secs(1 << i); - if let Err(err) = tester.run_async(record.clone()).await { + if let Err(err) = tester + .run_async(record.clone()) + .timed(|_res, elapsed| { + tracing::debug!("Record {:?} finished in {:?}", record, elapsed) + }) + .await + { // cluster could be still under recovering if killed before, retry if // meets `no reader for dml in table with id {}`. let should_retry = @@ -162,7 +175,13 @@ pub async fn run_slt_task(cluster: Arc, glob: &str, opts: &KillOpts) { // retry up to 5 times until it succeed for i in 0usize.. { let delay = Duration::from_secs(1 << i); - match tester.run_async(record.clone()).await { + match tester + .run_async(record.clone()) + .timed(|_res, elapsed| { + tracing::debug!("Record {:?} finished in {:?}", record, elapsed) + }) + .await + { Ok(_) => break, // allow 'table exists' error when retry CREATE statement Err(e) diff --git a/src/tests/simulation/src/utils.rs b/src/tests/simulation/src/utils/assert_result.rs similarity index 100% rename from src/tests/simulation/src/utils.rs rename to src/tests/simulation/src/utils/assert_result.rs diff --git a/src/tests/simulation/src/utils/mod.rs b/src/tests/simulation/src/utils/mod.rs new file mode 100644 index 0000000000000..b3b726467e613 --- /dev/null +++ b/src/tests/simulation/src/utils/mod.rs @@ -0,0 +1,19 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod assert_result; +pub use assert_result::*; + +mod timed_future; +pub use timed_future::*; diff --git a/src/tests/simulation/src/utils/timed_future.rs b/src/tests/simulation/src/utils/timed_future.rs new file mode 100644 index 0000000000000..b9003552a0ac8 --- /dev/null +++ b/src/tests/simulation/src/utils/timed_future.rs @@ -0,0 +1,76 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::future::Future; +use std::pin::{pin, Pin}; +use std::task::{Context, Poll}; +use std::time::{Duration, Instant}; + +use pin_project::pin_project; + +/// Inspired by https://stackoverflow.com/a/59935743/2990323 +/// A wrapper around a Future which adds timing data. +#[pin_project] +pub struct Timed +where + Fut: Future, + F: Fn(&Fut::Output, Duration), +{ + #[pin] + inner: Fut, + f: F, + start: Option, +} + +impl Future for Timed +where + Fut: Future, + F: Fn(&Fut::Output, Duration), +{ + type Output = Fut::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = self.project(); + let start = this.start.get_or_insert_with(Instant::now); + + match this.inner.poll(cx) { + // If the inner future is still pending, this wrapper is still pending. + Poll::Pending => Poll::Pending, + + // If the inner future is done, measure the elapsed time and finish this wrapper future. + Poll::Ready(v) => { + let elapsed = start.elapsed(); + (this.f)(&v, elapsed); + + Poll::Ready(v) + } + } + } +} + +pub trait TimedExt: Sized + Future { + fn timed(self, f: F) -> Timed + where + F: Fn(&Self::Output, Duration), + { + Timed { + inner: self, + f, + start: None, + } + } +} + +// All futures can use the `.timed` method defined above +impl TimedExt for F {} diff --git a/src/tests/simulation/tests/it/cascade_materialized_view.rs b/src/tests/simulation/tests/it/cascade_materialized_view.rs index f3bcaf7c82283..4e3dd4a2695de 100644 --- a/src/tests/simulation/tests/it/cascade_materialized_view.rs +++ b/src/tests/simulation/tests/it/cascade_materialized_view.rs @@ -33,9 +33,10 @@ const MV5: &str = "create materialized view m5 as select * from m4;"; #[madsim::test] async fn test_simple_cascade_materialized_view() -> Result<()> { let mut cluster = Cluster::start(Configuration::for_scale()).await?; + let mut session = cluster.start_session(); - cluster.run(ROOT_TABLE_CREATE).await?; - cluster.run(MV1).await?; + session.run(ROOT_TABLE_CREATE).await?; + session.run(MV1).await?; let fragment = cluster .locate_one_fragment([ @@ -62,17 +63,17 @@ async fn test_simple_cascade_materialized_view() -> Result<()> { fragment.inner.actors.len() ); - cluster + session .run(&format!( "insert into t1 values {}", (1..=10).map(|x| format!("({x})")).join(",") )) .await?; - cluster.run("flush").await?; + session.run("flush").await?; // v1 > 5, result is [6, 7, 8, 9, 10] - cluster + session .run("select count(*) from m1") .await? .assert_result_eq("5"); @@ -92,21 +93,21 @@ async fn test_simple_cascade_materialized_view() -> Result<()> { fragment.inner.actors.len() ); - cluster + session .run("select count(*) from m1") .await? .assert_result_eq("5"); - cluster + session .run(&format!( "insert into t1 values {}", (11..=20).map(|x| format!("({x})")).join(",") )) .await?; - cluster.run("flush").await?; + session.run("flush").await?; // 10 < v1 < 15, result is [11, 12, 13, 14] - cluster + session .run("select count(*) from m1") .await? 
.assert_result_eq("15"); @@ -117,13 +118,14 @@ async fn test_simple_cascade_materialized_view() -> Result<()> { #[madsim::test] async fn test_diamond_cascade_materialized_view() -> Result<()> { let mut cluster = Cluster::start(Configuration::for_scale()).await?; + let mut session = cluster.start_session(); - cluster.run(ROOT_TABLE_CREATE).await?; - cluster.run(MV1).await?; - cluster.run(MV2).await?; - cluster.run(MV3).await?; - cluster.run(MV4).await?; - cluster.run(MV5).await?; + session.run(ROOT_TABLE_CREATE).await?; + session.run(MV1).await?; + session.run(MV2).await?; + session.run(MV3).await?; + session.run(MV4).await?; + session.run(MV5).await?; let fragment = cluster .locate_one_fragment([ @@ -141,15 +143,15 @@ async fn test_diamond_cascade_materialized_view() -> Result<()> { let fragment = cluster.locate_fragment_by_id(id).await?; assert_eq!(fragment.inner.actors.len(), 1); - cluster + session .run(&format!( "insert into t1 values {}", (1..=10).map(|x| format!("({x})")).join(",") )) .await?; - cluster.run("flush").await?; - cluster + session.run("flush").await?; + session .run("select count(*) from m5") .await? .assert_result_eq("0"); @@ -160,20 +162,20 @@ async fn test_diamond_cascade_materialized_view() -> Result<()> { let fragment = cluster.locate_fragment_by_id(id).await?; assert_eq!(fragment.inner.actors.len(), 6); - cluster + session .run("select count(*) from m5") .await? .assert_result_eq("0"); - cluster + session .run(&format!( "insert into t1 values {}", (11..=20).map(|x| format!("({x})")).join(",") )) .await?; - cluster.run("flush").await?; - cluster + session.run("flush").await?; + session .run("select count(*) from m5") .await? .assert_result_eq("4"); diff --git a/src/tests/simulation/tests/it/delta_join.rs b/src/tests/simulation/tests/it/delta_join.rs new file mode 100644 index 0000000000000..7f109e7db3126 --- /dev/null +++ b/src/tests/simulation/tests/it/delta_join.rs @@ -0,0 +1,123 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(madsim)] + +use anyhow::Result; +use itertools::Itertools; +use risingwave_simulation::cluster::{Cluster, Configuration}; +use risingwave_simulation::ctl_ext::predicate::identity_contains; +use risingwave_simulation::utils::AssertResult; + +#[madsim::test] +async fn test_delta_join() -> Result<()> { + let mut cluster = Cluster::start(Configuration::for_scale()).await?; + let mut session = cluster.start_session(); + + session.run("set rw_implicit_flush = true;").await?; + session + .run("set rw_streaming_enable_delta_join = true;") + .await?; + + session + .run("create table a (a1 int primary key, a2 int);") + .await?; + session + .run("create table b (b1 int primary key, b2 int);") + .await?; + let [t1, t2]: [_; 2] = cluster + .locate_fragments([identity_contains("materialize")]) + .await? 
+ .try_into() + .unwrap(); + + session + .run("create materialized view v as select * from a join b on a.a1 = b.b1;") + .await?; + let lookup_fragments = cluster + .locate_fragments([identity_contains("lookup")]) + .await?; + assert_eq!(lookup_fragments.len(), 2, "failed to plan delta join"); + let union_fragment = cluster + .locate_one_fragment([identity_contains("union")]) + .await?; + + let mut test_times = 0; + macro_rules! test_works { + () => { + let keys = || (0..100).map(|i| test_times * 100 + i); + + for key in keys() { + session + .run(format!("insert into a values ({key}, 233)")) + .await?; + session + .run(format!("insert into b values ({key}, 666)")) + .await?; + } + session.run("flush").await?; + + let result = keys() + .rev() + .map(|key| format!("{key} 233 {key} 666")) + .join("\n"); + + session + .run("select * from v order by a1 desc limit 100;") + .await? + .assert_result_eq(result); + + #[allow(unused_assignments)] + test_times += 1; + }; + } + + test_works!(); + + // Scale-in one side + cluster.reschedule(format!("{}-[0]", t1.id())).await?; + test_works!(); + + // Scale-in both sides together + cluster + .reschedule(format!("{}-[2];{}-[0,2]", t1.id(), t2.id())) + .await?; + test_works!(); + + // Scale-out one side + cluster.reschedule(format!("{}+[0]", t2.id())).await?; + test_works!(); + + // Scale-out both sides together + cluster + .reschedule(format!("{}+[0,2];{}+[2]", t1.id(), t2.id())) + .await?; + test_works!(); + + // Scale-in join with union + cluster + .reschedule(format!("{}-[5];{}-[5]", t1.id(), union_fragment.id())) + .await?; + test_works!(); + + let result = cluster + .reschedule(format!("{}-[0]", lookup_fragments[0].id())) + .await; + assert!( + result.is_err(), + "directly scale-in lookup (downstream) should fail" + ); + + Ok(()) +} diff --git a/src/tests/simulation/tests/it/dynamic_filter.rs b/src/tests/simulation/tests/it/dynamic_filter.rs index e03432498d5dc..4736a2cdcfd1d 100644 --- a/src/tests/simulation/tests/it/dynamic_filter.rs +++ b/src/tests/simulation/tests/it/dynamic_filter.rs @@ -28,12 +28,13 @@ const SELECT: &str = "select * from mv1 order by v1;"; #[madsim::test] async fn test_dynamic_filter() -> Result<()> { let mut cluster = Cluster::start(Configuration::for_scale()).await?; + let mut session = cluster.start_session(); - cluster.run("create table t1 (v1 int);").await?; - cluster.run("create table t2 (v2 int);").await?; - cluster.run("create materialized view mv1 as with max_v2 as (select max(v2) max from t2) select v1 from t1, max_v2 where v1 > max;").await?; - cluster.run("insert into t1 values (1), (2), (3)").await?; - cluster.run("flush").await?; + session.run("create table t1 (v1 int);").await?; + session.run("create table t2 (v2 int);").await?; + session.run("create materialized view mv1 as with max_v2 as (select max(v2) max from t2) select v1 from t1, max_v2 where v1 > max;").await?; + session.run("insert into t1 values (1), (2), (3)").await?; + session.run("flush").await?; sleep(Duration::from_secs(5)).await; let dynamic_filter_fragment = cluster @@ -60,53 +61,53 @@ async fn test_dynamic_filter() -> Result<()> { cluster.reschedule(format!("{id}-[1,2,3]")).await?; sleep(Duration::from_secs(3)).await; - cluster.run(SELECT).await?.assert_result_eq(""); - cluster.run("insert into t2 values (0)").await?; - cluster.run("flush").await?; + session.run(SELECT).await?.assert_result_eq(""); + session.run("insert into t2 values (0)").await?; + session.run("flush").await?; sleep(Duration::from_secs(5)).await; - 
cluster.run(SELECT).await?.assert_result_eq("1\n2\n3"); + session.run(SELECT).await?.assert_result_eq("1\n2\n3"); // 1 // 2 // 3 cluster.reschedule(format!("{id}-[4,5]+[1,2,3]")).await?; sleep(Duration::from_secs(3)).await; - cluster.run(SELECT).await?.assert_result_eq("1\n2\n3"); + session.run(SELECT).await?.assert_result_eq("1\n2\n3"); - cluster.run("insert into t2 values (2)").await?; - cluster.run("flush").await?; + session.run("insert into t2 values (2)").await?; + session.run("flush").await?; sleep(Duration::from_secs(5)).await; - cluster.run(SELECT).await?.assert_result_eq("3"); + session.run(SELECT).await?.assert_result_eq("3"); // 3 cluster.reschedule(format!("{id}-[1,2,3]+[4,5]")).await?; sleep(Duration::from_secs(3)).await; - cluster.run(SELECT).await?.assert_result_eq("3"); + session.run(SELECT).await?.assert_result_eq("3"); - cluster.run("update t2 set v2 = 1 where v2 = 2").await?; - cluster.run("flush").await?; + session.run("update t2 set v2 = 1 where v2 = 2").await?; + session.run("flush").await?; sleep(Duration::from_secs(5)).await; - cluster.run(SELECT).await?.assert_result_eq("2\n3"); + session.run(SELECT).await?.assert_result_eq("2\n3"); // 2 // 3 // cluster.reschedule(format!("{id}+[1,2,3]")).await?; sleep(Duration::from_secs(3)).await; - cluster.run(SELECT).await?.assert_result_eq("2\n3"); + session.run(SELECT).await?.assert_result_eq("2\n3"); - cluster.run("delete from t2 where true").await?; - cluster.run("flush").await?; + session.run("delete from t2 where true").await?; + session.run("flush").await?; sleep(Duration::from_secs(5)).await; - cluster.run(SELECT).await?.assert_result_eq(""); + session.run(SELECT).await?.assert_result_eq(""); cluster.reschedule(format!("{id}-[1]")).await?; sleep(Duration::from_secs(3)).await; - cluster.run(SELECT).await?.assert_result_eq(""); + session.run(SELECT).await?.assert_result_eq(""); - cluster.run("insert into t2 values (1)").await?; - cluster.run("flush").await?; + session.run("insert into t2 values (1)").await?; + session.run("flush").await?; sleep(Duration::from_secs(5)).await; - cluster.run(SELECT).await?.assert_result_eq("2\n3"); + session.run(SELECT).await?.assert_result_eq("2\n3"); Ok(()) } diff --git a/src/tests/simulation/tests/it/main.rs b/src/tests/simulation/tests/it/main.rs index b9bd44b857b9e..ee2f7f003b535 100644 --- a/src/tests/simulation/tests/it/main.rs +++ b/src/tests/simulation/tests/it/main.rs @@ -17,11 +17,15 @@ //! See [this post](https://matklad.github.io/2021/02/27/delete-cargo-integration-tests.html) //! for the rationale behind this approach. 
+#![feature(stmt_expr_attributes)] + mod cascade_materialized_view; +mod delta_join; mod dynamic_filter; mod hello; mod nexmark_chaos; mod nexmark_q4; +mod nexmark_recovery; mod nexmark_source; mod singleton_migration; mod streaming_parallelism; diff --git a/src/tests/simulation/tests/it/nexmark_chaos.rs b/src/tests/simulation/tests/it/nexmark_chaos.rs index f800b8ade7034..34905b5e2f199 100644 --- a/src/tests/simulation/tests/it/nexmark_chaos.rs +++ b/src/tests/simulation/tests/it/nexmark_chaos.rs @@ -41,15 +41,17 @@ async fn nexmark_chaos_common_inner( ) -> Result<()> { let mut cluster = NexmarkCluster::new(Configuration::for_scale(), 6, Some(20 * THROUGHPUT), false).await?; - cluster.run(create).await?; + let mut session = cluster.start_session(); + session.run(create).await?; sleep(Duration::from_secs(30)).await; - let final_result = cluster.run(select).await?; - cluster.run(drop).await?; + let final_result = session.run(select).await?; + session.run(drop).await?; sleep(Duration::from_secs(5)).await; println!("Reference run done."); - - cluster.run(create).await?; + // Create a new session for the chaos run. + let mut session = cluster.start_session(); + session.run(create).await?; let _initial_result = cluster .wait_until_non_empty(select, initial_interval, initial_timeout) @@ -68,7 +70,7 @@ async fn nexmark_chaos_common_inner( cluster.reschedule(join_plans(fragments)).await?; sleep(after_scale_duration).await; - cluster.run(select).await?.assert_result_ne(&final_result); + session.run(select).await?.assert_result_ne(&final_result); let fragments = cluster.locate_random_fragments().await?; cluster.reschedule(join_plans(fragments)).await?; @@ -78,7 +80,7 @@ async fn nexmark_chaos_common_inner( cluster.reschedule(fragment.random_reschedule()).await?; sleep(after_scale_duration).await; - cluster.run(select).await?.assert_result_ne(&final_result); + session.run(select).await?.assert_result_ne(&final_result); let fragment = cluster.locate_fragment_by_id(id).await?; cluster.reschedule(fragment.random_reschedule()).await?; @@ -86,7 +88,7 @@ async fn nexmark_chaos_common_inner( sleep(Duration::from_secs(50)).await; - cluster.run(select).await?.assert_result_eq(&final_result); + session.run(select).await?.assert_result_eq(&final_result); Ok(()) } diff --git a/src/tests/simulation/tests/it/nexmark_recovery.rs b/src/tests/simulation/tests/it/nexmark_recovery.rs new file mode 100644 index 0000000000000..cd4ad6e2c80e4 --- /dev/null +++ b/src/tests/simulation/tests/it/nexmark_recovery.rs @@ -0,0 +1,84 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(madsim)] + +use std::time::Duration; + +use anyhow::Result; +use madsim::time::{sleep, Instant}; +use risingwave_simulation::cluster::{Configuration, KillOpts}; +use risingwave_simulation::nexmark::{self, NexmarkCluster, THROUGHPUT}; +use risingwave_simulation::utils::AssertResult; + +/// Setup a nexmark stream, inject failures, and verify results. 
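+///
+/// Each `test!($query)` invocation further below expands, roughly, to a test of this
+/// shape (illustrative expansion, not the literal macro output):
+///
+/// ```ignore
+/// #[madsim::test]
+/// async fn nexmark_recovery_q3() -> Result<()> {
+///     use risingwave_simulation::nexmark::queries::q3::*;
+///     nexmark_recovery_common(CREATE, SELECT, DROP).await
+/// }
+/// ```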
+async fn nexmark_recovery_common(create: &str, select: &str, drop: &str) -> Result<()> { + // tracing_subscriber::fmt() + // .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + // .init(); + + let mut cluster = + NexmarkCluster::new(Configuration::for_scale(), 6, Some(THROUGHPUT * 20), false).await?; + + // get the output without failures as the standard result + cluster.run(create).await?; + sleep(Duration::from_secs(30)).await; + let expected = cluster.run(select).await?; + cluster.run(drop).await?; + sleep(Duration::from_secs(5)).await; + + cluster.run(create).await?; + + // kill nodes and trigger recovery + for _ in 0..5 { + sleep(Duration::from_secs(2)).await; + cluster.kill_node(&KillOpts::ALL).await; + } + // wait enough time to make sure the stream is end + sleep(Duration::from_secs(60)).await; + + cluster.run(select).await?.assert_result_eq(&expected); + + Ok(()) +} + +macro_rules! test { + ($query:ident) => { + paste::paste! { + #[madsim::test] + async fn [< nexmark_recovery_ $query >]() -> Result<()> { + use risingwave_simulation::nexmark::queries::$query::*; + nexmark_recovery_common(CREATE, SELECT, DROP) + .await + } + } + }; +} + +// q0, q1, q2: too trivial +test!(q3); +test!(q4); +test!(q5); +// q6: cannot plan +test!(q7); +test!(q8); +test!(q9); +// q10+: duplicated or unsupported + +// Self made queries. +test!(q101); +test!(q102); +test!(q103); +test!(q104); +test!(q105); diff --git a/src/tests/simulation/tests/it/singleton_migration.rs b/src/tests/simulation/tests/it/singleton_migration.rs index e480b7aedb555..7b4482de26c1a 100644 --- a/src/tests/simulation/tests/it/singleton_migration.rs +++ b/src/tests/simulation/tests/it/singleton_migration.rs @@ -32,10 +32,11 @@ const CASCADE_MV: &str = "create materialized view m2 as select * from m1;"; #[madsim::test] async fn test_singleton_migration() -> Result<()> { let mut cluster = Cluster::start(Configuration::for_scale()).await?; + let mut session = cluster.start_session(); - cluster.run(ROOT_TABLE_CREATE).await?; - cluster.run(ROOT_MV).await?; - cluster.run(CASCADE_MV).await?; + session.run(ROOT_TABLE_CREATE).await?; + session.run(ROOT_MV).await?; + session.run(CASCADE_MV).await?; let fragment = cluster .locate_one_fragment(vec![ @@ -69,16 +70,16 @@ async fn test_singleton_migration() -> Result<()> { sleep(Duration::from_secs(3)).await; - cluster + session .run(&format!( "insert into t values {}", (1..=10).map(|x| format!("({x})")).join(",") )) .await?; - cluster.run("flush").await?; + session.run("flush").await?; - cluster + session .run("select * from m2") .await? .assert_result_eq("10"); @@ -94,16 +95,16 @@ async fn test_singleton_migration() -> Result<()> { sleep(Duration::from_secs(3)).await; - cluster + session .run(&format!( "insert into t values {}", (11..=20).map(|x| format!("({x})")).join(",") )) .await?; - cluster.run("flush").await?; + session.run("flush").await?; - cluster + session .run("select * from m2") .await? 
.assert_result_eq("20"); diff --git a/src/tests/simulation/tests/it/sink.rs b/src/tests/simulation/tests/it/sink.rs index a92043c3c5a90..ac95d98cffade 100644 --- a/src/tests/simulation/tests/it/sink.rs +++ b/src/tests/simulation/tests/it/sink.rs @@ -28,9 +28,9 @@ use risingwave_simulation::cluster::{Cluster, Configuration}; use risingwave_simulation::ctl_ext::predicate::{identity_contains, no_identity_contains}; const ROOT_TABLE_CREATE: &str = "create table t (v1 int) append only;"; -const APPEND_ONLY_SINK_CREATE: &str = "create sink s1 from t with (connector='kafka', properties.bootstrap.server='192.168.11.1:29092', topic='t_sink_append_only', format='append_only');"; +const APPEND_ONLY_SINK_CREATE: &str = "create sink s1 from t with (connector='kafka', properties.bootstrap.server='192.168.11.1:29092', topic='t_sink_append_only', type='append-only');"; const MV_CREATE: &str = "create materialized view m as select count(*) from t;"; -const DEBEZIUM_SINK_CREATE: &str = "create sink s2 from m with (connector='kafka', properties.bootstrap.server='192.168.11.1:29092', topic='t_sink_debezium', format='debezium');"; +const DEBEZIUM_SINK_CREATE: &str = "create sink s2 from m with (connector='kafka', properties.bootstrap.server='192.168.11.1:29092', topic='t_sink_debezium', type='debezium');"; const APPEND_ONLY_TOPIC: &str = "t_sink_append_only"; const DEBEZIUM_TOPIC: &str = "t_sink_debezium"; diff --git a/src/tests/simulation/tests/it/streaming_parallelism.rs b/src/tests/simulation/tests/it/streaming_parallelism.rs index 24d678eb7234b..38c074aeeed2a 100644 --- a/src/tests/simulation/tests/it/streaming_parallelism.rs +++ b/src/tests/simulation/tests/it/streaming_parallelism.rs @@ -15,10 +15,8 @@ #![cfg(madsim)] use anyhow::Result; -use risingwave_simulation::client::RisingWave; use risingwave_simulation::cluster::{Cluster, Configuration}; use risingwave_simulation::ctl_ext::predicate::identity_contains; -use sqllogictest::runner::AsyncDB; #[madsim::test] async fn test_streaming_parallelism_default() -> Result<()> { @@ -32,33 +30,19 @@ async fn test_streaming_parallelism_default() -> Result<()> { Ok(()) } -async fn run_sqls_in_session(cluster: &Cluster, sqls: Vec) { - cluster - .run_on_client(async move { - let mut session = RisingWave::connect("frontend".into(), "dev".into()) - .await - .expect("failed to connect to RisingWave"); - for sql in sqls { - session.run(&sql).await.unwrap(); - } - }) - .await; -} - #[madsim::test] async fn test_streaming_parallelism_set_some() -> Result<()> { let mut cluster = Cluster::start(Configuration::for_scale()).await?; let default_parallelism = cluster.config().compute_nodes * cluster.config().compute_node_cores; let target_parallelism = default_parallelism - 1; assert!(target_parallelism > 0); - run_sqls_in_session( - &cluster, - vec![ - format!("set streaming_parallelism={};", target_parallelism), - "create table t1 (c1 int, c2 int);".to_string(), - ], - ) - .await; + + let mut session = cluster.start_session(); + session + .run(format!("set streaming_parallelism={};", target_parallelism)) + .await?; + session.run("create table t1 (c1 int, c2 int);").await?; + let materialize_fragment = cluster .locate_one_fragment([identity_contains("materialize")]) .await?; @@ -70,14 +54,11 @@ async fn test_streaming_parallelism_set_some() -> Result<()> { async fn test_streaming_parallelism_set_zero() -> Result<()> { let mut cluster = Cluster::start(Configuration::for_scale()).await?; let default_parallelism = cluster.config().compute_nodes * 
cluster.config().compute_node_cores; - run_sqls_in_session( - &cluster, - vec![ - "set streaming_parallelism=0;".to_string(), - "create table t1 (c1 int, c2 int);".to_string(), - ], - ) - .await; + + let mut session = cluster.start_session(); + session.run("set streaming_parallelism=0;").await?; + session.run("create table t1 (c1 int, c2 int);").await?; + let materialize_fragment = cluster .locate_one_fragment([identity_contains("materialize")]) .await?; @@ -91,17 +72,22 @@ async fn test_streaming_parallelism_mv_on_mv() -> Result<()> { let default_parallelism = cluster.config().compute_nodes * cluster.config().compute_node_cores; let target_parallelism = default_parallelism - 1; assert!(target_parallelism - 1 > 0); - run_sqls_in_session( - &cluster, - vec![ - format!("set streaming_parallelism={};", target_parallelism), - "create table t1 (c1 int, c2 int);".to_string(), - format!("set streaming_parallelism={};", target_parallelism - 1), - "create materialized view mv3 as select c1,count(*) as cc from t1 group by c1;" - .to_string(), - ], - ) - .await; + + let mut session = cluster.start_session(); + session + .run(format!("set streaming_parallelism={};", target_parallelism)) + .await?; + session.run("create table t1 (c1 int, c2 int);").await?; + session + .run(format!( + "set streaming_parallelism={};", + target_parallelism - 1 + )) + .await?; + session + .run("create materialized view mv1 as select c1,count(*) as cc from t1 group by c1;") + .await?; + let materialize_fragments = cluster .locate_fragments([identity_contains("materialize")]) .await?; @@ -123,16 +109,20 @@ async fn test_streaming_parallelism_index() -> Result<()> { let default_parallelism = cluster.config().compute_nodes * cluster.config().compute_node_cores; let target_parallelism = default_parallelism - 1; assert!(target_parallelism - 1 > 0); - run_sqls_in_session( - &cluster, - vec![ - format!("set streaming_parallelism={};", target_parallelism), - "create table t1 (c1 int, c2 int);".to_string(), - format!("set streaming_parallelism={};", target_parallelism - 1), - "create index idx1 on t1(c2);".to_string(), - ], - ) - .await; + + let mut session = cluster.start_session(); + session + .run(format!("set streaming_parallelism={};", target_parallelism)) + .await?; + session.run("create table t1 (c1 int, c2 int);").await?; + session + .run(format!( + "set streaming_parallelism={};", + target_parallelism - 1 + )) + .await?; + session.run("create index idx1 on t1(c2);").await?; + let materialize_fragments = cluster .locate_fragments([identity_contains("materialize")]) .await?; diff --git a/src/tests/sqlsmith/Cargo.toml b/src/tests/sqlsmith/Cargo.toml index b624229dfad30..9eb3fd59bb051 100644 --- a/src/tests/sqlsmith/Cargo.toml +++ b/src/tests/sqlsmith/Cargo.toml @@ -20,6 +20,7 @@ clap = { version = "4", features = ["derive"] } itertools = "0.10" rand = { version = "0.8", features = ["small_rng"] } rand_chacha = { version = "0.3.1" } +regex = "1" risingwave_common = { path = "../../common" } risingwave_expr = { path = "../../expr" } risingwave_frontend = { path = "../../frontend" } diff --git a/src/tests/sqlsmith/scripts/gen_queries.sh b/src/tests/sqlsmith/scripts/gen_queries.sh index 23688815aed56..94f1a8b9e1066 100755 --- a/src/tests/sqlsmith/scripts/gen_queries.sh +++ b/src/tests/sqlsmith/scripts/gen_queries.sh @@ -30,6 +30,7 @@ echo_err() { } ################## EXTRACT +# TODO(kwannoel): Write tests for these # Get reason for generation crash. 
get_failure_reason() { @@ -67,7 +68,7 @@ extract_failing_query() { grep "\[EXECUTING .*\]: " | tail -n 1 | sed -E 's/^.*\[EXECUTING .*\]: (.*)$/\1;/' || true } -# Extract fail info from logs in log dir +# Extract fail info from [`generate-*.log`] in log dir extract_fail_info_from_logs() { for LOGFILENAME in $(ls "$LOGDIR" | grep "generate") do @@ -86,14 +87,17 @@ extract_fail_info_from_logs() { FAIL_DIR="$OUTDIR/failed/$SEED" mkdir -p "$FAIL_DIR" echo -e "$DDL" "\n$GLOBAL_SESSION" "\n$DML" "\n$TEST_SESSION" "\n$QUERY" > "$FAIL_DIR/queries.sql" - echo_err "[INFO] WROTE FAIL QUERY to $FAIL_DIR/queries.log" + echo_err "[INFO] WROTE FAIL QUERY to $FAIL_DIR/queries.sql" echo -e "$REASON" > "$FAIL_DIR/fail.log" echo_err "[INFO] WROTE FAIL REASON to $FAIL_DIR/fail.log" + cp "$LOGFILE" "$FAIL_DIR/$LOGFILENAME" fi done } +################# Generate + # Prefer to use [`generate_deterministic`], it is faster since # runs with all-in-one binary. generate_deterministic() { @@ -123,6 +127,8 @@ generate_sqlsmith() { --generate "$OUTDIR/$1" } +############################# Checks + # Check that queries are different check_different_queries() { if [[ -z $(diff "$OUTDIR/1/queries.sql" "$OUTDIR/2/queries.sql") ]]; then @@ -142,20 +148,6 @@ check_failed_to_generate_queries() { fi } -# Upload step -upload_queries() { - set +x - pushd "$OUTDIR" - git checkout -b stage - git add . - git commit -m 'update queries' - git push -f origin stage - git checkout - - git branch -D stage - popd - set -x -} - # Run it to make sure it should have no errors run_queries() { echo "" > $LOGDIR/run_deterministic.stdout.log @@ -166,6 +158,7 @@ run_queries() { && rm $LOGDIR/fuzzing-{}.log" } +# Generated query sets should not fail. check_failed_to_run_queries() { FAILED_LOGS=$(ls "$LOGDIR" | grep fuzzing || true) if [[ -n "$FAILED_LOGS" ]]; then @@ -209,6 +202,39 @@ validate() { echo_err "[INFO] Passed checks" } +# sync step +# Some queries maybe be added +sync_queries() { + set +x + pushd $OUTDIR + git checkout main + git pull + set +e + git branch -D stage + set -e + git checkout -b stage + popd + set -x +} + +sync() { + sync_queries + echo_err "[INFO] Synced" +} + +# Upload step +upload_queries() { + set +x + pushd "$OUTDIR" + git add . + git commit -m 'update queries' + git push -f origin stage + git checkout - + git branch -D stage + popd + set -x +} + upload() { upload_queries echo_err "[INFO] Uploaded" @@ -219,10 +245,13 @@ cleanup() { echo_err "[INFO] Success!" } +################### MAIN + main() { setup build + sync generate validate upload @@ -230,4 +259,4 @@ main() { cleanup } -main \ No newline at end of file +main diff --git a/src/tests/sqlsmith/src/lib.rs b/src/tests/sqlsmith/src/lib.rs index e09aba9b25ff3..232e0b75ba1e4 100644 --- a/src/tests/sqlsmith/src/lib.rs +++ b/src/tests/sqlsmith/src/lib.rs @@ -15,6 +15,7 @@ #![feature(let_chains)] #![feature(if_let_guard)] #![feature(once_cell)] +#![feature(box_patterns)] use rand::prelude::SliceRandom; use rand::Rng; @@ -25,8 +26,10 @@ use risingwave_sqlparser::parser::Parser; use crate::sql_gen::SqlGenerator; +pub mod reducer; pub mod runner; mod sql_gen; +mod utils; pub mod validation; pub use validation::is_permissible_error; @@ -76,7 +79,8 @@ pub fn session_sql_gen(rng: &mut R) -> String { /// Parse SQL /// FIXME(Noel): Introduce error type for sqlsmith for this. 
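 /// After this change both `&str` and `String` arguments compile; a small
 /// hypothetical example:
 ///
 /// ```ignore
 /// let stmts = parse_sql("SELECT 1;");
 /// let stmts = parse_sql(format!("SELECT {};", 1));
 /// ```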
-pub fn parse_sql(sql: &str) -> Vec<Statement> {
+pub fn parse_sql<S: AsRef<str>>(sql: S) -> Vec<Statement> {
+    let sql = sql.as_ref();
     Parser::parse_sql(sql).unwrap_or_else(|_| panic!("Failed to parse SQL: {}", sql))
 }
 
diff --git a/src/tests/sqlsmith/src/reducer.rs b/src/tests/sqlsmith/src/reducer.rs
new file mode 100644
index 0000000000000..f75505355e51e
--- /dev/null
+++ b/src/tests/sqlsmith/src/reducer.rs
@@ -0,0 +1,407 @@
+// Copyright 2023 RisingWave Labs
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provides query reduction (shrinking) functionality for failing queries.
+
+use std::collections::HashSet;
+use std::path::Path;
+
+use anyhow::anyhow;
+use itertools::Itertools;
+#[cfg(madsim)]
+use rand_chacha::ChaChaRng;
+use regex::Regex;
+use risingwave_sqlparser::ast::{
+    Cte, Expr, FunctionArgExpr, Join, Query, Select, SetExpr, Statement, TableFactor,
+    TableWithJoins, With,
+};
+
+use crate::parse_sql;
+use crate::utils::{create_file, read_file_contents, write_to_file};
+
+type Result<A> = anyhow::Result<A>;
+
+/// Shrinks a given failing query file.
+/// The shrunk query will be written to [`{outdir}/{filename}.reduced.sql`].
+pub fn shrink_file(input_file_path: &str, outdir: &str) -> Result<()> {
+    // read failed sql
+    let file_stem = Path::new(input_file_path)
+        .file_stem()
+        .ok_or_else(|| anyhow!("Failed to stem input file path: {input_file_path}"))?;
+    let output_file_path = format!("{outdir}/{}.reduced.sql", file_stem.to_string_lossy());
+    let file_contents = read_file_contents(input_file_path)?;
+
+    // reduce failed sql
+    let reduced_sql = shrink(&file_contents)?;
+
+    // write reduced sql
+    let mut file = create_file(output_file_path).unwrap();
+    write_to_file(&mut file, reduced_sql)
+}
+
+fn shrink(sql: &str) -> Result<String> {
+    let sql_statements = parse_sql(sql);
+
+    // Session variable before the failing query.
+    let session_variable = sql_statements
+        .get(sql_statements.len() - 2)
+        .filter(|statement| matches!(statement, Statement::SetVariable { .. }));
+
+    let failing_query = sql_statements
+        .last()
+        .ok_or_else(|| anyhow!("Could not get last sql statement"))?;
+
+    let ddl_references = find_ddl_references(&sql_statements);
+
+    tracing::info!("[DDL REFERENCES]: {}", ddl_references.iter().join(", "));
+
+    let mut ddl = sql_statements
+        .iter()
+        .filter(|s| {
+            matches!(*s,
+                Statement::CreateView { name, .. } | Statement::CreateTable { name, .. }
+                    if ddl_references.contains(&name.real_value()))
+        })
+        .collect();
+
+    let mut dml = sql_statements
+        .iter()
+        .filter(|s| {
+            matches!(*s,
+                Statement::Insert { table_name, .. }
+                    if ddl_references.contains(&table_name.real_value()))
+        })
+        .collect();
+
+    let mut reduced_statements = vec![];
+    reduced_statements.append(&mut ddl);
+    reduced_statements.append(&mut dml);
+    if let Some(session_variable) = session_variable {
+        reduced_statements.push(session_variable);
+    }
+    reduced_statements.push(failing_query);
+
+    let sql = reduced_statements
+        .iter()
+        .map(|s| format!("{s};\n"))
+        .collect::<String>();
+
+    Ok(sql)
+}
+
+pub(crate) fn find_ddl_references(sql_statements: &[Statement]) -> HashSet<String> {
+    let mut ddl_references = HashSet::new();
+    let mut sql_statements = sql_statements.iter().rev();
+    let failing = sql_statements.next().unwrap();
+    match failing {
+        Statement::Query(query) | Statement::CreateView { query, .. } => {
+            find_ddl_references_for_query(query.as_ref(), &mut ddl_references);
+        }
+        _ => {}
+    };
+    for sql_statement in sql_statements {
+        match sql_statement {
+            Statement::Query(query) => {
+                find_ddl_references_for_query(query.as_ref(), &mut ddl_references);
+            }
+            Statement::CreateView { query, name, .. }
+                if ddl_references.contains(&name.real_value()) =>
+            {
+                find_ddl_references_for_query(query.as_ref(), &mut ddl_references);
+            }
+            _ => {}
+        };
+    }
+    ddl_references
+}
+
+pub(crate) fn find_ddl_references_for_query(query: &Query, ddl_references: &mut HashSet<String>) {
+    let Query { with, body, .. } = query;
+    if let Some(With { cte_tables, .. }) = with {
+        for Cte { query, .. } in cte_tables {
+            find_ddl_references_for_query(query, ddl_references)
+        }
+    }
+    find_ddl_references_for_query_in_set_expr(body, ddl_references);
+}
+
+fn find_ddl_references_for_query_in_set_expr(
+    set_expr: &SetExpr,
+    ddl_references: &mut HashSet<String>,
+) {
+    match set_expr {
+        SetExpr::Select(box Select { from, .. }) => {
+            for table_with_joins in from {
+                find_ddl_references_for_query_in_table_with_joins(table_with_joins, ddl_references);
+            }
+        }
+        SetExpr::Query(q) => find_ddl_references_for_query(q, ddl_references),
+        SetExpr::SetOperation { left, right, .. } => {
+            find_ddl_references_for_query_in_set_expr(left, ddl_references);
+            find_ddl_references_for_query_in_set_expr(right, ddl_references);
+        }
+        SetExpr::Values(_) => {}
+    }
+}
+
+fn find_ddl_references_for_query_in_table_with_joins(
+    TableWithJoins { relation, joins }: &TableWithJoins,
+    ddl_references: &mut HashSet<String>,
+) {
+    find_ddl_references_for_query_in_table_factor(relation, ddl_references);
+    for Join { relation, .. } in joins {
+        find_ddl_references_for_query_in_table_factor(relation, ddl_references);
+    }
+}
+
+fn find_ddl_references_for_query_in_table_factor(
+    table_factor: &TableFactor,
+    ddl_references: &mut HashSet<String>,
+) {
+    match table_factor {
+        TableFactor::Table { name, .. } => {
+            ddl_references.insert(name.real_value());
+        }
+        TableFactor::Derived { subquery, .. } => {
+            find_ddl_references_for_query(subquery, ddl_references)
+        }
+        TableFactor::TableFunction { name, args, ..
} => { + let name = name.real_value(); + // https://docs.rs/regex/latest/regex/#grouping-and-flags + let regex = Regex::new(r"(?i)(tumble|hop)").unwrap(); + if regex.is_match(&name) && args.len() >= 3 { + let table_name = &args[0]; + if let FunctionArgExpr::Expr(Expr::Identifier(table_name)) = table_name.get_expr() { + ddl_references.insert(table_name.to_string().to_lowercase()); + } + } + } + TableFactor::NestedJoin(table_with_joins) => { + find_ddl_references_for_query_in_table_with_joins(table_with_joins, ddl_references); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + const DDL_AND_DML: &str = " +CREATE TABLE T1 (V1 INT, V2 INT, V3 INT); +CREATE TABLE T2 (V1 INT, V2 INT, V3 INT); +CREATE TABLE T3 (V1 timestamp, V2 INT, V3 INT); +CREATE MATERIALIZED VIEW M1 AS SELECT * FROM T1; +CREATE MATERIALIZED VIEW M2 AS SELECT * FROM T2 LEFT JOIN T3 ON T2.V1 = T3.V2; +CREATE MATERIALIZED VIEW M3 AS SELECT * FROM T1 LEFT JOIN T2; +CREATE MATERIALIZED VIEW M4 AS SELECT * FROM M3; +INSERT INTO T1 VALUES(0, 0, 1); +INSERT INTO T1 VALUES(0, 0, 2); +INSERT INTO T2 VALUES(0, 0, 3); +INSERT INTO T2 VALUES(0, 0, 4); +INSERT INTO T3 VALUES (TIMESTAMP '00:00:00', 0, 5); +INSERT INTO T3 VALUES (TIMESTAMP '00:00:00', 0, 6); +SET RW_TWO_PHASE_AGG=TRUE; + "; + + fn sql_to_query(sql: &str) -> Box { + let sql_statement = parse_sql(sql).into_iter().next().unwrap(); + match sql_statement { + Statement::Query(query) | Statement::CreateView { query, .. } => query, + _ => panic!("Last statement was not a query, can't shrink"), + } + } + + #[test] + fn test_find_ddl_references_for_query_simple() { + let sql = "SELECT * FROM T1;"; + let query = sql_to_query(sql); + let mut ddl_references = HashSet::new(); + find_ddl_references_for_query(&query, &mut ddl_references); + println!("{:#?}", ddl_references); + assert!(ddl_references.contains("t1")); + } + + #[test] + fn test_find_ddl_references_for_tumble() { + let sql = "SELECT * FROM TUMBLE(T3, V1, INTERVAL '3' DAY);"; + let query = sql_to_query(sql); + let mut ddl_references = HashSet::new(); + find_ddl_references_for_query(&query, &mut ddl_references); + println!("{:#?}", ddl_references); + assert!(ddl_references.contains("t3")); + } + + #[test] + fn test_find_ddl_references_for_query_with_cte() { + let sql = "WITH WITH0 AS (SELECT * FROM M3) SELECT * FROM WITH0"; + let sql_statements = DDL_AND_DML.to_owned() + sql; + let sql_statements = parse_sql(sql_statements); + let ddl_references = find_ddl_references(&sql_statements); + assert!(ddl_references.contains("m3")); + assert!(ddl_references.contains("t1")); + assert!(ddl_references.contains("t2")); + + assert!(!ddl_references.contains("m4")); + assert!(!ddl_references.contains("t3")); + assert!(!ddl_references.contains("m1")); + assert!(!ddl_references.contains("m2")); + } + + #[test] + fn test_find_ddl_references_for_query_with_mv_on_mv() { + let sql = "WITH WITH0 AS (SELECT * FROM M4) SELECT * FROM WITH0"; + let sql_statements = DDL_AND_DML.to_owned() + sql; + let sql_statements = parse_sql(sql_statements); + let ddl_references = find_ddl_references(&sql_statements); + assert!(ddl_references.contains("m4")); + assert!(ddl_references.contains("m3")); + assert!(ddl_references.contains("t1")); + assert!(ddl_references.contains("t2")); + + assert!(!ddl_references.contains("t3")); + assert!(!ddl_references.contains("m1")); + assert!(!ddl_references.contains("m2")); + } + + #[test] + fn test_find_ddl_references_for_query_joins() { + let sql = "SELECT * FROM (T1 JOIN T2 ON T1.V1 = T2.V2) JOIN T3 ON T2.V1 = T3.V2"; + 
let sql_statements = DDL_AND_DML.to_owned() + sql; + let sql_statements = parse_sql(sql_statements); + let ddl_references = find_ddl_references(&sql_statements); + assert!(ddl_references.contains("t1")); + assert!(ddl_references.contains("t2")); + assert!(ddl_references.contains("t3")); + + assert!(!ddl_references.contains("m1")); + assert!(!ddl_references.contains("m2")); + assert!(!ddl_references.contains("m3")); + assert!(!ddl_references.contains("m4")); + } + + #[test] + fn test_shrink_values() { + let query = "SELECT 1;"; + let sql = DDL_AND_DML.to_owned() + query; + let expected = format!( + "\ +SET RW_TWO_PHASE_AGG = true; +{query} +" + ); + assert_eq!(expected, shrink(&sql).unwrap()); + } + + #[test] + fn test_shrink_simple_table() { + let query = "SELECT * FROM t1;"; + let sql = DDL_AND_DML.to_owned() + query; + let expected = format!( + "\ +CREATE TABLE T1 (V1 INT, V2 INT, V3 INT); +INSERT INTO T1 VALUES (0, 0, 1); +INSERT INTO T1 VALUES (0, 0, 2); +SET RW_TWO_PHASE_AGG = true; +{query} +" + ); + assert_eq!(expected, shrink(&sql).unwrap()); + } + + #[test] + fn test_shrink_simple_table_with_alias() { + let query = "SELECT * FROM t1 AS s1;"; + let sql = DDL_AND_DML.to_owned() + query; + let expected = format!( + "\ +CREATE TABLE T1 (V1 INT, V2 INT, V3 INT); +INSERT INTO T1 VALUES (0, 0, 1); +INSERT INTO T1 VALUES (0, 0, 2); +SET RW_TWO_PHASE_AGG = true; +{query} +" + ); + assert_eq!(expected, shrink(&sql).unwrap()); + } + + #[test] + fn test_shrink_join() { + let query = "SELECT * FROM (T1 JOIN T2 ON T1.V1 = T2.V2) JOIN T3 ON T2.V1 = T3.V2;"; + let sql = DDL_AND_DML.to_owned() + query; + let expected = format!( + "\ +CREATE TABLE T1 (V1 INT, V2 INT, V3 INT); +CREATE TABLE T2 (V1 INT, V2 INT, V3 INT); +CREATE TABLE T3 (V1 TIMESTAMP, V2 INT, V3 INT); +INSERT INTO T1 VALUES (0, 0, 1); +INSERT INTO T1 VALUES (0, 0, 2); +INSERT INTO T2 VALUES (0, 0, 3); +INSERT INTO T2 VALUES (0, 0, 4); +INSERT INTO T3 VALUES (TIMESTAMP '00:00:00', 0, 5); +INSERT INTO T3 VALUES (TIMESTAMP '00:00:00', 0, 6); +SET RW_TWO_PHASE_AGG = true; +{query} +" + ); + assert_eq!(expected, shrink(&sql).unwrap()); + } + + #[test] + fn test_shrink_tumble() { + let query = "SELECT * FROM TUMBLE(T3, V1, INTERVAL '3' DAY);"; + let sql = DDL_AND_DML.to_owned() + query; + let expected = format!( + "\ +CREATE TABLE T3 (V1 TIMESTAMP, V2 INT, V3 INT); +INSERT INTO T3 VALUES (TIMESTAMP '00:00:00', 0, 5); +INSERT INTO T3 VALUES (TIMESTAMP '00:00:00', 0, 6); +SET RW_TWO_PHASE_AGG = true; +{query} +" + ); + assert_eq!(expected, shrink(&sql).unwrap()); + } + + #[test] + fn test_shrink_subquery() { + let query = "SELECT * FROM (SELECT V1 AS K1 FROM T2);"; + let sql = DDL_AND_DML.to_owned() + query; + let expected = format!( + "\ +CREATE TABLE T2 (V1 INT, V2 INT, V3 INT); +INSERT INTO T2 VALUES (0, 0, 3); +INSERT INTO T2 VALUES (0, 0, 4); +SET RW_TWO_PHASE_AGG = true; +{query} +" + ); + assert_eq!(expected, shrink(&sql).unwrap()); + } + + #[test] + fn test_shrink_mview() { + let query = "CREATE MATERIALIZED VIEW m5 AS SELECT * FROM (SELECT V1 AS K1 FROM T2);"; + let sql = DDL_AND_DML.to_owned() + query; + let expected = format!( + "\ +CREATE TABLE T2 (V1 INT, V2 INT, V3 INT); +INSERT INTO T2 VALUES (0, 0, 3); +INSERT INTO T2 VALUES (0, 0, 4); +SET RW_TWO_PHASE_AGG = true; +{query} +" + ); + assert_eq!(expected, shrink(&sql).unwrap()); + } +} diff --git a/src/tests/sqlsmith/src/runner.rs b/src/tests/sqlsmith/src/runner.rs index 167d9e36ee626..b0151089b613c 100644 --- a/src/tests/sqlsmith/src/runner.rs +++ 
b/src/tests/sqlsmith/src/runner.rs @@ -14,16 +14,16 @@ //! Provides E2E Test runner functionality. -use anyhow; +use anyhow::anyhow; use itertools::Itertools; use rand::rngs::SmallRng; use rand::{Rng, SeedableRng}; #[cfg(madsim)] use rand_chacha::ChaChaRng; -use risingwave_common::error::anyhow_error; use tokio_postgres::error::Error as PgError; use tokio_postgres::Client; +use crate::utils::read_file_contents; use crate::validation::is_permissible_error; use crate::{ create_table_statement_to_table, insert_sql_gen, mview_sql_gen, parse_sql, session_sql_gen, @@ -260,7 +260,7 @@ async fn test_sqlsmith( } async fn set_variable(client: &Client, variable: &str, value: &str) -> String { - let s = format!("SET {variable} TO {value};"); + let s = format!("SET {variable} TO {value}"); tracing::info!("[EXECUTING SET_VAR]: {}", s); client.simple_query(&s).await.unwrap(); s @@ -335,7 +335,7 @@ fn get_seed_table_sql(testdata: &str) -> String { let seed_files = vec!["tpch.sql", "nexmark.sql", "alltypes.sql"]; seed_files .iter() - .map(|filename| std::fs::read_to_string(format!("{}/{}", testdata, filename)).unwrap()) + .map(|filename| read_file_contents(format!("{}/{}", testdata, filename)).unwrap()) .collect::() } @@ -345,7 +345,7 @@ async fn create_base_tables(testdata: &str, client: &Client) -> Result(); for stmt in sql.lines() { @@ -426,12 +426,12 @@ fn validate_response<_Row>(response: PgResult<_Row>) -> Result { if let Some(e) = e.as_db_error() && is_permissible_error(&e.to_string()) { - tracing::info!("[SKIPPED ERROR]: {:?}", e); + tracing::info!("[SKIPPED ERROR]: {:#?}", e); return Ok(1); } // consolidate error reason for deterministic test - tracing::info!("[UNEXPECTED ERROR]: {}", e); - Err(anyhow_error!(e)) + tracing::info!("[UNEXPECTED ERROR]: {:#?}", e); + Err(anyhow!("Encountered unexpected error: {e}")) } } } diff --git a/src/tests/sqlsmith/src/sql_gen/relation.rs b/src/tests/sqlsmith/src/sql_gen/relation.rs index 7d103e3d6e478..86e57497bf532 100644 --- a/src/tests/sqlsmith/src/sql_gen/relation.rs +++ b/src/tests/sqlsmith/src/sql_gen/relation.rs @@ -66,6 +66,7 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { name: alias.as_str().into(), columns: vec![], }), + for_system_time_as_of_now: false, }; table.name = alias; // Rename the table. 
let columns = table.get_qualified_columns(); diff --git a/src/tests/sqlsmith/src/sql_gen/scalar.rs index 55661f3ba6ee7..e79fdba9b4dcb 100644 --- a/src/tests/sqlsmith/src/sql_gen/scalar.rs +++ b/src/tests/sqlsmith/src/sql_gen/scalar.rs @@ -148,15 +148,19 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { fn gen_temporal_scalar(&mut self, typ: &DataType) -> String { use DataType as T; - let rand_secs = self.rng.gen_range(2..1000000) as u64; let minute = 60; let hour = 60 * minute; let day = 24 * hour; let week = 7 * day; - let choices = [0, 1, minute, hour, day, week, rand_secs]; - let secs = choices.choose(&mut self.rng).unwrap(); + let choices = [0, 1, minute, hour, day, week]; - let tm = DateTime::<Utc>::from(SystemTime::now() - Duration::from_secs(*secs)); + let secs = match self.rng.gen_range(1..=100) { + 1..=30 => *choices.choose(&mut self.rng).unwrap(), + 31..=100 => self.rng.gen_range(2..100) as u64, + _ => unreachable!(), + }; + + let tm = DateTime::<Utc>::from(SystemTime::now() - Duration::from_secs(secs)); match typ { T::Date => tm.format("%F").to_string(), T::Timestamp | T::Timestamptz => tm.format("%Y-%m-%d %H:%M:%S").to_string(), @@ -169,7 +173,7 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { T::Time => tm.format("%T").to_string(), T::Interval => { if self.rng.gen_bool(0.5) { - (-(*secs as i64)).to_string() + (-(secs as i64)).to_string() } else { secs.to_string() } diff --git a/src/tests/sqlsmith/src/sql_gen/types.rs index 9e9535f99b546..561b38d5c26ae 100644 --- a/src/tests/sqlsmith/src/sql_gen/types.rs +++ b/src/tests/sqlsmith/src/sql_gen/types.rs @@ -33,6 +33,7 @@ pub(super) fn data_type_to_ast_data_type(data_type: &DataType) -> AstDataType { DataType::Int16 => AstDataType::SmallInt, DataType::Int32 => AstDataType::Int, DataType::Int64 => AstDataType::BigInt, + DataType::Serial => unreachable!("serial should not be generated"), DataType::Decimal => AstDataType::Decimal(None, None), DataType::Float32 => AstDataType::Real, DataType::Float64 => AstDataType::Double, diff --git a/src/tests/sqlsmith/src/sql_gen/utils.rs index 389c4fe05a54b..565bd2638cf80 100644 --- a/src/tests/sqlsmith/src/sql_gen/utils.rs +++ b/src/tests/sqlsmith/src/sql_gen/utils.rs @@ -74,6 +74,7 @@ pub(crate) fn create_table_factor_from_table(table: &Table) -> TableFactor { TableFactor::Table { name: ObjectName(vec![Ident::new_unchecked(&table.name)]), alias: None, + for_system_time_as_of_now: false, } } diff --git a/src/tests/sqlsmith/src/utils.rs b/src/tests/sqlsmith/src/utils.rs new file mode 100644 index 0000000000000..1541cc9b522a4 --- /dev/null +++ b/src/tests/sqlsmith/src/utils.rs @@ -0,0 +1,43 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
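+//! Filesystem helpers for the sqlsmith test binaries: thin wrappers around
+//! `std::fs` that turn I/O failures into `anyhow` errors with context (such
+//! as the path being read or created).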
+ +use std::fs::File; +use std::io::Write; +use std::path::Path; + +use anyhow::{anyhow, Result}; + +pub(crate) fn read_file_contents<P: AsRef<Path>>(filepath: P) -> Result<String> { + std::fs::read_to_string(filepath.as_ref()).map_err(|e| { + anyhow!( + "Failed to read contents from {} due to {e}", + filepath.as_ref().display() + ) + }) +} + +pub(crate) fn create_file<P: AsRef<Path>>(filepath: P) -> Result<File> { + std::fs::File::create(filepath.as_ref()).map_err(|e| { + anyhow!( + "Failed to create file: {} due to {e}", + filepath.as_ref().display() + ) + }) +} + +pub(crate) fn write_to_file<S: AsRef<str>>(file: &mut File, contents: S) -> Result<()> { + let s = contents.as_ref().as_bytes(); + file.write_all(s) + .map_err(|e| anyhow!("Failed to write file due to {e}")) +} diff --git a/src/tests/state_cleaning_test/Cargo.toml b/src/tests/state_cleaning_test/Cargo.toml new file mode 100644 index 0000000000000..738b4108b82ec --- /dev/null +++ b/src/tests/state_cleaning_test/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "risingwave_state_cleaning_test" +version = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +keywords = { workspace = true } +license = { workspace = true } +repository = { workspace = true } + +[package.metadata.cargo-machete] +ignored = ["workspace-hack"] + +[package.metadata.cargo-udeps.ignore] +normal = ["workspace-hack"] + +[dependencies] +anyhow = "1" +chrono = "0.4" +clap = { version = "4", features = ["derive"] } +futures = { version = "0.3", default-features = false, features = ["alloc"] } +itertools = "0.10" +regex = "1" +risingwave_rt = { path = "../../utils/runtime" } +serde = { version = "1", features = ["derive"] } +serde_with = "2" +tokio = { version = "0.2", package = "madsim-tokio" } +tokio-postgres = "0.7.7" +tokio-stream = { version = "0.1", features = ["fs"] } +toml = "0.7" +tracing = "0.1" + +[target.'cfg(not(madsim))'.dependencies] +workspace-hack = { path = "../../workspace-hack" } + +[[bin]] +name = "risingwave_state_cleaning_test" +path = "src/bin/main.rs" diff --git a/src/tests/state_cleaning_test/README.md b/src/tests/state_cleaning_test/README.md new file mode 100644 index 0000000000000..bc2fda796881f --- /dev/null +++ b/src/tests/state_cleaning_test/README.md @@ -0,0 +1,18 @@ +# risingwave_state_cleaning_test + +The `risingwave_state_cleaning_test` crate tests whether RisingWave cleans outdated state records in time as watermarks advance. The tests to run are described in TOML files. These checks guard against unbounded growth of internal state tables, which would otherwise degrade performance and reliability for long-running streaming jobs. + +## TOML files + +The TOML files describe the tests that should be run. Each test is an entry in the `[[test]]` array of tables and has the following format: + +```toml +[[test]] +name = "test name" # A human-readable name for the test +init_sqls = [ "SQL statement 1", "SQL statement 2", ... ] # A list of SQL statements to prepare the test environment +bound_tables = [ + { pattern = "table name pattern", limit = number }, # A pattern to match table names and a limit on the number of rows for each table + { pattern = "table name pattern", limit = number }, + ... +] # A list of tables that should be checked.
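+
+# As a purely illustrative (hypothetical) entry, a complete test could look like:
+#
+# [[test]]
+# name = "simple_agg"
+# init_sqls = ["CREATE TABLE t (v INT, ts TIMESTAMP, WATERMARK FOR ts AS ts - INTERVAL '5' SECOND) APPEND ONLY WITH (connector = 'datagen')"]
+# bound_tables = [{ pattern = '__internal_.*_hashagg.*', limit = 100 }]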
+``` diff --git a/src/tests/state_cleaning_test/data/agg.toml b/src/tests/state_cleaning_test/data/agg.toml new file mode 100644 index 0000000000000..1e47ab77bc54e --- /dev/null +++ b/src/tests/state_cleaning_test/data/agg.toml @@ -0,0 +1,42 @@ +[[test]] +name = "window_hash_agg" +# Prepare the testing table & mviews. +init_sqls = [ + # Set up the base table. + """ + CREATE TABLE t1 ( + created_at timestamp, + grp int, + v int, + WATERMARK FOR created_at AS created_at - interval '9' second + ) APPEND ONLY WITH ( + connector = 'datagen', + rows_per_second = 100, + datagen.split.num = 16, + fields.created_at.max_past_mode = 'relative', + fields.created_at.max_past = '10s', + fields.grp.min = 0, + fields.grp.max = 5, + ); + """, + # Set up the tumble window mview. + """ + CREATE MATERIALIZED VIEW mv_tumble AS + SELECT grp, SUM(v), window_start + FROM tumble(t1, created_at, INTERVAL '1' SECOND) + GROUP BY window_start, grp; + """, + # Set up the hop window mview. + """ + CREATE MATERIALIZED VIEW mv_hop AS + SELECT grp, SUM(v), window_start + FROM hop(t1, created_at, INTERVAL '1' SECOND, INTERVAL '3' SECOND) + GROUP BY window_start, grp; + """, +] +bound_tables = [ + # Tumble window agg state table. + { pattern = '__internal_mv_tumble_\d+_hashaggresult_\d+', limit = 200 }, + # Hop window agg state table. + { pattern = '__internal_mv_hop_\d+_hashaggresult_\d+', limit = 400 }, +] diff --git a/src/tests/state_cleaning_test/data/join.toml b/src/tests/state_cleaning_test/data/join.toml new file mode 100644 index 0000000000000..76d5379231c99 --- /dev/null +++ b/src/tests/state_cleaning_test/data/join.toml @@ -0,0 +1,54 @@ +[[test]] +name = "window_hash_join" +init_sqls = [ + """ + CREATE TABLE orders ( + order_id INTEGER, + user_id INTEGER, + amount INTEGER, + created_at TIMESTAMP, + WATERMARK FOR created_at AS created_at - interval '9' second + ) APPEND ONLY WITH ( + connector = 'datagen', + rows_per_second = 100, + datagen.split.num = 16, + fields.created_at.max_past_mode = 'relative', + fields.created_at.max_past = '10s', + fields.order_id.kind = 'sequence', + fields.order_id.start = 0, + fields.user_id.min = 0, + fields.user_id.max = 20, + fields.amount.min = 0, + fields.amount.max = 20, + ); + """, + """ + CREATE TABLE clicks ( + click_id INTEGER, + user_id INTEGER, + created_at TIMESTAMP, + WATERMARK FOR created_at AS created_at - interval '9' second + ) APPEND ONLY WITH ( + connector = 'datagen', + rows_per_second = 200, + datagen.split.num = 16, + fields.created_at.max_past_mode = 'relative', + fields.created_at.max_past = '10s', + fields.click_id.kind = 'sequence', + fields.click_id.start = 0, + fields.user_id.min = 0, + fields.user_id.max = 20, + ); + """, + """ + CREATE MATERIALIZED VIEW mv_tumble_join AS + SELECT clicks.window_start, clicks.user_id AS user_id + FROM + TUMBLE(orders, created_at, INTERVAL '1' second) AS orders + JOIN TUMBLE(clicks, created_at, INTERVAL '1' second) AS clicks + ON + orders.window_start = clicks.window_start AND + clicks.user_id = orders.user_id; + """, +] +bound_tables = { pattern = '__internal_mv_tumble_join_\d+_hashjoin(left|right)_\d+', limit = 300 } diff --git a/src/tests/state_cleaning_test/data/temporal_filter.toml b/src/tests/state_cleaning_test/data/temporal_filter.toml new file mode 100644 index 0000000000000..043bb852a7667 --- /dev/null +++ b/src/tests/state_cleaning_test/data/temporal_filter.toml @@ -0,0 +1,45 @@ +[[test]] +name = "temporal_filter" +init_sqls = [ + """ + CREATE TABLE clicks ( + click_id INTEGER, + user_id INTEGER, + created_at
TIMESTAMP, + WATERMARK FOR created_at AS created_at - interval '9' second + ) APPEND ONLY WITH ( + connector = 'datagen', + rows_per_second = 200, + datagen.split.num = 16, + fields.created_at.max_past_mode = 'relative', + fields.created_at.max_past = '10s', + fields.click_id.kind = 'sequence', + fields.click_id.start = 0, + fields.user_id.min = 0, + fields.user_id.max = 20, + ); + """, + # Needed by now() below. + """ + SET TIME ZONE LOCAL; + """, + """ + CREATE MATERIALIZED VIEW clicks_10s AS + SELECT * FROM clicks WHERE created_at > now() - INTERVAL '10' second; + """, + """ + CREATE MATERIALIZED VIEW clicks_20s AS + SELECT * FROM clicks WHERE created_at > now() - INTERVAL '20' second; + """, + """ + CREATE MATERIALIZED VIEW clicks_30s AS + SELECT * FROM clicks WHERE created_at > now() - INTERVAL '30' second; + """, +] +bound_tables = [ + { pattern = '__internal_clicks_10s_\d+_dynamicfilterleft_\d+', limit = 300 }, + { pattern = '__internal_clicks_20s_\d+_dynamicfilterleft_\d+', limit = 600 }, + { pattern = '__internal_clicks_30s_\d+_dynamicfilterleft_\d+', limit = 900 }, + # The right table should always contain only 1 record. + { pattern = '__internal_clicks_\d+s_\d+_dynamicfilterright_\d+', limit = 1 }, +] diff --git a/src/tests/state_cleaning_test/src/bin/main.rs b/src/tests/state_cleaning_test/src/bin/main.rs new file mode 100644 index 0000000000000..7c602c0cd8816 --- /dev/null +++ b/src/tests/state_cleaning_test/src/bin/main.rs @@ -0,0 +1,232 @@ +// Copyright 2023 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashSet; +use std::path::PathBuf; +use std::str::FromStr; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use clap::Parser; +use futures::StreamExt; +use regex::Regex; +use serde::Deserialize; +use serde_with::{serde_as, OneOrMany}; +use tokio::fs; +use tokio_postgres::{NoTls, SimpleQueryMessage}; +use tokio_stream::wrappers::ReadDirStream; +use tracing::{debug, error, info}; + +#[derive(clap::Parser, Clone, Debug)] +struct TestOptions { + /// The database server host. + #[clap(long, default_value = "localhost")] + host: String, + + /// The database server port. + #[clap(short, long, default_value = "4566")] + port: u16, + + /// The database name to connect to. + #[clap(short, long, default_value = "dev")] + db: String, + + /// The database username. + #[clap(short, long, default_value = "root")] + user: String, + + /// The database password.
+ #[clap(short = 'w', long, default_value = "")] + pass: String, +} + +#[derive(Debug, Clone, Deserialize)] +struct BoundTable { + pattern: String, + limit: usize, +} + +#[serde_as] +#[derive(Debug, Clone, Deserialize)] +struct TestCase { + name: String, + init_sqls: Vec<String>, + #[serde_as(deserialize_as = "OneOrMany<_>")] + bound_tables: Vec<BoundTable>, +} + +#[derive(Debug, Clone, Deserialize)] +struct TestFile { + test: Vec<TestCase>, +} + +async fn validate_case( + client: &tokio_postgres::Client, + TestCase { + name, + init_sqls, + bound_tables, + }: TestCase, +) -> anyhow::Result<()> { + info!(%name, "validating"); + + for sql in init_sqls { + client.simple_query(&sql).await?; + } + + let msgs = client.simple_query("SHOW INTERNAL TABLES").await?; + let internal_tables: HashSet<String> = msgs + .into_iter() + .filter_map(|msg| { + let SimpleQueryMessage::Row(row) = msg else { + return None; + }; + Some(row.get("Name").unwrap().to_string()) + }) + .collect(); + info!(?internal_tables, "found tables"); + + #[derive(Debug)] + struct ProcessedBoundTable { + interested_tables: Vec<String>, + limit: usize, + } + + let tables: Vec<_> = bound_tables + .into_iter() + .map(|t| { + let pattern = Regex::new(&t.pattern).unwrap(); + let interested_tables = internal_tables + .iter() + .filter(|t| pattern.is_match(t)) + .cloned() + .collect::<Vec<_>>(); + ProcessedBoundTable { + interested_tables, + limit: t.limit, + } + }) + .collect(); + + info!(?tables, "start checking"); + + const CHECK_COUNT: usize = 100; + const CHECK_INTERVAL: std::time::Duration = std::time::Duration::from_secs(1); + + for i in 0..CHECK_COUNT { + for ProcessedBoundTable { + interested_tables, + limit, + } in &tables + { + for table in interested_tables { + let sql = format!("SELECT COUNT(*) FROM {}", table); + let res = client.query_one(&sql, &[]).await?; + let cnt: i64 = res.get(0); + debug!(iter=i, %table, %cnt, "checking"); + if cnt > *limit as i64 { + anyhow::bail!( + "Table {} has {} rows, which is more than limit {}", + table, + cnt, + limit + ); + } + } + } + + tokio::time::sleep(CHECK_INTERVAL).await; + } + + Ok(()) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + risingwave_rt::init_risingwave_logger(risingwave_rt::LoggerSettings::new()); + + let opt = TestOptions::parse(); + + let conn_builder = tokio_postgres::Config::new() + .host(&opt.host) + .port(opt.port) + .user(&opt.user) + .password(&opt.pass) + .connect_timeout(Duration::from_secs(5)) + .clone(); + + let (main_client, connection) = conn_builder + .clone() + .dbname(&opt.db) + .connect(NoTls) + .await + .unwrap_or_else(|e| panic!("Failed to connect to database: {}", e)); + + tokio::spawn(async move { + if let Err(e) = connection.await { + error!(?e, "connection error"); + } + }); + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs(); + + let manifest = env!("CARGO_MANIFEST_DIR"); + + let data_dir = PathBuf::from_str(manifest).unwrap().join("data"); + + let mut st = ReadDirStream::new(fs::read_dir(data_dir).await?)
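+ // Each TOML file under `data/` becomes one unit of work: the closure below
+ // runs its test cases in a dedicated, freshly created database, and
+ // `buffer_unordered(16)` lets up to 16 files execute concurrently.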
+ .map(|path| async { + let path = path?.path(); + let content = tokio::fs::read_to_string(&path).await?; + let test_file: TestFile = toml::from_str(&content)?; + let cases = test_file.test; + + let test_name = path.file_stem().unwrap().to_string_lossy(); + + let cur_db_name = format!("state_cleaning_test_{}_{}", test_name, now); + + main_client + .simple_query(&format!("CREATE DATABASE {}", cur_db_name)) + .await?; + + let (client, connection) = conn_builder + .clone() + .dbname(&cur_db_name) + .connect(NoTls) + .await?; + + info!(%test_name, %cur_db_name, "run test in new database"); + + tokio::spawn(async move { + if let Err(e) = connection.await { + error!(?e, "connection error"); + } + }); + + for case in cases { + validate_case(&client, case).await?; + } + + Ok::<_, anyhow::Error>(()) + }) + .buffer_unordered(16); + + while let Some(res) = st.next().await { + res?; + } + + Ok(()) +} diff --git a/src/udf/README.md b/src/udf/README.md deleted file mode 100644 index 7b0eaa97b194f..0000000000000 --- a/src/udf/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Python UDF Support - -🚧 Working in progress. - -# Usage - -```sh -pip3 install pyarrow -# run server -python3 python/example.py -# run client (test client for the arrow flight UDF client-server protocol) -cargo run --example client -``` - -Risingwave client: - -```sql -dev=> create function gcd(int, int) returns int language python as gcd using link 'http://localhost:8815'; -dev=> select gcd(25, 15); -``` diff --git a/src/udf/python/README.md b/src/udf/python/README.md new file mode 100644 index 0000000000000..8650f301d3ec6 --- /dev/null +++ b/src/udf/python/README.md @@ -0,0 +1,75 @@ +# RisingWave Python API + +This library provides a Python API for creating user-defined functions (UDFs) in RisingWave. + +Currently, RisingWave supports user-defined functions implemented as external functions. +Users need to define functions using the API provided by this library, and then start a Python process as a UDF server. +RisingWave calls the function remotely by accessing the UDF server at a given address. + +## Installation + +```sh +pip install risingwave +``` + +## Usage + +Define functions in a Python file: + +```python +# udf.py +from risingwave.udf import udf, udtf, UdfServer + +# Define a scalar function +@udf(input_types=['INT', 'INT'], result_type='INT') +def gcd(x, y): + while y != 0: + (x, y) = (y, x % y) + return x + +# Define a table function +@udtf(input_types='INT', result_types='INT') +def series(n): + for i in range(n): + yield i + +# Start a UDF server +if __name__ == '__main__': + server = UdfServer(location="0.0.0.0:8815") + server.add_function(gcd) + server.add_function(series) + server.serve() +``` + +Start the UDF server: + +```sh +python3 udf.py +``` + +To create functions in RisingWave, use the following syntax: + +```sql +create function <name> ( <arg_type> [, ...] ) + [ returns <ret_type> | returns table ( <column_name> <column_type> [, ...] ) ] + language python as <name_defined_in_server> + using link '<udf_server_address>'; +``` + +- The `language` parameter must be set to `python`. +- The `as` parameter specifies the function name defined in the UDF server. +- The `link` parameter specifies the address of the UDF server.
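+
+Before registering functions in RisingWave, you can sanity-check a running UDF server with a plain Arrow Flight client. The snippet below is only an illustrative sketch: it assumes the `gcd` function and the `0.0.0.0:8815` address from the example above, and uses the generic `pyarrow.flight` API rather than anything RisingWave-specific.
+
+```python
+import pyarrow as pa
+import pyarrow.flight as flight
+
+# Connect to the UDF server started with `python3 udf.py`.
+client = flight.connect("grpc://localhost:8815")
+
+# Functions are addressed by name via a path descriptor.
+writer, reader = client.do_exchange(flight.FlightDescriptor.for_path("gcd"))
+
+# Send one batch of arguments, then read one batch of results back.
+batch = pa.RecordBatch.from_arrays([pa.array([25]), pa.array([15])], names=["x", "y"])
+writer.begin(batch.schema)
+writer.write_batch(batch)
+writer.done_writing()
+print(reader.read_chunk().data)  # expect a single-column batch containing 5
+```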
+ +For example, to register and call the functions defined in `udf.py` above: + +```sql +create function gcd(int, int) returns int +language python as gcd using link 'http://localhost:8815'; + +create function series(int) returns table (x int) +language python as series using link 'http://localhost:8815'; + +select gcd(25, 15); + +select * from series(10); +``` diff --git a/src/udf/python/example.py b/src/udf/python/example.py index b7e6ded7df218..86c4b8716d794 100644 --- a/src/udf/python/example.py +++ b/src/udf/python/example.py @@ -1,4 +1,5 @@ -from risingwave.udf import udf, UdfServer +from typing import Iterator +from risingwave.udf import udf, udtf, UdfServer import random @@ -19,9 +20,23 @@ def gcd3(x: int, y: int, z: int) -> int: return gcd(gcd(x, y), z) +@udtf(input_types='INT', result_types='INT') +def series(n: int) -> Iterator[int]: + for i in range(n): + yield i + + +@udtf(input_types=['INT'], result_types=['INT', 'VARCHAR']) +def series2(n: int) -> Iterator[tuple[int, str]]: + for i in range(n): + yield i, str(i) + + if __name__ == '__main__': - server = UdfServer() + server = UdfServer(location="0.0.0.0:8815") server.add_function(random_int) server.add_function(gcd) server.add_function(gcd3) + server.add_function(series) + server.add_function(series2) server.serve() diff --git a/src/udf/python/risingwave/udf.py b/src/udf/python/risingwave/udf.py index 93d47f29df4da..55ff4bb99d2cd 100644 --- a/src/udf/python/risingwave/udf.py +++ b/src/udf/python/risingwave/udf.py @@ -39,9 +39,30 @@ def eval_batch(self, batch: pa.RecordBatch) -> pa.RecordBatch: return pa.RecordBatch.from_arrays([result], schema=self._result_schema) -class UserDefinedFunctionWrapper(ScalarFunction): +class TableFunction(UserDefinedFunction): """ - Base Wrapper for Python user-defined function. + Base interface for user-defined table function. A user-defined table function maps zero, one, + or multiple table values to a new table value. + """ + + def eval(self, *args): + """ + Method which defines the logic of the table function. + """ + pass + + def eval_batch(self, batch: pa.RecordBatch) -> pa.RecordBatch: + # only the first row from batch is used + res = self.eval(*[col[0].as_py() for col in batch]) + columns = zip(*res) if len(self._result_schema) > 1 else [res] + arrays = [pa.array(col, type) + for col, type in zip(columns, self._result_schema.types)] + return pa.RecordBatch.from_arrays(arrays, schema=self._result_schema) + + +class UserDefinedScalarFunctionWrapper(ScalarFunction): + """ + Base Wrapper for Python user-defined scalar function. """ _func: Callable @@ -49,7 +70,7 @@ def __init__(self, func, input_types, result_type, name=None): self._func = func self._input_schema = pa.schema(zip( inspect.getfullargspec(func)[0], - [_to_data_type(t) for t in input_types] + [_to_data_type(t) for t in _to_list(input_types)] )) self._result_schema = pa.schema( [('output', _to_data_type(result_type))]) @@ -63,38 +84,112 @@ def eval(self, *args): return self._func(*args) -def _create_udf(f, input_types, result_type, name): - return UserDefinedFunctionWrapper( - f, input_types, result_type, name) +class UserDefinedTableFunctionWrapper(TableFunction): + """ + Base Wrapper for Python user-defined table function.
+ """ + _func: Callable + + def __init__(self, func, input_types, result_types, name=None): + self._func = func + self._input_schema = pa.schema(zip( + inspect.getfullargspec(func)[0], + [_to_data_type(t) for t in _to_list(input_types)] + )) + self._result_schema = pa.schema( + [('', _to_data_type(t)) for t in _to_list(result_types)]) + self._name = name or ( + func.__name__ if hasattr(func, '__name__') else func.__class__.__name__) + + def __call__(self, *args): + return self._func(*args) + + def eval(self, *args): + return self._func(*args) + + +def _to_list(x): + if isinstance(x, list): + return x + else: + return [x] def udf(input_types: Union[List[Union[str, pa.DataType]], Union[str, pa.DataType]], result_type: Union[str, pa.DataType], name: Optional[str] = None,) -> Union[Callable, UserDefinedFunction]: """ - Annotation for creating a user-defined function. + Annotation for creating a user-defined scalar function. + + Parameters: + - input_types: A list of strings or Arrow data types that specifies the input data types. + - result_type: A string or an Arrow data type that specifies the return value type. + - name: An optional string specifying the function name. If not provided, the original name will be used. + + Example: + ``` + @udf(input_types=['INT', 'INT'], result_type='INT') + def gcd(x, y): + while y != 0: + (x, y) = (y, x % y) + return x + ``` + """ + + return lambda f: UserDefinedScalarFunctionWrapper(f, input_types, result_type, name) + + +def udtf(input_types: Union[List[Union[str, pa.DataType]], Union[str, pa.DataType]], + result_types: Union[List[Union[str, pa.DataType]], Union[str, pa.DataType]], + name: Optional[str] = None,) -> Union[Callable, UserDefinedFunction]: """ + Annotation for creating a user-defined table function. - return lambda f: _create_udf(f, input_types, result_type, name) + Parameters: + - input_types: A list of strings or Arrow data types that specifies the input data types. + - result_types A list of strings or Arrow data types that specifies the return value types. + - name: An optional string specifying the function name. If not provided, the original name will be used. + + Example: + ``` + @udtf(input_types='INT', result_types='INT') + def series(n): + for i in range(n): + yield i + ``` + """ + + return lambda f: UserDefinedTableFunctionWrapper(f, input_types, result_types, name) class UdfServer(pa.flight.FlightServerBase): """ - UDF server based on Apache Arrow Flight protocol. - Reference: https://arrow.apache.org/cookbook/py/flight.html#simple-parquet-storage-service-with-arrow-flight + A server that provides user-defined functions to clients. + + Example: + ``` + server = UdfServer(location="0.0.0.0:8815") + server.add_function(my_udf) + server.serve() + ``` """ + # UDF server based on Apache Arrow Flight protocol. 
+ # Reference: https://arrow.apache.org/cookbook/py/flight.html#simple-parquet-storage-service-with-arrow-flight + _functions: Dict[str, UserDefinedFunction] - def __init__(self, location="grpc://0.0.0.0:8815", **kwargs): - super(UdfServer, self).__init__(location, **kwargs) + def __init__(self, location="0.0.0.0:8815", **kwargs): + super(UdfServer, self).__init__('grpc://' + location, **kwargs) self._functions = {} def get_flight_info(self, context, descriptor): """Return the result schema of a function.""" udf = self._functions[descriptor.path[0].decode('utf-8')] # return the concatenation of input and output schema - full_schema = udf._input_schema.append(udf._result_schema.field(0)) - return pa.flight.FlightInfo(schema=full_schema, descriptor=descriptor, endpoints=[], total_records=0, total_bytes=0) + full_schema = pa.schema( + list(udf._input_schema) + list(udf._result_schema)) + # we use `total_records` to indicate the number of input arguments + return pa.flight.FlightInfo(schema=full_schema, descriptor=descriptor, endpoints=[], total_records=len(udf._input_schema), total_bytes=0) def add_function(self, udf: UserDefinedFunction): """Add a function to the server.""" diff --git a/src/udf/python/setup.py b/src/udf/python/setup.py new file mode 100644 index 0000000000000..934d4c1078eae --- /dev/null +++ b/src/udf/python/setup.py @@ -0,0 +1,21 @@ +from setuptools import find_packages, setup + +with open("README.md", "r") as fh: + long_description = fh.read() + +setup( + name="risingwave", + version="0.0.2", + author="RisingWave Labs", + description="RisingWave Python API", + long_description=long_description, + long_description_content_type='text/markdown', + url="https://github.com/risingwavelabs/risingwave", + packages=find_packages(), + classifiers=[ + "Programming Language :: Python", + "License :: OSI Approved :: Apache Software License" + ], + python_requires=">=3.10", + install_requires=['pyarrow'], +) diff --git a/src/udf/src/lib.rs b/src/udf/src/lib.rs index 748a0c3224f69..00bc7073d9b90 100644 --- a/src/udf/src/lib.rs +++ b/src/udf/src/lib.rs @@ -44,11 +44,10 @@ impl ArrowFlightUdfClient { // check schema let info = response.into_inner(); + let input_num = info.total_records as usize; let full_schema = Schema::try_from(info) .map_err(|e| FlightError::DecodeError(format!("Error decoding schema: {e}")))?; - // TODO: only support one return value for now - let (input_fields, return_fields) = - full_schema.fields.split_at(full_schema.fields.len() - 1); + let (input_fields, return_fields) = full_schema.fields.split_at(input_num); let actual_input_types: Vec<_> = input_fields.iter().map(|f| f.data_type()).collect(); let actual_result_types: Vec<_> = return_fields.iter().map(|f| f.data_type()).collect(); let expect_input_types: Vec<_> = args.fields.iter().map(|f| f.data_type()).collect(); diff --git a/src/utils/pgwire/Cargo.toml b/src/utils/pgwire/Cargo.toml index 381aa8e84d20e..58170963f716c 100644 --- a/src/utils/pgwire/Cargo.toml +++ b/src/utils/pgwire/Cargo.toml @@ -22,7 +22,7 @@ bytes = "1" chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } futures = { version = "0.3", default-features = false, features = ["alloc"] } itertools = "0.10" -openssl = "0.10.3" +openssl = "0.10.48" pg_interval = "0.4" postgres-types = { version = "0.2.4", features = ["derive","with-chrono-0_4"] } regex = "1.5" diff --git a/src/utils/pgwire/src/pg_extended.rs b/src/utils/pgwire/src/pg_extended.rs index 6fb6aa4b24e87..32471659ff9f4 100644 --- 
a/src/utils/pgwire/src/pg_extended.rs +++ b/src/utils/pgwire/src/pg_extended.rs @@ -521,7 +521,7 @@ impl PreparedStatement { }; format!("'{}'::JSONB", tmp) } - DataType::Struct(_) | DataType::List { .. } => { + DataType::Serial | DataType::Struct(_) | DataType::List { .. } => { return Err(PsqlError::Internal(anyhow!( "Unsupported param type {:?}", type_oid @@ -557,7 +557,7 @@ impl PreparedStatement { } DataType::Interval => params.push("'2 months ago'::interval".to_string()), DataType::Jsonb => params.push("'null'::JSONB".to_string()), - DataType::Struct(_) | DataType::List { .. } => { + DataType::Serial | DataType::Struct(_) | DataType::List { .. } => { return Err(PsqlError::Internal(anyhow!( "Unsupported param type {:?}", oid diff --git a/src/utils/pgwire/src/pg_protocol.rs b/src/utils/pgwire/src/pg_protocol.rs index 1ad8962afe0e1..392c2bd956243 100644 --- a/src/utils/pgwire/src/pg_protocol.rs +++ b/src/utils/pgwire/src/pg_protocol.rs @@ -394,7 +394,12 @@ where async fn process_parse_msg(&mut self, msg: FeParseMessage) -> PsqlResult<()> { let sql = cstr_to_str(&msg.sql_bytes).unwrap(); - tracing::trace!("(extended query)parse query: {}", sql); + let statement_name = cstr_to_str(&msg.statement_name).unwrap().to_string(); + tracing::trace!( + "(extended query)parse query: {}, statement name: {}", + sql, + statement_name + ); let is_query_sql = { let stmts = Parser::parse_sql(sql) @@ -407,8 +412,12 @@ where )); } - StatementType::infer_from_statement(&stmts[0]) - .map_or(false, |stmt_type| stmt_type.is_query()) + if stmts.is_empty() { + false + } else { + StatementType::infer_from_statement(&stmts[0]) + .map_or(false, |stmt_type| stmt_type.is_query()) + } }; let prepared_statement = PreparedStatement::parse_statement(sql.to_string(), msg.type_ids)?; @@ -426,12 +435,7 @@ where vec![] }; - let statement = PgStatement::new( - cstr_to_str(&msg.statement_name).unwrap().to_string(), - prepared_statement, - fields, - is_query_sql, - ); + let statement = PgStatement::new(statement_name, prepared_statement, fields, is_query_sql); let name = statement.name(); if name.is_empty() { @@ -445,11 +449,12 @@ where fn process_bind_msg(&mut self, msg: FeBindMessage) -> PsqlResult<()> { let statement_name = cstr_to_str(&msg.statement_name).unwrap().to_string(); + let portal_name = cstr_to_str(&msg.portal_name).unwrap().to_string(); // 1. Get statement. trace!( target: "pgwire_query_log", - "(extended query)bind: get statement name: {}", - &statement_name + "(extended query)bind: statement name: {}, portal name: {}", + &statement_name, &portal_name ); let statement = if statement_name.is_empty() { self.unnamed_statement @@ -473,7 +478,6 @@ where .try_collect()?; // 2. Instance the statement to get the portal. - let portal_name = cstr_to_str(&msg.portal_name).unwrap().to_string(); let portal = statement.instance( portal_name.clone(), &msg.params, @@ -505,7 +509,7 @@ where .ok_or_else(PsqlError::no_portal)? }; - tracing::trace!(target: "pgwire_query_log", "(extended query)execute query: {}", portal.query_string()); + tracing::trace!(target: "pgwire_query_log", "(extended query)execute query: {}, portal name: {}", portal.query_string(), portal_name); // 2. Execute instance statement using portal.
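// (The portal was produced by `process_bind_msg` above, so it already carries the bound parameter values together with the query string logged here.)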
let session = self.session.clone().unwrap(); diff --git a/src/utils/pgwire/src/pg_response.rs b/src/utils/pgwire/src/pg_response.rs index 67943f70e594a..a5572f0e4abe2 100644 --- a/src/utils/pgwire/src/pg_response.rs +++ b/src/utils/pgwire/src/pg_response.rs @@ -63,7 +63,11 @@ pub enum StatementType { DROP_SCHEMA, DROP_DATABASE, DROP_USER, + ALTER_INDEX, + ALTER_VIEW, ALTER_TABLE, + ALTER_MATERIALIZED_VIEW, + ALTER_SINK, ALTER_SYSTEM, REVOKE_PRIVILEGE, // Introduce ORDER_BY statement type cuz Calcite unvalidated AST has SqlKind.ORDER_BY. Note @@ -82,6 +86,7 @@ pub enum StatementType { BEGIN, COMMIT, ROLLBACK, + SET_TRANSACTION, } impl std::fmt::Display for StatementType { diff --git a/src/utils/pgwire/tests/js/test/pgwire.test.ts b/src/utils/pgwire/tests/js/test/pgwire.test.ts index 76d1c1f27d0ec..07cb279728ff2 100644 --- a/src/utils/pgwire/tests/js/test/pgwire.test.ts +++ b/src/utils/pgwire/tests/js/test/pgwire.test.ts @@ -11,8 +11,6 @@ describe('PgwireTest', () => { try { const conn = await pool.connect(); try { - // FIXME: RisingWave currently will fail on this test due to the lacking support of prepared statement. - // Related issue: https://github.com/risingwavelabs/risingwave/issues/5293 const res = await conn.query({ text: 'SELECT $1::int AS number', values: ['1'], @@ -25,4 +23,29 @@ describe('PgwireTest', () => { await pool.end(); } }); + + test('empty query', async () => { + const pool = new Pool({ + host: '127.0.0.1', + database: 'dev', + port: 4566, + user: 'root', + }); + try { + const conn = await pool.connect(); + try { + const query = { + name: 'empty', + text: '', + values: [], + }; + const res = await conn.query(query); + expect(res.rowCount).toBe(null); + } finally { + await conn.release(); + } + } finally { + await pool.end(); + } + }); }); diff --git a/src/utils/runtime/src/lib.rs b/src/utils/runtime/src/lib.rs index 9a1c22f6feaf9..c7a4c176ddbcc 100644 --- a/src/utils/runtime/src/lib.rs +++ b/src/utils/runtime/src/lib.rs @@ -145,6 +145,7 @@ pub fn init_risingwave_logger(settings: LoggerSettings) { }; let filter = filter::Targets::new() + .with_target("aws_sdk_ec2", Level::INFO) .with_target("aws_sdk_s3", Level::INFO) .with_target("aws_config", Level::WARN) // Only enable WARN and ERROR for 3rd-party crates diff --git a/src/workspace-hack/Cargo.toml b/src/workspace-hack/Cargo.toml index ecd4ff11bc05a..4b425b9fbe8b8 100644 --- a/src/workspace-hack/Cargo.toml +++ b/src/workspace-hack/Cargo.toml @@ -21,7 +21,6 @@ publish = false ahash = { version = "0.8" } anyhow = { version = "1", features = ["backtrace"] } arrayvec = { version = "0.7", default-features = false, features = ["std"] } -auto_enums = { version = "0.7", features = ["futures"] } aws-sdk-s3 = { version = "0.21", features = ["native-tls"] } aws-smithy-client = { version = "0.51", default-features = false, features = ["native-tls", "rustls"] } aws-types = { version = "0.51", default-features = false, features = ["hardcoded-credentials"] } @@ -35,7 +34,6 @@ crossbeam-channel = { version = "0.5" } crossbeam-deque = { version = "0.8" } crossbeam-epoch = { version = "0.9" } crossbeam-utils = { version = "0.8" } -digest = { version = "0.10", features = ["mac", "std"] } either = { version = "1" } fail = { version = "0.5", default-features = false, features = ["failpoints"] } fixedbitset = { version = "0.4" } @@ -74,6 +72,7 @@ parking_lot_core = { version = "0.9", default-features = false, features = ["dea petgraph = { version = "0.6" } phf = { version = "0.11", features = ["uncased"] } phf_shared = { version = "0.11", 
features = ["uncased"] } +postgres-types = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "87ca1dc", default-features = false, features = ["derive", "with-chrono-0_4", "with-serde_json-1"] } prometheus = { version = "0.13", features = ["process"] } prost = { version = "0.11", features = ["no-recursion-limit"] } prost-types = { version = "0.11" } @@ -84,19 +83,22 @@ regex = { version = "1" } regex-syntax = { version = "0.6" } reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"] } ring = { version = "0.16", features = ["std"] } +rust_decimal = { version = "1", features = ["db-postgres", "db-tokio-postgres"] } scopeguard = { version = "1" } serde = { version = "1", features = ["alloc", "derive", "rc"] } +serde_json = { version = "1", features = ["alloc"] } +serde_with = { version = "2", features = ["json"] } smallvec = { version = "1", default-features = false, features = ["serde"] } socket2 = { version = "0.4", default-features = false, features = ["all"] } strum = { version = "0.24", features = ["derive"] } subtle = { version = "2" } time = { version = "0.3", features = ["formatting", "local-offset", "macros", "parsing"] } tokio = { version = "1", features = ["fs", "io-std", "io-util", "macros", "net", "parking_lot", "process", "rt-multi-thread", "signal", "stats", "sync", "time", "tracing"] } -tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "0c25710", features = ["net"] } +tokio-postgres = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "87ca1dc", features = ["with-chrono-0_4"] } +tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "0c25710", features = ["fs", "net"] } tokio-util = { version = "0.7", features = ["codec", "io"] } tonic = { version = "0.8", features = ["gzip", "tls-webpki-roots"] } tower = { version = "0.4", features = ["balance", "buffer", "filter", "limit", "load-shed", "retry", "timeout", "util"] } -tower-http = { version = "0.3", features = ["add-extension", "cors", "map-response-body", "util"] } tracing = { version = "0.1", features = ["log", "release_max_level_trace"] } tracing-core = { version = "0.1" } tracing-futures = { version = "0.2" } @@ -111,8 +113,7 @@ zstd-sys = { version = "2", default-features = false, features = ["legacy", "std ahash = { version = "0.8" } anyhow = { version = "1", features = ["backtrace"] } arrayvec = { version = "0.7", default-features = false, features = ["std"] } -auto_enums = { version = "0.7", features = ["futures"] } -auto_enums_derive = { version = "0.7", default-features = false, features = ["futures", "std"] } +auto_enums = { version = "0.8", features = ["futures03"] } aws-sdk-s3 = { version = "0.21", features = ["native-tls"] } aws-smithy-client = { version = "0.51", default-features = false, features = ["native-tls", "rustls"] } aws-types = { version = "0.51", default-features = false, features = ["hardcoded-credentials"] } @@ -127,7 +128,6 @@ crossbeam-channel = { version = "0.5" } crossbeam-deque = { version = "0.8" } crossbeam-epoch = { version = "0.9" } crossbeam-utils = { version = "0.8" } -digest = { version = "0.10", features = ["mac", "std"] } either = { version = "1" } fail = { version = "0.5", default-features = false, features = ["failpoints"] } fixedbitset = { version = "0.4" } @@ -166,6 +166,7 @@ parking_lot_core = { version = "0.9", default-features = false, features = ["dea petgraph = { version = "0.6" } phf = { version = "0.11", features = ["uncased"] } phf_shared = { version = "0.11", features = ["uncased"] } 
+postgres-types = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "87ca1dc", default-features = false, features = ["derive", "with-chrono-0_4", "with-serde_json-1"] } proc-macro2 = { version = "1", features = ["span-locations"] } prometheus = { version = "0.13", features = ["process"] } prost = { version = "0.11", features = ["no-recursion-limit"] } @@ -177,8 +178,11 @@ regex = { version = "1" } regex-syntax = { version = "0.6" } reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"] } ring = { version = "0.16", features = ["std"] } +rust_decimal = { version = "1", features = ["db-postgres", "db-tokio-postgres"] } scopeguard = { version = "1" } serde = { version = "1", features = ["alloc", "derive", "rc"] } +serde_json = { version = "1", features = ["alloc"] } +serde_with = { version = "2", features = ["json"] } smallvec = { version = "1", default-features = false, features = ["serde"] } socket2 = { version = "0.4", default-features = false, features = ["all"] } strum = { version = "0.24", features = ["derive"] } @@ -186,12 +190,12 @@ subtle = { version = "2" } syn = { version = "1", features = ["extra-traits", "full", "visit", "visit-mut"] } time = { version = "0.3", features = ["formatting", "local-offset", "macros", "parsing"] } tokio = { version = "1", features = ["fs", "io-std", "io-util", "macros", "net", "parking_lot", "process", "rt-multi-thread", "signal", "stats", "sync", "time", "tracing"] } -tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "0c25710", features = ["net"] } +tokio-postgres = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "87ca1dc", features = ["with-chrono-0_4"] } +tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "0c25710", features = ["fs", "net"] } tokio-util = { version = "0.7", features = ["codec", "io"] } tonic = { version = "0.8", features = ["gzip", "tls-webpki-roots"] } tonic-build = { version = "0.8" } tower = { version = "0.4", features = ["balance", "buffer", "filter", "limit", "load-shed", "retry", "timeout", "util"] } -tower-http = { version = "0.3", features = ["add-extension", "cors", "map-response-body", "util"] } tracing = { version = "0.1", features = ["log", "release_max_level_trace"] } tracing-core = { version = "0.1" } tracing-futures = { version = "0.2" }