diff --git a/Gopkg.lock b/Gopkg.lock index 46f4b8665436..119a2271d642 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -458,7 +458,7 @@ [[projects]] branch = "master" - digest = "1:01f450f2b42cdd2e3eac4a0ff01e0eccc287b5e6c590dbb592b3a320cd29cce0" + digest = "1:085203aa725af444d832f6f0e38511e7a2a7bf7d630f618257e467fb043ea109" name = "github.com/cockroachdb/pebble" packages = [ ".", @@ -484,7 +484,7 @@ "vfs", ] pruneopts = "UT" - revision = "4887c526300055e1c30635c53fd16b3fe9d9e132" + revision = "feb93032c41f991845506cd423f1771618edf2b0" [[projects]] branch = "master" @@ -534,6 +534,21 @@ revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1" version = "v1.0.8" +[[projects]] + branch = "master" + digest = "1:6c25d97f154e54290d708120e6bc72da4e8628e1467c53d8f723f1594043901d" + name = "github.com/dave/dst" + packages = [ + ".", + "decorator", + "decorator/resolver", + "decorator/resolver/gopackages", + "decorator/resolver/gotypes", + "dstutil", + ] + pruneopts = "UT" + revision = "ce1c8af3ca7fccd4405ec0594a8b68d40370dda0" + [[projects]] digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" @@ -1600,7 +1615,7 @@ version = "v1.4.0" [[projects]] - digest = "1:c8fafded0eb90c84e4ef05b9e1ff2a5c8fb2aaf5009ba11b91f6bed2ef680aae" + digest = "1:5b7e59f9330bbc88c3aa7b62d92d1d29bb6ccbc176b731986de70345f3522dd7" name = "github.com/twpayne/go-geom" packages = [ ".", @@ -1621,8 +1636,8 @@ "xy/orientation", ] pruneopts = "UT" - revision = "078e8ab21d838b07c627dc1d704f1f80688b1b90" - version = "v1.1.0" + revision = "62c03a64717d682dd89319176fd7247944f0c518" + version = "v1.2.1" [[projects]] digest = "1:43e0db2b113d1aee4bb68745598c135511976a44fb380ebd701cd0c14a77c303" @@ -2131,6 +2146,7 @@ "github.com/apache/arrow/go/arrow/memory", "github.com/armon/circbuf", "github.com/aws/aws-sdk-go/aws", + "github.com/aws/aws-sdk-go/aws/awserr", "github.com/aws/aws-sdk-go/aws/credentials", "github.com/aws/aws-sdk-go/aws/session", 
"github.com/aws/aws-sdk-go/service/s3", @@ -2164,6 +2180,9 @@ "github.com/cockroachdb/stress", "github.com/cockroachdb/ttycolor", "github.com/codahale/hdrhistogram", + "github.com/dave/dst", + "github.com/dave/dst/decorator", + "github.com/dave/dst/dstutil", "github.com/docker/distribution/reference", "github.com/docker/docker/api/types", "github.com/docker/docker/api/types/container", diff --git a/Gopkg.toml b/Gopkg.toml index 3767d9005afb..3c496c027006 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -58,6 +58,10 @@ ignored = [ name = "github.com/docker/docker" branch = "master" +[[constraint]] + name = "github.com/dave/dst" + branch = "master" + [[constraint]] name = "github.com/maruel/panicparse" revision = "f20d4c4d746f810c9110e21928d4135e1f2a3efa" diff --git a/Makefile b/Makefile index e992478db232..0846df9d7685 100644 --- a/Makefile +++ b/Makefile @@ -482,7 +482,7 @@ LIBSNAPPY := $(SNAPPY_DIR)/libsnappy.a LIBEDIT := $(LIBEDIT_DIR)/src/.libs/libedit.a LIBROACH := $(LIBROACH_DIR)/libroach.a LIBROACHCCL := $(LIBROACH_DIR)/libroachccl.a -LIBPROJ := $(PROJ_DIR)/lib/libproj.a +LIBPROJ := $(PROJ_DIR)/lib/libproj$(if $(target-is-windows),_4_9).a LIBKRB5 := $(KRB5_DIR)/lib/libgssapi_krb5.a PROTOC := $(PROTOC_DIR)/protoc @@ -553,7 +553,7 @@ $(BASE_CGO_FLAGS_FILES): Makefile build/defs.mk.sig | bin/.submodules-initialize @echo >> $@ @echo 'package $(if $($(@D)-package),$($(@D)-package),$(notdir $(@D)))' >> $@ @echo >> $@ - @echo '// #cgo CPPFLAGS: $(addprefix -I,$(JEMALLOC_DIR)/include $(KRB_CPPFLAGS) $(GEOS_DIR)/capi $(PROJ_DIR)/lib)' >> $@ + @echo '// #cgo CPPFLAGS: $(addprefix -I,$(JEMALLOC_DIR)/include $(KRB_CPPFLAGS))' >> $@ @echo '// #cgo LDFLAGS: $(addprefix -L,$(CRYPTOPP_DIR) $(PROTOBUF_DIR) $(JEMALLOC_DIR)/lib $(SNAPPY_DIR) $(LIBEDIT_DIR)/src/.libs $(ROCKSDB_DIR) $(LIBROACH_DIR) $(KRB_DIR) $(PROJ_DIR)/lib)' >> $@ @echo 'import "C"' >> $@ @@ -825,7 +825,7 @@ SQLPARSER_TARGETS = \ PROTOBUF_TARGETS := bin/.go_protobuf_sources bin/.gw_protobuf_sources 
bin/.cpp_protobuf_sources bin/.cpp_ccl_protobuf_sources -DOCGEN_TARGETS := bin/.docgen_bnfs bin/.docgen_functions +DOCGEN_TARGETS := bin/.docgen_bnfs bin/.docgen_functions docs/generated/redact_safe.md EXECGEN_TARGETS = \ pkg/col/coldata/vec.eg.go \ @@ -972,7 +972,7 @@ buildshort: ## Build the CockroachDB binary without the admin UI. build: $(COCKROACH) buildoss: $(COCKROACHOSS) buildshort: $(COCKROACHSHORT) -build buildoss buildshort: $(DOCGEN_TARGETS) +build buildoss buildshort: $(if $(is-cross-compile),,$(DOCGEN_TARGETS)) build buildshort: $(if $(is-cross-compile),,$(SETTINGS_DOC_PAGE)) # For historical reasons, symlink cockroach to cockroachshort. @@ -1528,6 +1528,19 @@ bin/.docgen_functions: bin/docgen docgen functions docs/generated/sql --quiet touch $@ +.PHONY: docs/generated/redact_safe.md + +docs/generated/redact_safe.md: + @(echo "The following types are considered always safe for reporting:"; echo; \ + echo "File | Type"; echo "--|--") >$@.tmp + @git grep '^func \(.*\) SafeValue\(\)' | \ + grep -v '^pkg/util/redact' | \ + sed -E -e 's/^([^:]*):func \(([^ ]* )?(.*)\) SafeValue.*$$/\1 | \`\3\`/g' >>$@.tmp || rm -f $@.tmp + @git grep 'redact\.RegisterSafeType' | \ + grep -v '^pkg/util/redact' | \ + sed -E -e 's/^([^:]*):.*redact\.RegisterSafeType\((.*)\).*/\1 | \`\2\`/g' >>$@.tmp || rm -f $@.tmp + @mv -f $@.tmp $@ + settings-doc-gen := $(if $(filter buildshort,$(MAKECMDGOALS)),$(COCKROACHSHORT),$(COCKROACH)) $(SETTINGS_DOC_PAGE): $(settings-doc-gen) @@ -1671,6 +1684,7 @@ bins = \ bin/benchmark \ bin/cockroach-oss \ bin/cockroach-short \ + bin/compile-builds \ bin/docgen \ bin/execgen \ bin/fuzz \ @@ -1715,7 +1729,8 @@ logictest-bins := bin/logictest bin/logictestopt bin/logictestccl # Additional dependencies for binaries that depend on generated code. # # TODO(benesch): Derive this automatically. This is getting out of hand. 
-bin/workload bin/docgen bin/execgen bin/roachtest $(logictest-bins): $(LIBPROJ) $(CGO_FLAGS_FILES) $(SQLPARSER_TARGETS) $(PROTOBUF_TARGETS) +bin/workload bin/docgen bin/execgen bin/roachtest $(logictest-bins): $(SQLPARSER_TARGETS) $(PROTOBUF_TARGETS) +bin/workload bin/docgen bin/roachtest $(logictest-bins): $(LIBPROJ) $(CGO_FLAGS_FILES) bin/workload bin/roachtest $(logictest-bins): $(EXECGEN_TARGETS) bin/roachtest $(logictest-bins): $(C_LIBS_CCL) $(CGO_FLAGS_FILES) $(OPTGEN_TARGETS) diff --git a/build/teamcity-compile-builds.sh b/build/teamcity-compile-builds.sh new file mode 100644 index 000000000000..4c66ac36197e --- /dev/null +++ b/build/teamcity-compile-builds.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +set -euxo pipefail + +export BUILDER_HIDE_GOPATH_SRC=1 + +build/builder.sh go install ./pkg/cmd/compile-builds +build/builder.sh env \ + compile-builds diff --git a/c-deps/libroach/protos/util/log/log.pb.cc b/c-deps/libroach/protos/util/log/log.pb.cc index 331a03b9b171..7c9786787966 100644 --- a/c-deps/libroach/protos/util/log/log.pb.cc +++ b/c-deps/libroach/protos/util/log/log.pb.cc @@ -121,6 +121,9 @@ const int Entry::kGoroutineFieldNumber; const int Entry::kFileFieldNumber; const int Entry::kLineFieldNumber; const int Entry::kMessageFieldNumber; +const int Entry::kTagsFieldNumber; +const int Entry::kCounterFieldNumber; +const int Entry::kRedactableFieldNumber; #endif // !defined(_MSC_VER) || _MSC_VER >= 1900 Entry::Entry() @@ -142,18 +145,23 @@ Entry::Entry(const Entry& from) if (from.message().size() > 0) { message_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.message_); } + tags_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + if (from.tags().size() > 0) { + tags_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.tags_); + } ::memcpy(&time_, &from.time_, - static_cast(reinterpret_cast(&severity_) - - reinterpret_cast(&time_)) + sizeof(severity_)); + 
static_cast(reinterpret_cast(&counter_) - + reinterpret_cast(&time_)) + sizeof(counter_)); // @@protoc_insertion_point(copy_constructor:cockroach.util.log.Entry) } void Entry::SharedCtor() { file_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); message_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + tags_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ::memset(&time_, 0, static_cast( - reinterpret_cast(&severity_) - - reinterpret_cast(&time_)) + sizeof(severity_)); + reinterpret_cast(&counter_) - + reinterpret_cast(&time_)) + sizeof(counter_)); } Entry::~Entry() { @@ -164,6 +172,7 @@ Entry::~Entry() { void Entry::SharedDtor() { file_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); message_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + tags_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); } void Entry::SetCachedSize(int size) const { @@ -183,9 +192,10 @@ void Entry::Clear() { file_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); message_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + tags_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ::memset(&time_, 0, static_cast( - reinterpret_cast(&severity_) - - reinterpret_cast(&time_)) + sizeof(severity_)); + reinterpret_cast(&counter_) - + reinterpret_cast(&time_)) + sizeof(counter_)); _internal_metadata_.Clear(); } @@ -294,6 +304,50 @@ bool Entry::MergePartialFromCodedStream( break; } + // string tags = 7; + case 7: { + if (static_cast< ::google::protobuf::uint8>(tag) == + static_cast< ::google::protobuf::uint8>(58u /* 58 & 0xFF */)) { + DO_(::google::protobuf::internal::WireFormatLite::ReadString( + input, this->mutable_tags())); + DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String( + this->tags().data(), 
static_cast(this->tags().length()), + ::google::protobuf::internal::WireFormatLite::PARSE, + "cockroach.util.log.Entry.tags")); + } else { + goto handle_unusual; + } + break; + } + + // uint64 counter = 8; + case 8: { + if (static_cast< ::google::protobuf::uint8>(tag) == + static_cast< ::google::protobuf::uint8>(64u /* 64 & 0xFF */)) { + + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::uint64, ::google::protobuf::internal::WireFormatLite::TYPE_UINT64>( + input, &counter_))); + } else { + goto handle_unusual; + } + break; + } + + // bool redactable = 9; + case 9: { + if (static_cast< ::google::protobuf::uint8>(tag) == + static_cast< ::google::protobuf::uint8>(72u /* 72 & 0xFF */)) { + + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + bool, ::google::protobuf::internal::WireFormatLite::TYPE_BOOL>( + input, &redactable_))); + } else { + goto handle_unusual; + } + break; + } + default: { handle_unusual: if (tag == 0) { @@ -361,6 +415,26 @@ void Entry::SerializeWithCachedSizes( ::google::protobuf::internal::WireFormatLite::WriteInt64(6, this->goroutine(), output); } + // string tags = 7; + if (this->tags().size() > 0) { + ::google::protobuf::internal::WireFormatLite::VerifyUtf8String( + this->tags().data(), static_cast(this->tags().length()), + ::google::protobuf::internal::WireFormatLite::SERIALIZE, + "cockroach.util.log.Entry.tags"); + ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased( + 7, this->tags(), output); + } + + // uint64 counter = 8; + if (this->counter() != 0) { + ::google::protobuf::internal::WireFormatLite::WriteUInt64(8, this->counter(), output); + } + + // bool redactable = 9; + if (this->redactable() != 0) { + ::google::protobuf::internal::WireFormatLite::WriteBool(9, this->redactable(), output); + } + output->WriteRaw((::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? 
_internal_metadata_.unknown_fields() : _internal_metadata_.default_instance()).data(), static_cast((::google::protobuf::internal::GetProto3PreserveUnknownsDefault() ? _internal_metadata_.unknown_fields() : _internal_metadata_.default_instance()).size())); // @@protoc_insertion_point(serialize_end:cockroach.util.log.Entry) @@ -386,6 +460,13 @@ size_t Entry::ByteSizeLong() const { this->message()); } + // string tags = 7; + if (this->tags().size() > 0) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::StringSize( + this->tags()); + } + // int64 time = 2; if (this->time() != 0) { total_size += 1 + @@ -400,6 +481,17 @@ size_t Entry::ByteSizeLong() const { this->line()); } + // .cockroach.util.log.Severity severity = 1; + if (this->severity() != 0) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::EnumSize(this->severity()); + } + + // bool redactable = 9; + if (this->redactable() != 0) { + total_size += 1 + 1; + } + // int64 goroutine = 6; if (this->goroutine() != 0) { total_size += 1 + @@ -407,10 +499,11 @@ size_t Entry::ByteSizeLong() const { this->goroutine()); } - // .cockroach.util.log.Severity severity = 1; - if (this->severity() != 0) { + // uint64 counter = 8; + if (this->counter() != 0) { total_size += 1 + - ::google::protobuf::internal::WireFormatLite::EnumSize(this->severity()); + ::google::protobuf::internal::WireFormatLite::UInt64Size( + this->counter()); } int cached_size = ::google::protobuf::internal::ToCachedSize(total_size); @@ -438,17 +531,27 @@ void Entry::MergeFrom(const Entry& from) { message_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.message_); } + if (from.tags().size() > 0) { + + tags_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.tags_); + } if (from.time() != 0) { set_time(from.time()); } if (from.line() != 0) { set_line(from.line()); } + if (from.severity() != 0) { + set_severity(from.severity()); + } + if 
(from.redactable() != 0) { + set_redactable(from.redactable()); + } if (from.goroutine() != 0) { set_goroutine(from.goroutine()); } - if (from.severity() != 0) { - set_severity(from.severity()); + if (from.counter() != 0) { + set_counter(from.counter()); } } @@ -473,10 +576,14 @@ void Entry::InternalSwap(Entry* other) { GetArenaNoVirtual()); message_.Swap(&other->message_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); + tags_.Swap(&other->tags_, &::google::protobuf::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); swap(time_, other->time_); swap(line_, other->line_); - swap(goroutine_, other->goroutine_); swap(severity_, other->severity_); + swap(redactable_, other->redactable_); + swap(goroutine_, other->goroutine_); + swap(counter_, other->counter_); _internal_metadata_.Swap(&other->_internal_metadata_); } diff --git a/c-deps/libroach/protos/util/log/log.pb.h b/c-deps/libroach/protos/util/log/log.pb.h index 60b212c5412f..7ea71bdca2b4 100644 --- a/c-deps/libroach/protos/util/log/log.pb.h +++ b/c-deps/libroach/protos/util/log/log.pb.h @@ -201,6 +201,20 @@ class Entry : public ::google::protobuf::MessageLite /* @@protoc_insertion_point ::std::string* release_message(); void set_allocated_message(::std::string* message); + // string tags = 7; + void clear_tags(); + static const int kTagsFieldNumber = 7; + const ::std::string& tags() const; + void set_tags(const ::std::string& value); + #if LANG_CXX11 + void set_tags(::std::string&& value); + #endif + void set_tags(const char* value); + void set_tags(const char* value, size_t size); + ::std::string* mutable_tags(); + ::std::string* release_tags(); + void set_allocated_tags(::std::string* tags); + // int64 time = 2; void clear_time(); static const int kTimeFieldNumber = 2; @@ -213,17 +227,29 @@ class Entry : public ::google::protobuf::MessageLite /* @@protoc_insertion_point ::google::protobuf::int64 line() const; void set_line(::google::protobuf::int64 value); + // 
.cockroach.util.log.Severity severity = 1; + void clear_severity(); + static const int kSeverityFieldNumber = 1; + ::cockroach::util::log::Severity severity() const; + void set_severity(::cockroach::util::log::Severity value); + + // bool redactable = 9; + void clear_redactable(); + static const int kRedactableFieldNumber = 9; + bool redactable() const; + void set_redactable(bool value); + // int64 goroutine = 6; void clear_goroutine(); static const int kGoroutineFieldNumber = 6; ::google::protobuf::int64 goroutine() const; void set_goroutine(::google::protobuf::int64 value); - // .cockroach.util.log.Severity severity = 1; - void clear_severity(); - static const int kSeverityFieldNumber = 1; - ::cockroach::util::log::Severity severity() const; - void set_severity(::cockroach::util::log::Severity value); + // uint64 counter = 8; + void clear_counter(); + static const int kCounterFieldNumber = 8; + ::google::protobuf::uint64 counter() const; + void set_counter(::google::protobuf::uint64 value); // @@protoc_insertion_point(class_scope:cockroach.util.log.Entry) private: @@ -231,10 +257,13 @@ class Entry : public ::google::protobuf::MessageLite /* @@protoc_insertion_point ::google::protobuf::internal::InternalMetadataWithArenaLite _internal_metadata_; ::google::protobuf::internal::ArenaStringPtr file_; ::google::protobuf::internal::ArenaStringPtr message_; + ::google::protobuf::internal::ArenaStringPtr tags_; ::google::protobuf::int64 time_; ::google::protobuf::int64 line_; - ::google::protobuf::int64 goroutine_; int severity_; + bool redactable_; + ::google::protobuf::int64 goroutine_; + ::google::protobuf::uint64 counter_; mutable ::google::protobuf::internal::CachedSize _cached_size_; friend struct ::protobuf_util_2flog_2flog_2eproto::TableStruct; }; @@ -698,6 +727,87 @@ inline void Entry::set_allocated_message(::std::string* message) { // @@protoc_insertion_point(field_set_allocated:cockroach.util.log.Entry.message) } +// string tags = 7; +inline void 
Entry::clear_tags() { + tags_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline const ::std::string& Entry::tags() const { + // @@protoc_insertion_point(field_get:cockroach.util.log.Entry.tags) + return tags_.GetNoArena(); +} +inline void Entry::set_tags(const ::std::string& value) { + + tags_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:cockroach.util.log.Entry.tags) +} +#if LANG_CXX11 +inline void Entry::set_tags(::std::string&& value) { + + tags_.SetNoArena( + &::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:cockroach.util.log.Entry.tags) +} +#endif +inline void Entry::set_tags(const char* value) { + GOOGLE_DCHECK(value != NULL); + + tags_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:cockroach.util.log.Entry.tags) +} +inline void Entry::set_tags(const char* value, size_t size) { + + tags_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast(value), size)); + // @@protoc_insertion_point(field_set_pointer:cockroach.util.log.Entry.tags) +} +inline ::std::string* Entry::mutable_tags() { + + // @@protoc_insertion_point(field_mutable:cockroach.util.log.Entry.tags) + return tags_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline ::std::string* Entry::release_tags() { + // @@protoc_insertion_point(field_release:cockroach.util.log.Entry.tags) + + return tags_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); +} +inline void Entry::set_allocated_tags(::std::string* tags) { + if (tags != NULL) { + + } else { + + } + tags_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), tags); + // 
@@protoc_insertion_point(field_set_allocated:cockroach.util.log.Entry.tags) +} + +// uint64 counter = 8; +inline void Entry::clear_counter() { + counter_ = GOOGLE_ULONGLONG(0); +} +inline ::google::protobuf::uint64 Entry::counter() const { + // @@protoc_insertion_point(field_get:cockroach.util.log.Entry.counter) + return counter_; +} +inline void Entry::set_counter(::google::protobuf::uint64 value) { + + counter_ = value; + // @@protoc_insertion_point(field_set:cockroach.util.log.Entry.counter) +} + +// bool redactable = 9; +inline void Entry::clear_redactable() { + redactable_ = false; +} +inline bool Entry::redactable() const { + // @@protoc_insertion_point(field_get:cockroach.util.log.Entry.redactable) + return redactable_; +} +inline void Entry::set_redactable(bool value) { + + redactable_ = value; + // @@protoc_insertion_point(field_set:cockroach.util.log.Entry.redactable) +} + // ------------------------------------------------------------------- // FileDetails diff --git a/docs/RFCS/20170628_web_session_login.md b/docs/RFCS/20170628_web_session_login.md index 3ab394837318..e722dc1c2073 100644 --- a/docs/RFCS/20170628_web_session_login.md +++ b/docs/RFCS/20170628_web_session_login.md @@ -365,7 +365,7 @@ for incoming requests, but instead would simply need to verify the signature on the token. The major issue with JWT is that it does not provide a way to revoke login -sessions; to do this, we would need to store a blacklist of revoked session IDs, +sessions; to do this, we would need to store a blocklist of revoked session IDs, which removes much of the advantage of not having the sessions table in the first place. 
diff --git a/docs/RFCS/20190318_error_handling.md b/docs/RFCS/20190318_error_handling.md index f5a3bf06bbd9..83ff3b96548c 100644 --- a/docs/RFCS/20190318_error_handling.md +++ b/docs/RFCS/20190318_error_handling.md @@ -2096,7 +2096,7 @@ To achieve this, the library introduce *error domains*, which are computed attri its domain and preserves its message, structure, etc. - domains are preserved across the network. - a function `EnsureNotInDomain()` (described below) makes it possible to block - errors from one or more “blacklist” domains from escaping an API boundary, + errors from one or more “blocklist” domains from escaping an API boundary, or conditionally transmute them into appropriate substitute errors, in particular [barriers](#barriers-Error-barriers). diff --git a/docs/generated/redact_safe.md b/docs/generated/redact_safe.md new file mode 100644 index 000000000000..956cabe1bef8 --- /dev/null +++ b/docs/generated/redact_safe.md @@ -0,0 +1,29 @@ +The following types are considered always safe for reporting: + +File | Type +--|-- +pkg/kv/kvserver/raft.go | `SnapshotRequest_Type` +pkg/roachpb/data.go | `ReplicaChangeType` +pkg/roachpb/metadata.go | `NodeID` +pkg/roachpb/metadata.go | `StoreID` +pkg/roachpb/metadata.go | `RangeID` +pkg/roachpb/metadata.go | `ReplicaID` +pkg/roachpb/metadata.go | `ReplicaType` +pkg/util/hlc/timestamp.go | `Timestamp` +pkg/util/log/redact.go | `reflect.TypeOf(true)` +pkg/util/log/redact.go | `reflect.TypeOf(123)` +pkg/util/log/redact.go | `reflect.TypeOf(int8(0))` +pkg/util/log/redact.go | `reflect.TypeOf(int16(0))` +pkg/util/log/redact.go | `reflect.TypeOf(int32(0))` +pkg/util/log/redact.go | `reflect.TypeOf(int64(0))` +pkg/util/log/redact.go | `reflect.TypeOf(uint8(0))` +pkg/util/log/redact.go | `reflect.TypeOf(uint16(0))` +pkg/util/log/redact.go | `reflect.TypeOf(uint32(0))` +pkg/util/log/redact.go | `reflect.TypeOf(uint64(0))` +pkg/util/log/redact.go | `reflect.TypeOf(float32(0))` +pkg/util/log/redact.go | 
`reflect.TypeOf(float64(0))` +pkg/util/log/redact.go | `reflect.TypeOf(complex64(0))` +pkg/util/log/redact.go | `reflect.TypeOf(complex128(0))` +pkg/util/log/redact.go | `reflect.TypeOf(os.Interrupt)` +pkg/util/log/redact.go | `reflect.TypeOf(time.Time{})` +pkg/util/log/redact.go | `reflect.TypeOf(time.Duration(0))` diff --git a/docs/generated/sql/functions.md b/docs/generated/sql/functions.md index 14153d12e99d..88af3534afd5 100644 --- a/docs/generated/sql/functions.md +++ b/docs/generated/sql/functions.md @@ -716,13 +716,41 @@ has no relationship with the commit order of concurrent transactions.

st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

-st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography.

+st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

-st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry.

+st_asewkt(geography: geography, maximum_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The maximum_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as possible.

-st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography.

+st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

-st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry.

+st_asewkt(geometry: geometry, maximum_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The maximum_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as possible.

+
+st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
+st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
+st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
+st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
+st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
+st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

@@ -740,9 +768,13 @@ has no relationship with the commit order of concurrent transactions.

st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

-st_astext(geography: geography) → string

Returns the WKT representation of a given Geography.

+st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

-st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry.

+st_astext(geography: geography, maximum_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The maximum_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as possible.

+
+st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
+st_astext(geometry: geometry, maximum_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The maximum_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as possible.

st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the given Geometry.

@@ -1110,6 +1142,13 @@ given Geometry.

st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

This function utilizes the GEOS module.

diff --git a/pkg/base/node_id.go b/pkg/base/node_id.go index 97cc4a91c15e..8132600b9bbb 100644 --- a/pkg/base/node_id.go +++ b/pkg/base/node_id.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/errorutil" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/redact" ) // NodeIDContainer is used to share a single roachpb.NodeID instance between @@ -34,11 +35,17 @@ type NodeIDContainer struct { // String returns the node ID, or "?" if it is unset. func (n *NodeIDContainer) String() string { + return redact.StringWithoutMarkers(n) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (n *NodeIDContainer) SafeFormat(w redact.SafePrinter, _ rune) { val := n.Get() if val == 0 { - return "?" + w.SafeRune('?') + } else { + w.Print(val) } - return strconv.Itoa(int(val)) } // Get returns the current node ID; 0 if it is unset. diff --git a/pkg/blobs/service.go b/pkg/blobs/service.go index 8517a74aac27..a0eee6f3afcb 100644 --- a/pkg/blobs/service.go +++ b/pkg/blobs/service.go @@ -27,10 +27,13 @@ package blobs import ( "context" + "os" "github.com/cockroachdb/cockroach/pkg/blobs/blobspb" "github.com/cockroachdb/errors" + "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) // Service implements the gRPC BlobService which exchanges bulk files between different nodes. @@ -89,5 +92,11 @@ func (s *Service) Delete( // Stat implements the gRPC service. func (s *Service) Stat(ctx context.Context, req *blobspb.StatRequest) (*blobspb.BlobStat, error) { - return s.localStorage.Stat(req.Filename) + resp, err := s.localStorage.Stat(req.Filename) + if os.IsNotExist(err) { + // gRPC hides the underlying golang ErrNotExist error, so we send back an + // equivalent gRPC error which can be handled gracefully on the client side. 
+ return nil, status.Error(codes.NotFound, err.Error()) + } + return resp, err } diff --git a/pkg/ccl/backupccl/backup_planning.go b/pkg/ccl/backupccl/backup_planning.go index 5c3f448c34d3..35ec5b5c22a8 100644 --- a/pkg/ccl/backupccl/backup_planning.go +++ b/pkg/ccl/backupccl/backup_planning.go @@ -88,17 +88,20 @@ type tableAndIndex struct { // spansForAllTableIndexes returns non-overlapping spans for every index and // table passed in. They would normally overlap if any of them are interleaved. func spansForAllTableIndexes( - codec keys.SQLCodec, tables []*sqlbase.TableDescriptor, revs []BackupManifest_DescriptorRevision, + codec keys.SQLCodec, + tables []sqlbase.TableDescriptorInterface, + revs []BackupManifest_DescriptorRevision, ) []roachpb.Span { added := make(map[tableAndIndex]bool, len(tables)) sstIntervalTree := interval.NewTree(interval.ExclusiveOverlapper) for _, table := range tables { - for _, index := range table.AllNonDropIndexes() { - if err := sstIntervalTree.Insert(intervalSpan(table.IndexSpan(codec, index.ID)), false); err != nil { + tableDesc := table.TableDesc() + for _, index := range tableDesc.AllNonDropIndexes() { + if err := sstIntervalTree.Insert(intervalSpan(tableDesc.IndexSpan(codec, index.ID)), false); err != nil { panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan")) } - added[tableAndIndex{tableID: table.ID, indexID: index.ID}] = true + added[tableAndIndex{tableID: table.GetID(), indexID: index.ID}] = true } } // If there are desc revisions, ensure that we also add any index spans @@ -350,18 +353,22 @@ func backupPlanHook( statsCache := p.ExecCfg().TableStatsCache tableStatistics := make([]*stats.TableStatisticProto, 0) - var tables []*sqlbase.TableDescriptor + var tables []sqlbase.TableDescriptorInterface for _, desc := range targetDescs { if dbDesc := desc.GetDatabase(); dbDesc != nil { - if err := p.CheckPrivilege(ctx, dbDesc, privilege.SELECT); err != nil { + db := sqlbase.NewImmutableDatabaseDescriptor(*dbDesc) + if err 
:= p.CheckPrivilege(ctx, db, privilege.SELECT); err != nil { return err } } if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil { - if err := p.CheckPrivilege(ctx, tableDesc, privilege.SELECT); err != nil { + // TODO(ajwerner): This construction of a wrapper is unfortunate and should + // go away in this PR. + table := sqlbase.NewImmutableTableDescriptor(*tableDesc) + if err := p.CheckPrivilege(ctx, table, privilege.SELECT); err != nil { return err } - tables = append(tables, tableDesc) + tables = append(tables, table) // If the table has any user defined types, error out. for _, col := range tableDesc.Columns { diff --git a/pkg/ccl/backupccl/manifest_handling.go b/pkg/ccl/backupccl/manifest_handling.go index 924361a06d00..fd89dc0b95e4 100644 --- a/pkg/ccl/backupccl/manifest_handling.go +++ b/pkg/ccl/backupccl/manifest_handling.go @@ -103,8 +103,10 @@ func readBackupManifestFromStore( func containsManifest(ctx context.Context, exportStore cloud.ExternalStorage) (bool, error) { r, err := exportStore.ReadFile(ctx, BackupManifestName) if err != nil { - //nolint:returnerrcheck - return false, nil /* TODO(dt): only silence non-exists errors */ + if errors.Is(err, cloud.ErrFileDoesNotExist) { + return false, nil + } + return false, err } r.Close() return true, nil @@ -661,28 +663,30 @@ func VerifyUsableExportTarget( readable string, encryption *roachpb.FileEncryptionOptions, ) error { - if r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil { - // TODO(dt): If we audit exactly what not-exists error each ExternalStorage - // returns (and then wrap/tag them), we could narrow this check. 
+ r, err := exportStore.ReadFile(ctx, BackupManifestName) + if err == nil { r.Close() return pgerror.Newf(pgcode.FileAlreadyExists, "%s already contains a %s file", readable, BackupManifestName) } - if r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil { - // TODO(dt): If we audit exactly what not-exists error each ExternalStorage - // returns (and then wrap/tag them), we could narrow this check. - r.Close() - return pgerror.Newf(pgcode.FileAlreadyExists, - "%s already contains a %s file", - readable, BackupManifestName) + + if !errors.Is(err, cloud.ErrFileDoesNotExist) { + return errors.Wrapf(err, "%s returned an unexpected error when checking for the existence of %s file", readable, BackupManifestName) } - if r, err := exportStore.ReadFile(ctx, BackupManifestCheckpointName); err == nil { + + r, err = exportStore.ReadFile(ctx, BackupManifestCheckpointName) + if err == nil { r.Close() return pgerror.Newf(pgcode.FileAlreadyExists, "%s already contains a %s file (is another operation already in progress?)", readable, BackupManifestCheckpointName) } + + if !errors.Is(err, cloud.ErrFileDoesNotExist) { + return errors.Wrapf(err, "%s returned an unexpected error when checking for the existence of %s file", readable, BackupManifestCheckpointName) + } + if err := writeBackupManifest( ctx, settings, exportStore, BackupManifestCheckpointName, encryption, &BackupManifest{}, ); err != nil { diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index a4663817bf6f..554858896c4b 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -430,8 +430,8 @@ func splitAndScatter( func WriteTableDescs( ctx context.Context, txn *kv.Txn, - databases []*sqlbase.DatabaseDescriptor, - tables []*sqlbase.TableDescriptor, + databases []*sqlbase.ImmutableDatabaseDescriptor, + tables []sqlbase.TableDescriptorInterface, descCoverage tree.DescriptorCoverage, settings *cluster.Settings, extra []roachpb.KeyValue, @@ -440,7 
+440,7 @@ func WriteTableDescs( defer tracing.FinishSpan(span) err := func() error { b := txn.NewBatch() - wroteDBs := make(map[sqlbase.ID]*sqlbase.DatabaseDescriptor) + wroteDBs := make(map[sqlbase.ID]*sqlbase.ImmutableDatabaseDescriptor) for _, desc := range databases { // If the restore is not a full cluster restore we cannot know that // the users on the restoring cluster match the ones that were on the @@ -448,29 +448,30 @@ func WriteTableDescs( if descCoverage != tree.AllDescriptors { desc.Privileges = sqlbase.NewDefaultPrivilegeDescriptor() } - wroteDBs[desc.ID] = desc - if err := catalogkv.WriteNewDescToBatch(ctx, false /* kvTrace */, settings, b, keys.SystemSQLCodec, desc.ID, desc); err != nil { + wroteDBs[desc.GetID()] = desc + if err := catalogkv.WriteNewDescToBatch(ctx, false /* kvTrace */, settings, b, keys.SystemSQLCodec, desc.GetID(), desc); err != nil { return err } // Depending on which cluster version we are restoring to, we decide which // namespace table to write the descriptor into. This may cause wrong // behavior if the cluster version is bumped DURING a restore. - dKey := sqlbase.MakeDatabaseNameKey(ctx, settings, desc.Name) - b.CPut(dKey.Key(keys.SystemSQLCodec), desc.ID, nil) + dKey := sqlbase.MakeDatabaseNameKey(ctx, settings, desc.GetName()) + b.CPut(dKey.Key(keys.SystemSQLCodec), desc.GetID(), nil) } for i := range tables { + table := tables[i].TableDesc() // For full cluster restore, keep privileges as they were. - if wrote, ok := wroteDBs[tables[i].ParentID]; ok { + if wrote, ok := wroteDBs[table.ParentID]; ok { // Leave the privileges of the temp system tables as // the default. 
- if descCoverage != tree.AllDescriptors || wrote.Name == restoreTempSystemDB { - tables[i].Privileges = wrote.GetPrivileges() + if descCoverage != tree.AllDescriptors || wrote.GetName() == restoreTempSystemDB { + table.Privileges = wrote.GetPrivileges() } } else { - parentDB, err := sqlbase.GetDatabaseDescFromID(ctx, txn, keys.SystemSQLCodec, tables[i].ParentID) + parentDB, err := sqlbase.GetDatabaseDescFromID(ctx, txn, keys.SystemSQLCodec, table.ParentID) if err != nil { return errors.Wrapf(err, - "failed to lookup parent DB %d", errors.Safe(tables[i].ParentID)) + "failed to lookup parent DB %d", errors.Safe(table.ParentID)) } // We don't check priv's here since we checked them during job planning. @@ -478,17 +479,17 @@ func WriteTableDescs( if descCoverage != tree.AllDescriptors { // Default is to copy privs from restoring parent db, like CREATE TABLE. // TODO(dt): Make this more configurable. - tables[i].Privileges = parentDB.GetPrivileges() + table.Privileges = parentDB.GetPrivileges() } } - if err := catalogkv.WriteNewDescToBatch(ctx, false /* kvTrace */, settings, b, keys.SystemSQLCodec, tables[i].ID, tables[i]); err != nil { + if err := catalogkv.WriteNewDescToBatch(ctx, false /* kvTrace */, settings, b, keys.SystemSQLCodec, table.ID, tables[i]); err != nil { return err } // Depending on which cluster version we are restoring to, we decide which // namespace table to write the descriptor into. This may cause wrong // behavior if the cluster version is bumped DURING a restore. 
- tkey := sqlbase.MakePublicTableNameKey(ctx, settings, tables[i].ParentID, tables[i].Name) - b.CPut(tkey.Key(keys.SystemSQLCodec), tables[i].ID, nil) + tkey := sqlbase.MakePublicTableNameKey(ctx, settings, table.ParentID, table.Name) + b.CPut(tkey.Key(keys.SystemSQLCodec), table.ID, nil) } for _, kv := range extra { b.InitPut(kv.Key, &kv.Value, false) @@ -501,9 +502,9 @@ func WriteTableDescs( } for _, table := range tables { - if err := table.Validate(ctx, txn, keys.SystemSQLCodec); err != nil { + if err := table.TableDesc().Validate(ctx, txn, keys.SystemSQLCodec); err != nil { return errors.Wrapf(err, - "validate table %d", errors.Safe(table.ID)) + "validate table %d", errors.Safe(table.GetID())) } } return nil @@ -564,7 +565,7 @@ func restore( backupManifests []BackupManifest, backupLocalityInfo []jobspb.RestoreDetails_BackupLocalityInfo, endTime hlc.Timestamp, - tables []*sqlbase.TableDescriptor, + tables []sqlbase.TableDescriptorInterface, oldTableIDs []sqlbase.ID, spans []roachpb.Span, job *jobs.Job, @@ -587,7 +588,7 @@ func restore( var rekeys []roachpb.ImportRequest_TableRekey for i := range tables { tableToSerialize := tables[i] - newDescBytes, err := protoutil.Marshal(sqlbase.WrapDescriptor(tableToSerialize)) + newDescBytes, err := protoutil.Marshal(tableToSerialize.DescriptorProto()) if err != nil { return mu.res, errors.NewAssertionErrorWithWrappedErrf(err, "marshaling descriptor") @@ -631,7 +632,7 @@ func restore( pkIDs := make(map[uint64]struct{}) for _, tbl := range tables { - pkIDs[roachpb.BulkOpSummaryID(uint64(tbl.ID), uint64(tbl.PrimaryIndex.ID))] = struct{}{} + pkIDs[roachpb.BulkOpSummaryID(uint64(tbl.GetID()), uint64(tbl.TableDesc().PrimaryIndex.ID))] = struct{}{} } // We're already limiting these on the server-side, but sending all the @@ -789,8 +790,8 @@ func loadBackupSQLDescs( type restoreResumer struct { job *jobs.Job settings *cluster.Settings - databases []*sqlbase.DatabaseDescriptor - tables []*sqlbase.TableDescriptor + databases 
[]*sqlbase.ImmutableDatabaseDescriptor + tables []sqlbase.TableDescriptorInterface descriptorCoverage tree.DescriptorCoverage latestStats []*stats.TableStatisticProto execCfg *sql.ExecutorConfig @@ -834,7 +835,7 @@ func remapRelevantStatistics( func isDatabaseEmpty( ctx context.Context, db *kv.DB, - dbDesc *sql.DatabaseDescriptor, + dbDesc *sqlbase.ImmutableDatabaseDescriptor, ignoredTables map[sqlbase.ID]struct{}, ) (bool, error) { var allDescs []sqlbase.Descriptor @@ -853,7 +854,7 @@ func isDatabaseEmpty( if _, ok := ignoredTables[t.GetID()]; ok { continue } - if t.GetParentID() == dbDesc.ID { + if t.GetParentID() == dbDesc.GetID() { return false, nil } } @@ -866,26 +867,28 @@ func isDatabaseEmpty( func createImportingTables( ctx context.Context, p sql.PlanHookState, sqlDescs []sqlbase.Descriptor, r *restoreResumer, ) ( - []*sqlbase.DatabaseDescriptor, - []*sqlbase.TableDescriptor, + []*sqlbase.ImmutableDatabaseDescriptor, + []sqlbase.TableDescriptorInterface, []sqlbase.ID, []roachpb.Span, error, ) { details := r.job.Details().(jobspb.RestoreDetails) - var databases []*sqlbase.DatabaseDescriptor - var tables []*sqlbase.TableDescriptor + var databases []*sqlbase.ImmutableDatabaseDescriptor + var tables []sqlbase.TableDescriptorInterface var oldTableIDs []sqlbase.ID for _, desc := range sqlDescs { if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil { - tables = append(tables, tableDesc) + table := sqlbase.NewMutableCreatedTableDescriptor(*tableDesc) + tables = append(tables, table) oldTableIDs = append(oldTableIDs, tableDesc.ID) } if dbDesc := desc.GetDatabase(); dbDesc != nil { - if rewrite, ok := details.TableRewrites[dbDesc.ID]; ok { - dbDesc.ID = rewrite.TableID - databases = append(databases, dbDesc) + if rewrite, ok := details.TableRewrites[dbDesc.GetID()]; ok { + rewriteDesc := sqlbase.NewInitialDatabaseDescriptorWithPrivileges( + rewrite.TableID, dbDesc.GetName(), dbDesc.Privileges) + databases = append(databases, rewriteDesc) } } } @@ -896,11 
+899,8 @@ func createImportingTables( } } if details.DescriptorCoverage == tree.AllDescriptors { - databases = append(databases, &sqlbase.DatabaseDescriptor{ - ID: sqlbase.ID(tempSystemDBID), - Name: restoreTempSystemDB, - Privileges: sqlbase.NewDefaultPrivilegeDescriptor(), - }) + databases = append(databases, sqlbase.NewInitialDatabaseDescriptor( + sqlbase.ID(tempSystemDBID), restoreTempSystemDB)) } // We get the spans of the restoring tables _as they appear in the backup_, @@ -911,11 +911,15 @@ func createImportingTables( // Assign new IDs and privileges to the tables, and update all references to // use the new IDs. - if err := RewriteTableDescs(tables, details.TableRewrites, details.OverrideDB); err != nil { + tableDescs := make([]*sqlbase.TableDescriptor, len(tables)) + for i, table := range tables { + tableDescs[i] = table.TableDesc() + } + if err := RewriteTableDescs(tableDescs, details.TableRewrites, details.OverrideDB); err != nil { return nil, nil, nil, nil, err } - for _, desc := range tables { + for _, desc := range tableDescs { desc.Version++ desc.State = sqlbase.TableDescriptor_OFFLINE desc.OfflineReason = "restoring" @@ -929,7 +933,7 @@ func createImportingTables( } details.PrepareCompleted = true - details.TableDescs = tables + details.TableDescs = tableDescs // Update the job once all descs have been prepared for ingestion. err := r.job.WithTxn(txn).SetDetails(ctx, details) @@ -1082,23 +1086,23 @@ func (r *restoreResumer) publishTables(ctx context.Context) error { // accessed. b := txn.NewBatch() for _, tbl := range r.tables { - tableDesc := *tbl - tableDesc.Version++ - tableDesc.State = sqlbase.TableDescriptor_PUBLIC + newTableDesc := sqlbase.NewMutableExistingTableDescriptor(*tbl.TableDesc()) + newTableDesc.Version++ + newTableDesc.State = sqlbase.TableDescriptor_PUBLIC // Convert any mutations that were in progress on the table descriptor // when the backup was taken, and convert them to schema change jobs. 
- newJobs, err := createSchemaChangeJobsFromMutations(ctx, r.execCfg.JobRegistry, r.execCfg.Codec, txn, r.job.Payload().Username, &tableDesc) + newJobs, err := createSchemaChangeJobsFromMutations(ctx, r.execCfg.JobRegistry, r.execCfg.Codec, txn, r.job.Payload().Username, newTableDesc.TableDesc()) if err != nil { return err } newSchemaChangeJobs = append(newSchemaChangeJobs, newJobs...) - existingDescVal, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, r.execCfg.Codec, tbl) + existingDescVal, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, r.execCfg.Codec, tbl.TableDesc()) if err != nil { return errors.Wrap(err, "validating table descriptor has not changed") } b.CPut( - sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), - sqlbase.WrapDescriptor(&tableDesc), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, newTableDesc.ID), + newTableDesc.DescriptorProto(), existingDescVal, ) } @@ -1135,7 +1139,7 @@ func (r *restoreResumer) publishTables(ctx context.Context) error { // rows affected per table, so we use a large number because we want to make // sure that stats always get created/refreshed here. 
for i := range r.tables { - r.execCfg.StatsRefresher.NotifyMutation(r.tables[i].ID, math.MaxInt32 /* rowsAffected */) + r.execCfg.StatsRefresher.NotifyMutation(r.tables[i].GetID(), math.MaxInt32 /* rowsAffected */) } return nil @@ -1176,9 +1180,9 @@ func (r *restoreResumer) dropTables(ctx context.Context, jr *jobs.Registry, txn tablesToGC := make([]sqlbase.ID, 0, len(details.TableDescs)) for _, tbl := range details.TableDescs { tablesToGC = append(tablesToGC, tbl.ID) - tableDesc := *tbl - tableDesc.Version++ - tableDesc.State = sqlbase.TableDescriptor_DROP + tableToDrop := sqlbase.NewMutableExistingTableDescriptor(*tbl) + tableToDrop.Version++ + tableToDrop.State = sqlbase.TableDescriptor_DROP err := sqlbase.RemovePublicTableNamespaceEntry(ctx, txn, keys.SystemSQLCodec, tbl.ParentID, tbl.Name) if err != nil { return errors.Wrap(err, "dropping tables caused by restore fail/cancel from public namespace") @@ -1188,8 +1192,8 @@ func (r *restoreResumer) dropTables(ctx context.Context, jr *jobs.Registry, txn return errors.Wrap(err, "dropping tables caused by restore fail/cancel") } b.CPut( - sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), - sqlbase.WrapDescriptor(&tableDesc), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableToDrop.ID), + tableToDrop.DescriptorProto(), existingDescVal, ) } @@ -1230,13 +1234,13 @@ func (r *restoreResumer) dropTables(ctx context.Context, jr *jobs.Registry, txn // We need to ignore details.TableDescs since we haven't committed the txn that deletes these. 
isDBEmpty, err = isDatabaseEmpty(ctx, r.execCfg.DB, dbDesc, ignoredTables) if err != nil { - return errors.Wrapf(err, "checking if database %s is empty during restore cleanup", dbDesc.Name) + return errors.Wrapf(err, "checking if database %s is empty during restore cleanup", dbDesc.GetName()) } if isDBEmpty { - descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, dbDesc.ID) + descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, dbDesc.GetID()) b.Del(descKey) - b.Del(sqlbase.NewDatabaseKey(dbDesc.Name).Key(keys.SystemSQLCodec)) + b.Del(sqlbase.NewDatabaseKey(dbDesc.GetName()).Key(keys.SystemSQLCodec)) } } if err := txn.Run(ctx, b); err != nil { diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go index 4a20b3a3159f..f329f8cae617 100644 --- a/pkg/ccl/backupccl/restore_planning.go +++ b/pkg/ccl/backupccl/restore_planning.go @@ -132,18 +132,18 @@ func maybeFilterMissingViews( func allocateTableRewrites( ctx context.Context, p sql.PlanHookState, - databasesByID map[sqlbase.ID]*sql.DatabaseDescriptor, + databasesByID map[sqlbase.ID]*sqlbase.ImmutableDatabaseDescriptor, tablesByID map[sqlbase.ID]*sql.TableDescriptor, - restoreDBs []*sqlbase.DatabaseDescriptor, + restoreDBs []*sqlbase.ImmutableDatabaseDescriptor, descriptorCoverage tree.DescriptorCoverage, opts map[string]string, ) (TableRewriteMap, error) { tableRewrites := make(TableRewriteMap) overrideDB, renaming := opts[restoreOptIntoDB] - restoreDBNames := make(map[string]*sqlbase.DatabaseDescriptor, len(restoreDBs)) + restoreDBNames := make(map[string]*sqlbase.ImmutableDatabaseDescriptor, len(restoreDBs)) for _, db := range restoreDBs { - restoreDBNames[db.Name] = db + restoreDBNames[db.GetName()] = db } if len(restoreDBNames) > 0 && renaming { @@ -248,7 +248,7 @@ func allocateTableRewrites( return err } - if table.ParentID == sqlbase.SystemDB.ID { + if table.ParentID == sqlbase.SystemDB.GetID() { // For full cluster backups, put the system tables in the temporary 
// system table. targetDB = restoreTempSystemDB @@ -264,7 +264,7 @@ func allocateTableRewrites( return errors.Errorf("no database with ID %d in backup for table %q", table.ParentID, table.Name) } - targetDB = database.Name + targetDB = database.GetName() } if _, ok := restoreDBNames[targetDB]; ok { @@ -333,7 +333,7 @@ func allocateTableRewrites( var newID sqlbase.ID var err error if descriptorCoverage == tree.AllDescriptors { - newID = db.ID + newID = db.GetID() } else { newID, err = catalogkv.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec) if err != nil { @@ -341,8 +341,8 @@ func allocateTableRewrites( } } - tableRewrites[db.ID] = &jobspb.RestoreDetails_TableRewrite{TableID: newID} - for _, tableID := range needsNewParentIDs[db.Name] { + tableRewrites[db.GetID()] = &jobspb.RestoreDetails_TableRewrite{TableID: newID} + for _, tableID := range needsNewParentIDs[db.GetName()] { tableRewrites[tableID] = &jobspb.RestoreDetails_TableRewrite{ParentID: newID} } } @@ -354,7 +354,7 @@ func allocateTableRewrites( tablesToRemap := make([]*sqlbase.TableDescriptor, 0, len(tablesByID)) for _, table := range tablesByID { if descriptorCoverage == tree.AllDescriptors { - if table.ParentID == sqlbase.SystemDB.ID { + if table.ParentID == sqlbase.SystemDB.GetID() { // This is a system table that should be marked for descriptor creation. tablesToRemap = append(tablesToRemap, table) } else { @@ -401,7 +401,7 @@ func maybeUpgradeTableDescsInBackupManifests( for _, desc := range backupManifest.Descriptors { if table := desc.Table(hlc.Timestamp{}); table != nil { protoGetter.Protos[string(sqlbase.MakeDescMetadataKey(codec, table.ID))] = - sqlbase.WrapDescriptor(protoutil.Clone(table).(*sqlbase.TableDescriptor)) + sqlbase.NewImmutableTableDescriptor(*protoutil.Clone(table).(*sqlbase.TableDescriptor)).DescriptorProto() } } } @@ -414,7 +414,8 @@ func maybeUpgradeTableDescsInBackupManifests( return err } // TODO(lucy): Is this necessary? 
- backupManifest.Descriptors[j] = *sqlbase.WrapDescriptor(table) + backupManifest.Descriptors[j] = *sqlbase.NewMutableExistingTableDescriptor( + *table).DescriptorProto() } } } @@ -734,11 +735,15 @@ func doRestorePlan( return err } - databasesByID := make(map[sqlbase.ID]*sqlbase.DatabaseDescriptor) + databasesByID := make(map[sqlbase.ID]*sqlbase.ImmutableDatabaseDescriptor) tablesByID := make(map[sqlbase.ID]*sqlbase.TableDescriptor) for _, desc := range sqlDescs { + // TODO(ajwerner): make sqlDescs into a []sqlbase.DescriptorInterface so + // we don't need to do this duplicate construction of the unwrapped + // descriptor. if dbDesc := desc.GetDatabase(); dbDesc != nil { - databasesByID[dbDesc.ID] = dbDesc + dbDesc := sqlbase.NewImmutableDatabaseDescriptor(*dbDesc) + databasesByID[dbDesc.GetID()] = dbDesc } else if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil { tablesByID[tableDesc.ID] = tableDesc } diff --git a/pkg/ccl/backupccl/show.go b/pkg/ccl/backupccl/show.go index 9cd894df0cf0..9fed33532bc3 100644 --- a/pkg/ccl/backupccl/show.go +++ b/pkg/ccl/backupccl/show.go @@ -191,9 +191,9 @@ func backupShowerDefault( for _, manifest := range manifests { descs := make(map[sqlbase.ID]string) for _, descriptor := range manifest.Descriptors { - if database := descriptor.GetDatabase(); database != nil { - if _, ok := descs[database.ID]; !ok { - descs[database.ID] = database.Name + if descriptor.GetDatabase() != nil { + if _, ok := descs[descriptor.GetID()]; !ok { + descs[descriptor.GetID()] = descriptor.GetName() } } } @@ -241,7 +241,8 @@ func backupShowerDefault( FKDisplayMode: sql.OmitMissingFKClausesFromCreate, IgnoreComments: true, } - schema, err := p.ShowCreate(ctx, dbName, manifest.Descriptors, table, displayOptions) + schema, err := p.ShowCreate(ctx, dbName, manifest.Descriptors, + sqlbase.NewImmutableTableDescriptor(*table), displayOptions) if err != nil { continue } diff --git a/pkg/ccl/backupccl/targets.go b/pkg/ccl/backupccl/targets.go index 
fe67f238f79c..52bfb0bd0893 100644 --- a/pkg/ccl/backupccl/targets.go +++ b/pkg/ccl/backupccl/targets.go @@ -25,13 +25,15 @@ import ( type descriptorsMatched struct { // all tables that match targets plus their parent databases. + // + // TODO(ajwerner): Replace this with DescriptorInterface. descs []sqlbase.Descriptor // the databases from which all tables were matched (eg a.* or DATABASE a). expandedDB []sqlbase.ID // explicitly requested DBs (e.g. DATABASE a). - requestedDBs []*sqlbase.DatabaseDescriptor + requestedDBs []*sqlbase.ImmutableDatabaseDescriptor } func (d descriptorsMatched) checkExpansions(coveredDBs []sqlbase.ID) error { @@ -40,7 +42,7 @@ func (d descriptorsMatched) checkExpansions(coveredDBs []sqlbase.ID) error { covered[i] = true } for _, i := range d.requestedDBs { - if !covered[i.ID] { + if !covered[i.GetID()] { return errors.Errorf("cannot RESTORE DATABASE from a backup of individual tables (use SHOW BACKUP to determine available tables)") } } @@ -99,6 +101,8 @@ func (r *descriptorResolver) LookupObject( // newDescriptorResolver prepares a descriptorResolver for the given // known set of descriptors. +// +// TODO(ajwerner): overhaul this structure to use "unwrapped" descriptors. func newDescriptorResolver(descs []sqlbase.Descriptor) (*descriptorResolver, error) { r := &descriptorResolver{ descByID: make(map[sqlbase.ID]sqlbase.Descriptor), @@ -110,12 +114,12 @@ func newDescriptorResolver(descs []sqlbase.Descriptor) (*descriptorResolver, err // check the ParentID for tables, and all the valid parents must be // known before we start to check that. 
for _, desc := range descs { - if dbDesc := desc.GetDatabase(); dbDesc != nil { - if _, ok := r.dbsByName[dbDesc.Name]; ok { + if desc.GetDatabase() != nil { + if _, ok := r.dbsByName[desc.GetName()]; ok { return nil, errors.Errorf("duplicate database name: %q used for ID %d and %d", - dbDesc.Name, r.dbsByName[dbDesc.Name], dbDesc.ID) + desc.GetName(), r.dbsByName[desc.GetName()], desc.GetID()) } - r.dbsByName[dbDesc.Name] = dbDesc.ID + r.dbsByName[desc.GetName()] = desc.GetID() } // Incidentally, also remember all the descriptors by ID. @@ -191,7 +195,8 @@ func descriptorsMatchingTargets( if _, ok := alreadyRequestedDBs[dbID]; !ok { desc := resolver.descByID[dbID] ret.descs = append(ret.descs, desc) - ret.requestedDBs = append(ret.requestedDBs, desc.GetDatabase()) + ret.requestedDBs = append(ret.requestedDBs, + sqlbase.NewImmutableDatabaseDescriptor(*desc.GetDatabase())) ret.expandedDB = append(ret.expandedDB, dbID) alreadyRequestedDBs[dbID] = struct{}{} alreadyExpandedDBs[dbID] = struct{}{} @@ -464,25 +469,26 @@ func allSQLDescriptors(ctx context.Context, txn *kv.Txn) ([]sqlbase.Descriptor, return sqlDescs, nil } -func ensureInterleavesIncluded(tables []*sqlbase.TableDescriptor) error { +func ensureInterleavesIncluded(tables []sqlbase.TableDescriptorInterface) error { inBackup := make(map[sqlbase.ID]bool, len(tables)) for _, t := range tables { - inBackup[t.ID] = true + inBackup[t.GetID()] = true } for _, table := range tables { - if err := table.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error { + tableDesc := table.TableDesc() + if err := tableDesc.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error { for _, a := range index.Interleave.Ancestors { if !inBackup[a.TableID] { return errors.Errorf( - "cannot backup table %q without interleave parent (ID %d)", table.Name, a.TableID, + "cannot backup table %q without interleave parent (ID %d)", table.GetName(), a.TableID, ) } } for _, c := range index.InterleavedBy { if !inBackup[c.Table] { 
return errors.Errorf( - "cannot backup table %q without interleave child table (ID %d)", table.Name, c.Table, + "cannot backup table %q without interleave child table (ID %d)", table.GetName(), c.Table, ) } } @@ -563,9 +569,9 @@ func fullClusterTargetsBackup( // full cluster backup, and all the user databases. func fullClusterTargets( allDescs []sqlbase.Descriptor, -) ([]sqlbase.Descriptor, []*sqlbase.DatabaseDescriptor, error) { +) ([]sqlbase.Descriptor, []*sqlbase.ImmutableDatabaseDescriptor, error) { fullClusterDescs := make([]sqlbase.Descriptor, 0, len(allDescs)) - fullClusterDBs := make([]*sqlbase.DatabaseDescriptor, 0) + fullClusterDBs := make([]*sqlbase.ImmutableDatabaseDescriptor, 0) systemTablesToBackup := make(map[string]struct{}, len(fullClusterSystemTables)) for _, tableName := range fullClusterSystemTables { @@ -574,14 +580,15 @@ func fullClusterTargets( for _, desc := range allDescs { if dbDesc := desc.GetDatabase(); dbDesc != nil { + dbDesc := sqlbase.NewImmutableDatabaseDescriptor(*dbDesc) fullClusterDescs = append(fullClusterDescs, desc) - if dbDesc.ID != sqlbase.SystemDB.ID { + if dbDesc.GetID() != sqlbase.SystemDB.GetID() { // The only database that isn't being fully backed up is the system DB. fullClusterDBs = append(fullClusterDBs, dbDesc) } } if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil { - if tableDesc.ParentID == sqlbase.SystemDB.ID { + if tableDesc.ParentID == keys.SystemDatabaseID { // Add only the system tables that we plan to include in a full cluster // backup. 
if _, ok := systemTablesToBackup[tableDesc.Name]; ok { @@ -628,20 +635,20 @@ func CheckTableExists( func fullClusterTargetsRestore( allDescs []sqlbase.Descriptor, -) ([]sqlbase.Descriptor, []*sqlbase.DatabaseDescriptor, error) { +) ([]sqlbase.Descriptor, []*sqlbase.ImmutableDatabaseDescriptor, error) { fullClusterDescs, fullClusterDBs, err := fullClusterTargets(allDescs) if err != nil { return nil, nil, err } filteredDescs := make([]sqlbase.Descriptor, 0, len(fullClusterDescs)) for _, desc := range fullClusterDescs { - if _, isDefaultDB := sqlbase.DefaultUserDBs[desc.GetName()]; !isDefaultDB && desc.GetID() != sqlbase.SystemDB.ID { + if _, isDefaultDB := sqlbase.DefaultUserDBs[desc.GetName()]; !isDefaultDB && desc.GetID() != keys.SystemDatabaseID { filteredDescs = append(filteredDescs, desc) } } - filteredDBs := make([]*sqlbase.DatabaseDescriptor, 0, len(fullClusterDBs)) + filteredDBs := make([]*sqlbase.ImmutableDatabaseDescriptor, 0, len(fullClusterDBs)) for _, db := range fullClusterDBs { - if _, isDefaultDB := sqlbase.DefaultUserDBs[db.GetName()]; !isDefaultDB && db.GetID() != sqlbase.SystemDB.ID { + if _, isDefaultDB := sqlbase.DefaultUserDBs[db.GetName()]; !isDefaultDB && db.GetID() != keys.SystemDatabaseID { filteredDBs = append(filteredDBs, db) } } @@ -656,7 +663,7 @@ func selectTargets( targets tree.TargetList, descriptorCoverage tree.DescriptorCoverage, asOf hlc.Timestamp, -) ([]sqlbase.Descriptor, []*sqlbase.DatabaseDescriptor, error) { +) ([]sqlbase.Descriptor, []*sqlbase.ImmutableDatabaseDescriptor, error) { allDescs, lastBackupManifest := loadSQLDescsFromBackupsAtTime(backupManifests, asOf) if descriptorCoverage == tree.AllDescriptors { diff --git a/pkg/ccl/backupccl/targets_test.go b/pkg/ccl/backupccl/targets_test.go index 7888fbaaf694..13e1a9af4f9b 100644 --- a/pkg/ccl/backupccl/targets_test.go +++ b/pkg/ccl/backupccl/targets_test.go @@ -27,18 +27,32 @@ import ( func TestDescriptorsMatchingTargets(t *testing.T) { defer leaktest.AfterTest(t)() - 
descriptors := []sqlbase.Descriptor{ - *sqlbase.WrapDescriptor(&sqlbase.DatabaseDescriptor{ID: 0, Name: "system"}), - *sqlbase.WrapDescriptor(&sqlbase.TableDescriptor{ID: 1, Name: "foo", ParentID: 0}), - *sqlbase.WrapDescriptor(&sqlbase.TableDescriptor{ID: 2, Name: "bar", ParentID: 0}), - *sqlbase.WrapDescriptor(&sqlbase.TableDescriptor{ID: 4, Name: "baz", ParentID: 3}), - *sqlbase.WrapDescriptor(&sqlbase.TableDescriptor{ID: 6, Name: "offline", ParentID: 0, State: sqlbase.TableDescriptor_OFFLINE}), - *sqlbase.WrapDescriptor(&sqlbase.DatabaseDescriptor{ID: 3, Name: "data"}), - *sqlbase.WrapDescriptor(&sqlbase.DatabaseDescriptor{ID: 5, Name: "empty"}), - } - // Set the timestamp on the table descriptors. - for _, d := range descriptors { - d.Table(hlc.Timestamp{WallTime: 1}) + // TODO(ajwerner): There should be a constructor for an ImmutableTableDescriptor + // and really all of the leasable descriptor types which includes its initial + // DescriptorMeta. This refactoring precedes the actual adoption of + // DescriptorMeta. + var descriptors []sqlbase.Descriptor + { + // Make shorthand type names for syntactic sugar. 
+ type tbDesc = sqlbase.TableDescriptor + ts1 := hlc.Timestamp{WallTime: 1} + mkTable := func(descriptor tbDesc) sqlbase.Descriptor { + desc := sqlbase.NewImmutableTableDescriptor(descriptor) + desc.ModificationTime = ts1 + return *desc.DescriptorProto() + } + mkDB := func(id sqlbase.ID, name string) sqlbase.Descriptor { + return *sqlbase.NewInitialDatabaseDescriptor(id, name).DescriptorProto() + } + descriptors = []sqlbase.Descriptor{ + mkDB(0, "system"), + mkTable(tbDesc{ID: 1, Name: "foo", ParentID: 0}), + mkTable(tbDesc{ID: 2, Name: "bar", ParentID: 0}), + mkTable(tbDesc{ID: 4, Name: "baz", ParentID: 3}), + mkTable(tbDesc{ID: 6, Name: "offline", ParentID: 0, State: sqlbase.TableDescriptor_OFFLINE}), + mkDB(3, "data"), + mkDB(5, "empty"), + } } tests := []struct { diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index e859590915a3..85ac5745498a 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -595,7 +595,8 @@ func TestChangefeedSchemaChangeNoBackfill(t *testing.T) { t.Run(`sinkless`, sinklessTest(testFn)) t.Run(`enterprise`, enterpriseTest(testFn)) log.Flush() - entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation")) + entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), + log.WithFlattenedSensitiveData) if err != nil { t.Fatal(err) } @@ -777,7 +778,8 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { t.Run(`sinkless`, sinklessTest(testFn)) t.Run(`enterprise`, enterpriseTest(testFn)) log.Flush() - entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation")) + entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, + regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { t.Fatal(err) } @@ -877,7 +879,8 @@ func TestChangefeedAfterSchemaChangeBackfill(t *testing.T) { 
t.Run(`sinkless`, sinklessTest(testFn)) t.Run(`enterprise`, enterpriseTest(testFn)) log.Flush() - entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation")) + entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, + regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { t.Fatal(err) } diff --git a/pkg/ccl/changefeedccl/nemeses_test.go b/pkg/ccl/changefeedccl/nemeses_test.go index 1bd80bb817cd..7e6613699450 100644 --- a/pkg/ccl/changefeedccl/nemeses_test.go +++ b/pkg/ccl/changefeedccl/nemeses_test.go @@ -45,7 +45,8 @@ func TestChangefeedNemeses(t *testing.T) { t.Run(`enterprise`, enterpriseTest(testFn)) t.Run(`cloudstorage`, cloudStorageTest(testFn)) log.Flush() - entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation")) + entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, + regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { t.Fatal(err) } diff --git a/pkg/ccl/importccl/exportcsv_test.go b/pkg/ccl/importccl/exportcsv_test.go index 8ebcc9033ed7..bd38c5966ec3 100644 --- a/pkg/ccl/importccl/exportcsv_test.go +++ b/pkg/ccl/importccl/exportcsv_test.go @@ -31,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/workload/bank" "github.com/cockroachdb/cockroach/pkg/workload/workloadsql" "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" ) func setupExportableBank(t *testing.T, nodes, rows int) (*sqlutils.SQLRunner, string, func()) { @@ -203,6 +204,49 @@ func TestExportOrder(t *testing.T) { } } +func TestExportUserDefinedTypes(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() + baseDir, cleanup := testutils.TempDir(t) + defer cleanup() + tc := testcluster.StartTestCluster( + t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: baseDir}}) + defer tc.Stopper().Stop(ctx) + conn := tc.Conns[0] + sqlDB := 
sqlutils.MakeSQLRunner(conn) + // Set up some initial state for the tests. + sqlDB.Exec(t, ` +SET experimental_enable_enums = true; +CREATE TYPE greeting AS ENUM ('hello', 'hi'); +CREATE TABLE greeting_table (x greeting, y greeting); +INSERT INTO greeting_table VALUES ('hello', 'hello'), ('hi', 'hi'); +`) + tests := []struct { + stmt string + expected string + }{ + { + stmt: "EXPORT INTO CSV 'nodelocal://0/test/' FROM (SELECT 'hello':::greeting, 'hi':::greeting)", + expected: "hello,hi\n", + }, + { + stmt: "EXPORT INTO CSV 'nodelocal://0/test/' FROM TABLE greeting_table", + expected: "hello,hello\nhi,hi\n", + }, + { + stmt: "EXPORT INTO CSV 'nodelocal://0/test/' FROM (SELECT x, y, enum_first(x) FROM greeting_table)", + expected: "hello,hello,hello\nhi,hi,hello\n", + }, + } + for _, test := range tests { + sqlDB.Exec(t, test.stmt) + // Read the dumped file. + contents, err := ioutil.ReadFile(filepath.Join(baseDir, "test", "n1.0.csv")) + require.NoError(t, err) + require.Equal(t, test.expected, string(contents)) + } +} + func TestExportOrderCompressed(t *testing.T) { defer leaktest.AfterTest(t)() dir, cleanupDir := testutils.TempDir(t) diff --git a/pkg/ccl/importccl/import_processor_test.go b/pkg/ccl/importccl/import_processor_test.go index 55cffd80f364..542c5edb34fe 100644 --- a/pkg/ccl/importccl/import_processor_test.go +++ b/pkg/ccl/importccl/import_processor_test.go @@ -659,8 +659,8 @@ func TestCSVImportCanBeResumed(t *testing.T) { resumePos := js.prog.ResumePos[0] t.Logf("Resume pos: %v\n", js.prog.ResumePos[0]) - // Resume the job and wait for it to complete. - if err := registry.Resume(ctx, nil, jobID); err != nil { + // Unpause the job and wait for it to complete. 
+ if err := registry.Unpause(ctx, nil, jobID); err != nil { t.Fatal(err) } js = queryJobUntil(t, sqlDB.DB, jobID, func(js jobState) bool { return jobs.StatusSucceeded == js.status }) @@ -763,8 +763,8 @@ func TestCSVImportMarksFilesFullyProcessed(t *testing.T) { // Send cancellation and unblock import. proceedImport() - // Resume the job and wait for it to complete. - if err := registry.Resume(ctx, nil, jobID); err != nil { + // Unpause the job and wait for it to complete. + if err := registry.Unpause(ctx, nil, jobID); err != nil { t.Fatal(err) } js = queryJobUntil(t, sqlDB.DB, jobID, func(js jobState) bool { return jobs.StatusSucceeded == js.status }) diff --git a/pkg/ccl/importccl/import_stmt.go b/pkg/ccl/importccl/import_stmt.go index cd1c6e128237..ee6a90eb2c19 100644 --- a/pkg/ccl/importccl/import_stmt.go +++ b/pkg/ccl/importccl/import_stmt.go @@ -282,7 +282,7 @@ func importPlanHook( // UnresolvedObjectNames here, rather than TableNames. // We have a target table, so it might specify a DB in its name. un := table.ToUnresolvedObjectName() - found, prefix, descI, err := tree.ResolveTarget(ctx, + found, prefix, dbDescI, err := tree.ResolveTarget(ctx, un, p, p.SessionData().Database, p.SessionData().SearchPath) if err != nil { return pgerror.Wrap(err, pgcode.UndefinedTable, @@ -295,7 +295,7 @@ func importPlanHook( return pgerror.Newf(pgcode.UndefinedObject, "database does not exist: %q", table) } - dbDesc := descI.(*sqlbase.DatabaseDescriptor) + dbDesc := dbDescI.(*sqlbase.ImmutableDatabaseDescriptor) // If this is a non-INTO import that will thus be making a new table, we // need the CREATE priv in the target DB. if !importStmt.Into { @@ -303,7 +303,7 @@ func importPlanHook( return err } } - parentID = dbDesc.ID + parentID = dbDesc.GetID() } else { // No target table means we're importing whatever we find into the session // database, so it must exist. 
@@ -319,7 +319,7 @@ func importPlanHook( return err } } - parentID = dbDesc.ID + parentID = dbDesc.GetID() } format := roachpb.IOFileFormat{} @@ -855,11 +855,11 @@ func prepareNewTableDescsForIngestion( ctx context.Context, txn *kv.Txn, p sql.PlanHookState, - tables []jobspb.ImportDetails_Table, + importTables []jobspb.ImportDetails_Table, parentID sqlbase.ID, ) ([]*sqlbase.TableDescriptor, error) { var tableDescs []*sqlbase.TableDescriptor - for _, i := range tables { + for _, i := range importTables { if err := backupccl.CheckTableExists(ctx, txn, p.ExecCfg().Codec, parentID, i.Desc.Name); err != nil { return nil, err } @@ -871,8 +871,8 @@ func prepareNewTableDescsForIngestion( // GenerateUniqueDescID if there's any kind of error above. // Reserving a table ID now means we can avoid the rekey work during restore. tableRewrites := make(backupccl.TableRewriteMap) - seqVals := make(map[sqlbase.ID]int64, len(tables)) - for _, tableDesc := range tables { + seqVals := make(map[sqlbase.ID]int64, len(importTables)) + for _, tableDesc := range importTables { id, err := catalogkv.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec) if err != nil { return nil, err @@ -888,16 +888,18 @@ func prepareNewTableDescsForIngestion( return nil, err } + tables := make([]sqlbase.TableDescriptorInterface, len(tableDescs)) for i := range tableDescs { tableDescs[i].State = sqlbase.TableDescriptor_OFFLINE tableDescs[i].OfflineReason = "importing" - + tables[i] = sqlbase.NewMutableCreatedTableDescriptor(*tableDescs[i]) } var seqValKVs []roachpb.KeyValue - for i := range tableDescs { - if v, ok := seqVals[tableDescs[i].ID]; ok && v != 0 { - key, val, err := sql.MakeSequenceKeyVal(p.ExecCfg().Codec, tableDescs[i], v, false) + for i := range tables { + tableDesc := tables[i].TableDesc() + if v, ok := seqVals[tables[i].GetID()]; ok && v != 0 { + key, val, err := sql.MakeSequenceKeyVal(p.ExecCfg().Codec, tableDesc, v, false) if err != nil { return nil, err } @@ -910,8 +912,8 @@ func 
prepareNewTableDescsForIngestion( // Write the new TableDescriptors and flip the namespace entries over to // them. After this call, any queries on a table will be served by the newly // imported data. - if err := backupccl.WriteTableDescs(ctx, txn, nil /* databases */, tableDescs, tree.RequestedDescriptors, p.ExecCfg().Settings, seqValKVs); err != nil { - return nil, errors.Wrapf(err, "creating tables") + if err := backupccl.WriteTableDescs(ctx, txn, nil /* databases */, tables, tree.RequestedDescriptors, p.ExecCfg().Settings, seqValKVs); err != nil { + return nil, errors.Wrapf(err, "creating tables") } return tableDescs, nil @@ -926,7 +928,7 @@ func prepareExistingTableDescForIngestion( } // TODO(dt): Ensure no other schema changes can start during ingest. - importing := *desc + importing := sqlbase.NewMutableExistingTableDescriptor(*desc) importing.Version++ // Take the table offline for import. // TODO(dt): audit everywhere we get table descs (leases or otherwise) to @@ -949,13 +951,13 @@ func prepareExistingTableDescForIngestion( } err = txn.CPut(ctx, sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.ID), - sqlbase.WrapDescriptor(&importing), + importing.DescriptorProto(), existingDesc) if err != nil { return nil, errors.Wrap(err, "another operation is currently operating on the table") } - return &importing, nil + return importing.TableDesc(), nil // NB: we need to wait for the schema change to show up before it is safe // to ingest, but rather than do that here, we'll wait for this schema // change in the job's Resume hook, before running the ingest phase. 
That @@ -1185,7 +1187,7 @@ func (r *importResumer) publishTables(ctx context.Context, execCfg *sql.Executor } b := txn.NewBatch() for _, tbl := range details.Tables { - tableDesc := *tbl.Desc + tableDesc := sqlbase.NewMutableExistingTableDescriptor(*tbl.Desc) tableDesc.Version++ tableDesc.State = sqlbase.TableDescriptor_PUBLIC @@ -1225,7 +1227,7 @@ func (r *importResumer) publishTables(ctx context.Context, execCfg *sql.Executor } b.CPut( sqlbase.MakeDescMetadataKey(execCfg.Codec, tableDesc.ID), - sqlbase.WrapDescriptor(&tableDesc), + tableDesc.DescriptorProto(), existingDesc) } if err := txn.Run(ctx, b); err != nil { @@ -1339,7 +1341,7 @@ func (r *importResumer) dropTables( dropTime := int64(1) tablesToGC := make([]sqlbase.ID, 0, len(details.Tables)) for _, tbl := range details.Tables { - tableDesc := *tbl.Desc + tableDesc := sqlbase.NewMutableExistingTableDescriptor(*tbl.Desc) tableDesc.Version++ if tbl.IsNew { tableDesc.State = sqlbase.TableDescriptor_DROP @@ -1368,7 +1370,7 @@ func (r *importResumer) dropTables( } b.CPut( sqlbase.MakeDescMetadataKey(execCfg.Codec, tableDesc.ID), - sqlbase.WrapDescriptor(&tableDesc), + tableDesc.DescriptorProto(), existingDesc) } diff --git a/pkg/ccl/importccl/load.go b/pkg/ccl/importccl/load.go index 328cb9d8a44c..e28282808414 100644 --- a/pkg/ccl/importccl/load.go +++ b/pkg/ccl/importccl/load.go @@ -44,14 +44,14 @@ import ( // TestingGetDescriptorFromDB is a wrapper for getDescriptorFromDB. func TestingGetDescriptorFromDB( ctx context.Context, db *gosql.DB, dbName string, -) (*sqlbase.DatabaseDescriptor, error) { +) (*sqlbase.ImmutableDatabaseDescriptor, error) { return getDescriptorFromDB(ctx, db, dbName) } // getDescriptorFromDB returns the descriptor in bytes of the given table name. 
func getDescriptorFromDB( ctx context.Context, db *gosql.DB, dbName string, -) (*sqlbase.DatabaseDescriptor, error) { +) (*sqlbase.ImmutableDatabaseDescriptor, error) { var dbDescBytes []byte // Due to the namespace migration, the row may not exist in system.namespace // so a fallback to system.namespace_deprecated is required. @@ -80,11 +80,15 @@ func getDescriptorFromDB( } return nil, errors.Wrap(err, "fetch database descriptor") } - var dbDescWrapper sqlbase.Descriptor - if err := protoutil.Unmarshal(dbDescBytes, &dbDescWrapper); err != nil { + var desc sqlbase.Descriptor + if err := protoutil.Unmarshal(dbDescBytes, &desc); err != nil { return nil, errors.Wrap(err, "unmarshal database descriptor") } - return dbDescWrapper.GetDatabase(), nil + dbDesc := desc.GetDatabase() + if dbDesc == nil { + return nil, errors.Errorf("found non-database descriptor: %v", desc) + } + return sqlbase.NewImmutableDatabaseDescriptor(*dbDesc), nil } return nil, gosql.ErrNoRows } @@ -149,7 +153,7 @@ func Load( var kvBytes int64 backup := backupccl.BackupManifest{ Descriptors: []sqlbase.Descriptor{ - {Union: &sqlbase.Descriptor_Database{Database: dbDesc}}, + {Union: &sqlbase.Descriptor_Database{Database: dbDesc.DatabaseDesc()}}, }, } for { @@ -205,7 +209,7 @@ func Load( var txn *kv.Txn // At this point the CREATE statements in the loaded SQL do not // use the SERIAL type so we need not process SERIAL types here. 
- desc, err := sql.MakeTableDesc(ctx, txn, nil /* vt */, st, s, dbDesc.ID, keys.PublicSchemaID, + desc, err := sql.MakeTableDesc(ctx, txn, nil /* vt */, st, s, dbDesc.GetID(), keys.PublicSchemaID, 0 /* table ID */, ts, privs, affected, nil, evalCtx, evalCtx.SessionData, false /* temporary */) if err != nil { return backupccl.BackupManifest{}, errors.Wrap(err, "make table desc") diff --git a/pkg/ccl/importccl/load_test.go b/pkg/ccl/importccl/load_test.go index 9fb0f96761b4..f3e5479c3e58 100644 --- a/pkg/ccl/importccl/load_test.go +++ b/pkg/ccl/importccl/load_test.go @@ -54,19 +54,19 @@ func TestGetDescriptorFromDB(t *testing.T) { s, sqlDB, kvDB := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) - aliceDesc := &sqlbase.DatabaseDescriptor{Name: "alice"} - bobDesc := &sqlbase.DatabaseDescriptor{Name: "bob"} + aliceDesc := sqlbase.NewInitialDatabaseDescriptor(10000, "alice") + bobDesc := sqlbase.NewInitialDatabaseDescriptor(9999, "bob") err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } batch := txn.NewBatch() - batch.Put(sqlbase.NewDatabaseKey("bob").Key(keys.SystemSQLCodec), 9999) - batch.Put(sqlbase.NewDeprecatedDatabaseKey("alice").Key(keys.SystemSQLCodec), 10000) + batch.Put(sqlbase.NewDatabaseKey("bob").Key(keys.SystemSQLCodec), bobDesc.GetID()) + batch.Put(sqlbase.NewDeprecatedDatabaseKey("alice").Key(keys.SystemSQLCodec), aliceDesc.GetID()) - batch.Put(sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, 9999), sqlbase.WrapDescriptor(bobDesc)) - batch.Put(sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, 10000), sqlbase.WrapDescriptor(aliceDesc)) + batch.Put(sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, bobDesc.GetID()), bobDesc.DescriptorProto()) + batch.Put(sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, aliceDesc.GetID()), aliceDesc.DescriptorProto()) return txn.CommitInBatch(ctx, batch) }) require.NoError(t, err) @@ -77,8 +77,8 @@ func TestGetDescriptorFromDB(t 
*testing.T) { expected *sqlbase.DatabaseDescriptor expectedErr error }{ - {"bob", bobDesc, nil}, - {"alice", aliceDesc, nil}, + {"bob", bobDesc.DatabaseDesc(), nil}, + {"alice", aliceDesc.DatabaseDesc(), nil}, {"not_found", nil, gosql.ErrNoRows}, } { t.Run(tc.dbName, func(t *testing.T) { @@ -88,7 +88,7 @@ func TestGetDescriptorFromDB(t *testing.T) { assert.Equal(t, tc.expectedErr, err) } else { assert.NoError(t, err) - assert.Equal(t, tc.expected, ret) + assert.Equal(t, tc.expected, ret.DatabaseDesc()) } }) } diff --git a/pkg/ccl/importccl/read_import_base.go b/pkg/ccl/importccl/read_import_base.go index 162614ee7ddc..07bb9059dd6a 100644 --- a/pkg/ccl/importccl/read_import_base.go +++ b/pkg/ccl/importccl/read_import_base.go @@ -395,7 +395,7 @@ type importFileContext struct { // handleCorruptRow reports an error encountered while processing a row // in an input file. func handleCorruptRow(ctx context.Context, fileCtx *importFileContext, err error) error { - log.Errorf(ctx, "%v", err) + log.Errorf(ctx, "%+v", err) if rowErr := (*importRowError)(nil); errors.As(err, &rowErr) && fileCtx.rejected != nil { fileCtx.rejected <- rowErr.row + "\n" @@ -545,7 +545,7 @@ func runParallelImport( } if producer.Err() == nil { - return importer.flush(ctx) + return importer.close(ctx) } return producer.Err() }) @@ -569,22 +569,25 @@ func (p *parallelImporter) add( return nil } -// Flush flushes currently accumulated data. +// close closes this importer, flushing remaining accumulated data if needed. +func (p *parallelImporter) close(ctx context.Context) error { + if len(p.b.data) > 0 { + return p.flush(ctx) + } + return nil +} + +// flush flushes currently accumulated data. func (p *parallelImporter) flush(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() - default: - } - - // if the batch isn't empty, we need to flush it. 
- if len(p.b.data) > 0 { - p.recordCh <- p.b + case p.recordCh <- p.b: p.b = batch{ data: make([]interface{}, 0, cap(p.b.data)), } + return nil } - return nil } func (p *parallelImporter) importWorker( diff --git a/pkg/ccl/importccl/read_import_base_test.go b/pkg/ccl/importccl/read_import_base_test.go index 6687ec8d1be8..05123beef311 100644 --- a/pkg/ccl/importccl/read_import_base_test.go +++ b/pkg/ccl/importccl/read_import_base_test.go @@ -9,9 +9,19 @@ package importccl import ( + "context" + "math/rand" "testing" + "time" + "github.com/cockroachdb/cockroach/pkg/sql/row" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/errors" + "github.com/stretchr/testify/require" ) func TestRejectedFilename(t *testing.T) { @@ -42,3 +52,138 @@ func TestRejectedFilename(t *testing.T) { } } } + +// nilDataProducer produces infinite stream of nulls. +// It implements importRowProducer. +type nilDataProducer struct{} + +func (p *nilDataProducer) Scan() bool { + return true +} + +func (p *nilDataProducer) Err() error { + return nil +} + +func (p *nilDataProducer) Skip() error { + return nil +} + +func (p *nilDataProducer) Row() (interface{}, error) { + return nil, nil +} + +func (p *nilDataProducer) Progress() float32 { + return 0.0 +} + +var _ importRowProducer = &nilDataProducer{} + +// errorReturningConsumer always returns an error. +// It implements importRowConsumer. +type errorReturningConsumer struct { + err error +} + +func (d *errorReturningConsumer) FillDatums( + _ interface{}, _ int64, c *row.DatumRowConverter, +) error { + return d.err +} + +var _ importRowConsumer = &errorReturningConsumer{} + +// nilDataConsumer consumes and emits infinite stream of null. +// it implements importRowConsumer. 
+type nilDataConsumer struct{} + +func (n *nilDataConsumer) FillDatums(_ interface{}, _ int64, c *row.DatumRowConverter) error { + c.Datums[0] = tree.DNull + return nil +} + +var _ importRowConsumer = &nilDataConsumer{} + +func TestParallelImportProducerHandlesConsumerErrors(t *testing.T) { + defer leaktest.AfterTest(t)() + + // Dummy descriptor for import + descr := sqlbase.TableDescriptor{ + Name: "test", + Columns: []sqlbase.ColumnDescriptor{ + {Name: "column", ID: 1, Type: types.Int, Nullable: true}, + }, + } + + // Flush datum converter frequently + defer row.TestingSetDatumRowConverterBatchSize(1)() + + // Create KV channel and arrange for it to be drained + kvCh := make(chan row.KVBatch) + defer close(kvCh) + go func() { + for range kvCh { + } + }() + + // Prepare import context, which flushes to kvCh frequently. + importCtx := ¶llelImportContext{ + numWorkers: 1, + batchSize: 2, + evalCtx: testEvalCtx, + tableDesc: &descr, + kvCh: kvCh, + } + + consumer := &errorReturningConsumer{errors.New("consumer aborted")} + + require.Equal(t, consumer.err, + runParallelImport(context.Background(), importCtx, + &importFileContext{}, &nilDataProducer{}, consumer)) +} + +func TestParallelImportProducerHandlesCancellation(t *testing.T) { + defer leaktest.AfterTest(t)() + + // Dummy descriptor for import + descr := sqlbase.TableDescriptor{ + Name: "test", + Columns: []sqlbase.ColumnDescriptor{ + {Name: "column", ID: 1, Type: types.Int, Nullable: true}, + }, + } + + // Flush datum converter frequently + defer row.TestingSetDatumRowConverterBatchSize(1)() + + // Create KV channel and arrange for it to be drained + kvCh := make(chan row.KVBatch) + defer close(kvCh) + go func() { + for range kvCh { + } + }() + + // Prepare import context, which flushes to kvCh frequently. + importCtx := ¶llelImportContext{ + numWorkers: 1, + batchSize: 2, + evalCtx: testEvalCtx, + tableDesc: &descr, + kvCh: kvCh, + } + + // Run a hundred imports, which will timeout shortly after they start. 
+ require.NoError(t, ctxgroup.GroupWorkers(context.Background(), 100, + func(_ context.Context, _ int) error { + timeout := time.Millisecond * time.Duration(250+rand.Intn(250)) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer func(f func()) { + f() + }(cancel) + require.Equal(t, context.DeadlineExceeded, + runParallelImport(ctx, importCtx, + &importFileContext{}, &nilDataProducer{}, &nilDataConsumer{})) + return nil + })) +} diff --git a/pkg/ccl/partitionccl/partition_test.go b/pkg/ccl/partitionccl/partition_test.go index 84ca48091c0f..9516187f711c 100644 --- a/pkg/ccl/partitionccl/partition_test.go +++ b/pkg/ccl/partitionccl/partition_test.go @@ -74,7 +74,7 @@ type partitioningTest struct { // scans are each a shorthand for an assertion of where data should live. // The map key is the used for the `WHERE` clause of a `SELECT *` and the - // value is a comma separated whitelist of nodes that are allowed to serve + // value is a comma separated allowlist of nodes that are allowed to serve // this query. Example: `map[string]string{`b = 1`: `n2`}` means that // `SELECT * FROM t WHERE b = 1` is required to be served entirely by node2. // @@ -93,7 +93,7 @@ type partitioningTest struct { createStmt string // tableDesc is the TableDescriptor created by `createStmt`. - tableDesc *sqlbase.TableDescriptor + tableDesc *sqlbase.MutableTableDescriptor // zoneConfigStmt contains SQL that effects the zone configs described // by `configs`. 
@@ -135,7 +135,7 @@ func (pt *partitioningTest) parse() error { if err != nil { return err } - pt.parsed.tableDesc = mutDesc.TableDesc() + pt.parsed.tableDesc = mutDesc if err := pt.parsed.tableDesc.ValidateTable(); err != nil { return err } @@ -1262,7 +1262,7 @@ func TestSelectPartitionExprs(t *testing.T) { for _, p := range strings.Split(test.partitions, `,`) { partNames = append(partNames, tree.Name(p)) } - expr, err := selectPartitionExprs(evalCtx, testData.parsed.tableDesc, partNames) + expr, err := selectPartitionExprs(evalCtx, testData.parsed.tableDesc.TableDesc(), partNames) if err != nil { t.Fatalf("%+v", err) } @@ -1273,7 +1273,7 @@ func TestSelectPartitionExprs(t *testing.T) { } t.Run("error", func(t *testing.T) { partNames := tree.NameList{`p33p44`, `nope`} - _, err := selectPartitionExprs(evalCtx, testData.parsed.tableDesc, partNames) + _, err := selectPartitionExprs(evalCtx, testData.parsed.tableDesc.TableDesc(), partNames) if !testutils.IsError(err, `unknown partition`) { t.Errorf(`expected "unknown partition" error got: %+v`, err) } diff --git a/pkg/ccl/partitionccl/zone_test.go b/pkg/ccl/partitionccl/zone_test.go index 6e21b08869b7..3d08c86e327f 100644 --- a/pkg/ccl/partitionccl/zone_test.go +++ b/pkg/ccl/partitionccl/zone_test.go @@ -289,7 +289,7 @@ func TestGenerateSubzoneSpans(t *testing.T) { clusterID := uuid.MakeV4() hasNewSubzones := false spans, err := sql.GenerateSubzoneSpans( - cluster.NoSettings, clusterID, keys.SystemSQLCodec, test.parsed.tableDesc, test.parsed.subzones, hasNewSubzones) + cluster.NoSettings, clusterID, keys.SystemSQLCodec, test.parsed.tableDesc.TableDesc(), test.parsed.subzones, hasNewSubzones) if err != nil { t.Fatalf("generating subzone spans: %+v", err) } diff --git a/pkg/ccl/storageccl/bench_test.go b/pkg/ccl/storageccl/bench_test.go index 760bc876f0c9..f4a13b62d573 100644 --- a/pkg/ccl/storageccl/bench_test.go +++ b/pkg/ccl/storageccl/bench_test.go @@ -179,9 +179,9 @@ func BenchmarkImport(b *testing.B) { 
b.Fatalf("bad table descriptor: %+v", tableDesc) } oldStartKey = sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, tableDesc, tableDesc.PrimaryIndex.ID) - newDesc := *tableDesc + newDesc := sqlbase.NewMutableCreatedTableDescriptor(*tableDesc) newDesc.ID = id - newDescBytes, err := protoutil.Marshal(sqlbase.WrapDescriptor(&newDesc)) + newDescBytes, err := protoutil.Marshal(newDesc.DescriptorProto()) if err != nil { panic(err) } diff --git a/pkg/ccl/storageccl/key_rewriter_test.go b/pkg/ccl/storageccl/key_rewriter_test.go index 7edbc7eb2a43..d23ba51a0e28 100644 --- a/pkg/ccl/storageccl/key_rewriter_test.go +++ b/pkg/ccl/storageccl/key_rewriter_test.go @@ -58,14 +58,14 @@ func TestPrefixRewriter(t *testing.T) { func TestKeyRewriter(t *testing.T) { defer leaktest.AfterTest(t)() - desc := sqlbase.NamespaceTable + desc := sqlbase.NewMutableCreatedTableDescriptor(sqlbase.NamespaceTable.TableDescriptor) oldID := desc.ID newID := desc.ID + 1 desc.ID = newID rekeys := []roachpb.ImportRequest_TableRekey{ { OldID: uint32(oldID), - NewDesc: mustMarshalDesc(t, &desc), + NewDesc: mustMarshalDesc(t, desc.TableDesc()), }, } @@ -77,7 +77,8 @@ func TestKeyRewriter(t *testing.T) { } t.Run("normal", func(t *testing.T) { - key := sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, &sqlbase.NamespaceTable, desc.PrimaryIndex.ID) + key := sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, + sqlbase.NamespaceTable.TableDesc(), desc.PrimaryIndex.ID) newKey, ok, err := kr.RewriteKey(key, notSpan) if err != nil { t.Fatal(err) @@ -95,7 +96,8 @@ func TestKeyRewriter(t *testing.T) { }) t.Run("prefix end", func(t *testing.T) { - key := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, &sqlbase.NamespaceTable, desc.PrimaryIndex.ID)).PrefixEnd() + key := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, + sqlbase.NamespaceTable.TableDesc(), desc.PrimaryIndex.ID)).PrefixEnd() newKey, ok, err := kr.RewriteKey(key, notSpan) if err != nil { t.Fatal(err) @@ -114,17 +116,18 @@ func 
TestKeyRewriter(t *testing.T) { t.Run("multi", func(t *testing.T) { desc.ID = oldID + 10 - desc2 := sqlbase.DescriptorTable + desc2 := sqlbase.NewMutableCreatedTableDescriptor(sqlbase.DescriptorTable.TableDescriptor) desc2.ID += 10 newKr, err := MakeKeyRewriterFromRekeys([]roachpb.ImportRequest_TableRekey{ - {OldID: uint32(oldID), NewDesc: mustMarshalDesc(t, &desc)}, - {OldID: uint32(sqlbase.DescriptorTable.ID), NewDesc: mustMarshalDesc(t, &desc2)}, + {OldID: uint32(oldID), NewDesc: mustMarshalDesc(t, desc.TableDesc())}, + {OldID: uint32(sqlbase.DescriptorTable.ID), NewDesc: mustMarshalDesc(t, desc2.TableDesc())}, }) if err != nil { t.Fatal(err) } - key := sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, &sqlbase.NamespaceTable, desc.PrimaryIndex.ID) + key := sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, + sqlbase.NamespaceTable.TableDesc(), desc.PrimaryIndex.ID) newKey, ok, err := newKr.RewriteKey(key, notSpan) if err != nil { t.Fatal(err) @@ -143,7 +146,7 @@ func TestKeyRewriter(t *testing.T) { } func mustMarshalDesc(t *testing.T, tableDesc *sqlbase.TableDescriptor) []byte { - desc := sqlbase.WrapDescriptor(tableDesc) + desc := sqlbase.NewImmutableTableDescriptor(*tableDesc).DescriptorProto() // Set the timestamp to a non-zero value. desc.Table(hlc.Timestamp{WallTime: 1}) bytes, err := protoutil.Marshal(desc) diff --git a/pkg/cli/cert.go b/pkg/cli/cert.go index ef054f3ced2a..2997d3f83f7c 100644 --- a/pkg/cli/cert.go +++ b/pkg/cli/cert.go @@ -173,7 +173,7 @@ func runCreateClientCert(cmd *cobra.Command, args []string) error { var err error var username string // We intentionally allow the `node` user to have a cert. 
- if username, err = sql.NormalizeAndValidateUsernameNoBlacklist(args[0]); err != nil { + if username, err = sql.NormalizeAndValidateUsernameNoBlocklist(args[0]); err != nil { return errors.Wrap(err, "failed to generate client certificate and key") } diff --git a/pkg/cli/cliflags/flags.go b/pkg/cli/cliflags/flags.go index 1b2fbf157de5..0f9e1f3ea27b 100644 --- a/pkg/cli/cliflags/flags.go +++ b/pkg/cli/cliflags/flags.go @@ -1172,4 +1172,14 @@ List of nodes to exclude. Can be specified as a comma-delimited list of node IDs or ranges of node IDs, for example: 5,10-20,23. The default is to not exclude any node.`, } + + ZipRedactLogs = FlagInfo{ + Name: "redact-logs", + Description: ` +Redact text that may contain confidential data or PII from retrieved +log entries. Note that this flag only operates on log entries; +other items retrieved by the zip command may still contain +confidential data or PII. +`, + } ) diff --git a/pkg/cli/context.go b/pkg/cli/context.go index c658ca993721..c56c2567c47f 100644 --- a/pkg/cli/context.go +++ b/pkg/cli/context.go @@ -93,6 +93,7 @@ func initCLIDefaults() { sqlCtx.echo = false zipCtx.nodes = nodeSelection{} + zipCtx.redactLogs = false dumpCtx.dumpMode = dumpBoth dumpCtx.asOf = "" @@ -274,6 +275,10 @@ var sqlCtx = struct { // Defaults set by InitCLIDefaults() above. var zipCtx struct { nodes nodeSelection + + // redactLogs indicates whether log files should be redacted + // server-side during retrieval. + redactLogs bool } // dumpCtx captures the command-line parameters of the `dump` command. diff --git a/pkg/cli/debug.go b/pkg/cli/debug.go index 14a89906fa27..6ca6eadf6b03 100644 --- a/pkg/cli/debug.go +++ b/pkg/cli/debug.go @@ -1187,26 +1187,35 @@ the captured parts will be printed. RunE: runDebugMergeLogs, } +// TODO(knz): this struct belongs elsewhere. 
+// See: https://github.com/cockroachdb/cockroach/issues/49509 var debugMergeLogsOpts = struct { - from time.Time - to time.Time - filter *regexp.Regexp - program *regexp.Regexp - file *regexp.Regexp - prefix string + from time.Time + to time.Time + filter *regexp.Regexp + program *regexp.Regexp + file *regexp.Regexp + prefix string + keepRedactable bool + redactInput bool }{ - program: regexp.MustCompile("^cockroach.*$"), - file: regexp.MustCompile(log.FilePattern), + program: regexp.MustCompile("^cockroach.*$"), + file: regexp.MustCompile(log.FilePattern), + keepRedactable: true, + redactInput: false, } func runDebugMergeLogs(cmd *cobra.Command, args []string) error { o := debugMergeLogsOpts + + inputEditMode := log.SelectEditMode(o.redactInput, o.keepRedactable) + s, err := newMergedStreamFromPatterns(context.Background(), - args, o.file, o.program, o.from, o.to) + args, o.file, o.program, o.from, o.to, inputEditMode) if err != nil { return err } - return writeLogStream(s, cmd.OutOrStdout(), o.filter, o.prefix) + return writeLogStream(s, cmd.OutOrStdout(), o.filter, o.prefix, o.keepRedactable) } // DebugCmdsForRocksDB lists debug commands that access rocksdb through the engine @@ -1303,6 +1312,7 @@ func init() { f = debugMergeLogsCommand.Flags() f.Var(flagutil.Time(&debugMergeLogsOpts.from), "from", "time before which messages should be filtered") + // TODO(knz): the "to" should be named "until" - it's a time boundary, not a space boundary. 
f.Var(flagutil.Time(&debugMergeLogsOpts.to), "to", "time after which messages should be filtered") f.Var(flagutil.Regexp(&debugMergeLogsOpts.filter), "filter", @@ -1314,4 +1324,8 @@ func init() { "if no such group exists, program-filter is ignored") f.StringVar(&debugMergeLogsOpts.prefix, "prefix", "${host}> ", "expansion template (see regexp.Expand) used as prefix to merged log messages evaluated on file-pattern") + f.BoolVar(&debugMergeLogsOpts.keepRedactable, "redactable-output", debugMergeLogsOpts.keepRedactable, + "keep the output log file redactable") + f.BoolVar(&debugMergeLogsOpts.redactInput, "redact", debugMergeLogsOpts.redactInput, + "redact the input files to remove sensitive information") } diff --git a/pkg/cli/debug_merge_logs.go b/pkg/cli/debug_merge_logs.go index 970fdfe37491..dbf28786dbe1 100644 --- a/pkg/cli/debug_merge_logs.go +++ b/pkg/cli/debug_merge_logs.go @@ -36,7 +36,9 @@ type logStream interface { // writeLogStream pops messages off of s and writes them to out prepending // prefix per message and filtering messages which match filter. 
-func writeLogStream(s logStream, out io.Writer, filter *regexp.Regexp, prefix string) error { +func writeLogStream( + s logStream, out io.Writer, filter *regexp.Regexp, prefix string, keepRedactable bool, +) error { const chanSize = 1 << 16 // 64k const maxWriteBufSize = 1 << 18 // 256kB @@ -61,6 +63,9 @@ func writeLogStream(s logStream, out io.Writer, filter *regexp.Regexp, prefix st if _, err = w.Write(prefixBytes); err != nil { return err } + if !keepRedactable { + ei.Redactable = false + } return ei.Format(w) } @@ -166,6 +171,7 @@ func newMergedStreamFromPatterns( patterns []string, filePattern, programFilter *regexp.Regexp, from, to time.Time, + editMode log.EditSensitiveData, ) (logStream, error) { paths, err := expandPatterns(patterns) if err != nil { @@ -176,7 +182,7 @@ func newMergedStreamFromPatterns( if err != nil { return nil, err } - return newMergedStream(ctx, files, from, to) + return newMergedStream(ctx, files, from, to, editMode) } func groupIndex(re *regexp.Regexp, groupName string) int { @@ -189,7 +195,7 @@ func groupIndex(re *regexp.Regexp, groupName string) int { } func newMergedStream( - ctx context.Context, files []fileInfo, from, to time.Time, + ctx context.Context, files []fileInfo, from, to time.Time, editMode log.EditSensitiveData, ) (*mergedStream, error) { // TODO(ajwerner): think about clock movement and PID const maxConcurrentFiles = 256 // should be far less than the FD limit @@ -200,7 +206,7 @@ func newMergedStream( return func() error { sem <- struct{}{} defer func() { <-sem }() - s, err := newFileLogStream(files[i], from, to) + s, err := newFileLogStream(files[i], from, to, editMode) if s != nil { res[i] = s } @@ -431,6 +437,7 @@ type fileLogStream struct { f *os.File d *log.EntryDecoder read bool + editMode log.EditSensitiveData e log.Entry err error @@ -442,11 +449,14 @@ type fileLogStream struct { // encountered during the initial peek, that error is returned. 
The underlying // file is always closed before returning from this constructor so the initial // peek does not consume resources. -func newFileLogStream(fi fileInfo, from, to time.Time) (logStream, error) { +func newFileLogStream( + fi fileInfo, from, to time.Time, editMode log.EditSensitiveData, +) (logStream, error) { s := &fileLogStream{ - fi: fi, - from: from, - to: to, + fi: fi, + from: from, + to: to, + editMode: editMode, } if _, ok := s.peek(); !ok { if err := s.error(); err != io.EOF { @@ -469,10 +479,10 @@ func (s *fileLogStream) open() bool { if s.f, s.err = os.Open(s.fi.path); s.err != nil { return false } - if s.err = seekToFirstAfterFrom(s.f, s.from); s.err != nil { + if s.err = seekToFirstAfterFrom(s.f, s.from, s.editMode); s.err != nil { return false } - s.d = log.NewEntryDecoder(bufio.NewReaderSize(s.f, readBufSize)) + s.d = log.NewEntryDecoder(bufio.NewReaderSize(s.f, readBufSize), s.editMode) return true } @@ -527,7 +537,7 @@ func (s *fileLogStream) error() error { return s.err } // seekToFirstAfterFrom uses binary search to seek to an offset after all // entries which occur before from. 
-func seekToFirstAfterFrom(f *os.File, from time.Time) (err error) { +func seekToFirstAfterFrom(f *os.File, from time.Time, editMode log.EditSensitiveData) (err error) { if from.IsZero() { return nil } @@ -546,7 +556,7 @@ func seekToFirstAfterFrom(f *os.File, from time.Time) (err error) { panic(err) } var e log.Entry - err := log.NewEntryDecoder(f).Decode(&e) + err := log.NewEntryDecoder(f, editMode).Decode(&e) if err != nil { if err == io.EOF { return true @@ -559,7 +569,7 @@ func seekToFirstAfterFrom(f *os.File, from time.Time) (err error) { return err } var e log.Entry - if err := log.NewEntryDecoder(f).Decode(&e); err != nil { + if err := log.NewEntryDecoder(f, editMode).Decode(&e); err != nil { return err } _, err = f.Seek(int64(offset), io.SeekStart) diff --git a/pkg/cli/debug_merge_logs_test.go b/pkg/cli/debug_merge_logs_test.go index d4fe488201f6..6c3bc28c479d 100644 --- a/pkg/cli/debug_merge_logs_test.go +++ b/pkg/cli/debug_merge_logs_test.go @@ -30,32 +30,34 @@ type testCase struct { var cases = []testCase{ { - name: "1.all", - args: []string{"testdata/merge_logs/1/*/*"}, + name: "1.all", + args: []string{"testdata/merge_logs/1/*/*"}, + flags: []string{"--redact=false", "--redactable-output=false"}, }, { name: "1.filter-program", args: []string{"testdata/merge_logs/1/*/*"}, - flags: []string{"--program-filter", "not-cockroach"}, + flags: []string{"--redact=false", "--redactable-output=false", "--program-filter", "not-cockroach"}, }, { name: "1.seek-past-end-of-file", args: []string{"testdata/merge_logs/1/*/*"}, - flags: []string{"--from", "181130 22:15:07.525317"}, + flags: []string{"--redact=false", "--redactable-output=false", "--from", "181130 22:15:07.525317"}, }, { name: "1.filter-message", args: []string{"testdata/merge_logs/1/*/*"}, - flags: []string{"--filter", "gossip"}, + flags: []string{"--redact=false", "--redactable-output=false", "--filter", "gossip"}, }, { - name: "2.multiple-files-from-node", - args: []string{"testdata/merge_logs/2/*/*"}, 
+ name: "2.multiple-files-from-node", + args: []string{"testdata/merge_logs/2/*/*"}, + flags: []string{"--redact=false", "--redactable-output=false"}, }, { name: "2.skip-file", args: []string{"testdata/merge_logs/2/*/*"}, - flags: []string{"--from", "181130 22:15:07.525316"}, + flags: []string{"--redact=false", "--redactable-output=false", "--from", "181130 22:15:07.525316"}, }, { name: "2.remove-duplicates", @@ -67,36 +69,36 @@ var cases = []testCase{ "testdata/merge_logs/2/2.logs/cockroach.test-0002.ubuntu.2018-11-30T22_06_47Z.003959.log", "testdata/merge_logs/2/2.logs/roachprod.log", }, - flags: []string{"--from", "181130 22:15:07.525316"}, + flags: []string{"--redact=false", "--redactable-output=false", "--from", "181130 22:15:07.525316"}, }, { name: "3.non-standard", args: []string{"testdata/merge_logs/3/*/*"}, - flags: []string{"--file-pattern", ".*", "--prefix", ""}, + flags: []string{"--redact=false", "--redactable-output=false", "--file-pattern", ".*", "--prefix", ""}, }, { // Prints only lines that match the filter (if no submatches). name: "4.filter", args: []string{"testdata/merge_logs/4/*"}, - flags: []string{"--file-pattern", ".*", "--filter", "3:0"}, + flags: []string{"--redact=false", "--redactable-output=false", "--file-pattern", ".*", "--filter", "3:0"}, }, { // Prints only the submatch. name: "4.filter-submatch", args: []string{"testdata/merge_logs/4/*"}, - flags: []string{"--file-pattern", ".*", "--filter", "(3:)0"}, + flags: []string{"--redact=false", "--redactable-output=false", "--file-pattern", ".*", "--filter", "(3:)0"}, }, { // Prints only the submatches. name: "4.filter-submatch-double", args: []string{"testdata/merge_logs/4/*"}, - flags: []string{"--file-pattern", ".*", "--filter", "(3):(0)"}, + flags: []string{"--redact=false", "--redactable-output=false", "--file-pattern", ".*", "--filter", "(3):(0)"}, }, { // Simple grep for a panic line only. 
name: "4.filter-npe", args: []string{"testdata/merge_logs/4/npe.log"}, - flags: []string{"--file-pattern", ".*", "--filter", `(panic: .*)`}, + flags: []string{"--redact=false", "--redactable-output=false", "--file-pattern", ".*", "--filter", `(panic: .*)`}, }, { // Grep for a panic and a few lines more. This is often not so useful @@ -104,7 +106,7 @@ var cases = []testCase{ // source of the panic is harder to find. name: "4.filter-npe-with-context", args: []string{"testdata/merge_logs/4/npe.log"}, - flags: []string{"--file-pattern", ".*", "--filter", `(?m)(panic:.(?:.*\n){0,5})`}, + flags: []string{"--redact=false", "--redactable-output=false", "--file-pattern", ".*", "--filter", `(?m)(panic:.(?:.*\n){0,5})`}, }, { // This regexp attempts to find the source of the panic, essentially by @@ -121,8 +123,29 @@ var cases = []testCase{ // usually alternate with panic(). name: "4.filter-npe-origin-stack-only", args: []string{"testdata/merge_logs/4/npe-repanic.log"}, // (?:panic\(.*)* - flags: []string{"--file-pattern", ".*", "--filter", `(?m)^(panic\(.*\n.*\n.*\n.*\n[^p].*)`}, - }} + flags: []string{"--redact=false", "--redactable-output=false", "--file-pattern", ".*", "--filter", `(?m)^(panic\(.*\n.*\n.*\n.*\n[^p].*)`}, + }, + { + name: "5.redact-off-redactable-off", + args: []string{"testdata/merge_logs/5/redactable.log"}, + flags: []string{"--redact=false", "--redactable-output=false", "--file-pattern", ".*"}, + }, + { + name: "5.redact-off-redactable-on", + args: []string{"testdata/merge_logs/5/redactable.log"}, + flags: []string{"--redact=false", "--redactable-output=true", "--file-pattern", ".*"}, + }, + { + name: "5.redact-on-redactable-off", + args: []string{"testdata/merge_logs/5/redactable.log"}, + flags: []string{"--redact=true", "--redactable-output=false", "--file-pattern", ".*"}, + }, + { + name: "5.redact-on-redactable-on", + args: []string{"testdata/merge_logs/5/redactable.log"}, + flags: []string{"--redact=true", "--redactable-output=true", 
"--file-pattern", ".*"}, + }, +} func (c testCase) run(t *testing.T) { outBuf := bytes.Buffer{} diff --git a/pkg/cli/flags.go b/pkg/cli/flags.go index 8f55a8dda0fc..dc942d5c8fe9 100644 --- a/pkg/cli/flags.go +++ b/pkg/cli/flags.go @@ -273,9 +273,8 @@ func init() { flag.Hidden = true } switch flag.Name { - case logflags.NoRedirectStderrName: - flag.Hidden = true - case logflags.ShowLogsName: + case logflags.ShowLogsName, // test-only flag + logflags.RedactableLogsName: // support-only flag flag.Hidden = true case logflags.LogToStderrName: // The actual default value for --logtostderr is overridden in @@ -562,6 +561,7 @@ func init() { f := debugZipCmd.Flags() VarFlag(f, &zipCtx.nodes.inclusive, cliflags.ZipNodes) VarFlag(f, &zipCtx.nodes.exclusive, cliflags.ZipExcludeNodes) + BoolFlag(f, &zipCtx.redactLogs, cliflags.ZipRedactLogs, zipCtx.redactLogs) } // Decommission command. diff --git a/pkg/cli/interactive_tests/test_missing_log_output.tcl b/pkg/cli/interactive_tests/test_missing_log_output.tcl index c4c92687a28a..e53817e51ac5 100644 --- a/pkg/cli/interactive_tests/test_missing_log_output.tcl +++ b/pkg/cli/interactive_tests/test_missing_log_output.tcl @@ -97,6 +97,10 @@ send "cat logs/db/logs/cockroach.log\r" eexpect "a SQL panic has occurred" eexpect "helloworld" eexpect "a panic has occurred" +eexpect ":/# " +send "cat logs/db/logs/cockroach-stderr.log\r" +eexpect "panic" +eexpect "helloworld" eexpect "goroutine" eexpect ":/# " diff --git a/pkg/cli/interactive_tests/test_sql_mem_monitor.tcl b/pkg/cli/interactive_tests/test_sql_mem_monitor.tcl index 09a4f116dacd..9efb75daa489 100644 --- a/pkg/cli/interactive_tests/test_sql_mem_monitor.tcl +++ b/pkg/cli/interactive_tests/test_sql_mem_monitor.tcl @@ -43,10 +43,14 @@ eexpect ":/# " send "ulimit -v [ expr {3*$vmem/2} ]\r" eexpect ":/# " -# Start a server with this limit set. The server will now run in the foreground. 
-send "$argv start-single-node --insecure --max-sql-memory=25% --no-redirect-stderr -s=path=logs/db \r" -eexpect "restarted pre-existing node" -sleep 1 +# Start a server with this limit set. +send "$argv start-single-node --insecure --max-sql-memory=25% -s=path=logs/db --background --pid-file=server_pid\r" +eexpect ":/# " +send "$argv sql --insecure -e 'select 1'\r" +eexpect "1 row" +eexpect ":/# " +send "tail -F logs/db/logs/cockroach-stderr.log\r" +eexpect "stderr capture started" # Spawn a client. spawn $argv sql @@ -68,7 +72,8 @@ eexpect root@ # Disable query distribution to force in-memory computation. send "set distsql=off;\r" eexpect SET -send "with a as (select * from generate_series(1,10000)) select * from a as a, a as b, a as c, a as d limit 10;\r" +send "with a as (select * from generate_series(1,10000000)) select * from a as a, a as b, a as c, a as d limit 10;\r" +eexpect "connection lost" # Check that the query crashed the server set spawn_id $shell_spawn_id @@ -83,18 +88,19 @@ expect { "signal SIGSEGV" {} timeout { handle_timeout "memory allocation error" } } +# Stop the tail command. +interrupt eexpect ":/# " # Check that the client got a bad connection error set spawn_id $client_spawn_id -eexpect "bad connection" eexpect root@ end_test start_test "Ensure that memory monitoring prevents crashes" # Re-launch a server with relatively lower limit for SQL memory set spawn_id $shell_spawn_id -send "$argv start-single-node --insecure --max-sql-memory=1000K --no-redirect-stderr -s=path=logs/db \r" +send "$argv start-single-node --insecure --max-sql-memory=1000K -s=path=logs/db \r" eexpect "restarted pre-existing node" sleep 2 diff --git a/pkg/cli/start.go b/pkg/cli/start.go index ee150905ba0d..9014060bf531 100644 --- a/pkg/cli/start.go +++ b/pkg/cli/start.go @@ -1223,11 +1223,6 @@ func setupAndInitializeLoggingAndProfiling( return nil, err } - // NB: this message is a crutch until #33458 is addressed. 
Without it, - // the calls to log.Shout below can be the first use of logging, hitting - // the bug described in the issue. - log.Infof(ctx, "logging to directory %s", logDir) - // Start the log file GC daemon to remove files that make the log // directory too large. log.StartGCDaemon(ctx) @@ -1243,6 +1238,14 @@ func setupAndInitializeLoggingAndProfiling( }() } + // Initialize the redirection of stderr and log redaction. Note, + // this function must be called even if there is no log directory + // configured, to verify whether the combination of requested flags + // is valid. + if _, err := log.SetupRedactionAndStderrRedirects(); err != nil { + return nil, err + } + // We want to be careful to still produce useful debug dumps if the // server configuration has disabled logging to files. outputDirectory := "." diff --git a/pkg/cli/testdata/merge_logs/4/npe-repanic.log b/pkg/cli/testdata/merge_logs/4/npe-repanic.log index ef61d1c33611..ed451f5d640f 100644 --- a/pkg/cli/testdata/merge_logs/4/npe-repanic.log +++ b/pkg/cli/testdata/merge_logs/4/npe-repanic.log @@ -1,4 +1,4 @@ -I190412 10:06:00.490104 183717 ccl/partitionccl/partition_test.go:204 i was created via the following code: +I190412 10:06:00.490104 183717 ccl/partitionccl/partition_test.go:204 i was created via the following code: func main() { defer cleanup() defer cleanup() diff --git a/pkg/cli/testdata/merge_logs/5/redactable.log b/pkg/cli/testdata/merge_logs/5/redactable.log new file mode 100644 index 000000000000..9053f9a06a61 --- /dev/null +++ b/pkg/cli/testdata/merge_logs/5/redactable.log @@ -0,0 +1,2 @@ +I190412 10:06:00.490104 183717 server/server.go:1423 ⋮ safe ‹unsafe› +I190412 10:06:00.490104 183717 server/server.go:1424 unknownsafe diff --git a/pkg/cli/testdata/merge_logs/results/5.redact-off-redactable-off b/pkg/cli/testdata/merge_logs/results/5.redact-off-redactable-off new file mode 100644 index 000000000000..8412e23e7b8f --- /dev/null +++ 
b/pkg/cli/testdata/merge_logs/results/5.redact-off-redactable-off @@ -0,0 +1,2 @@ +> I190412 10:06:00.490104 183717 server/server.go:1423 safe unsafe +> I190412 10:06:00.490104 183717 server/server.go:1424 unknownsafe diff --git a/pkg/cli/testdata/merge_logs/results/5.redact-off-redactable-on b/pkg/cli/testdata/merge_logs/results/5.redact-off-redactable-on new file mode 100644 index 000000000000..6ae8be145614 --- /dev/null +++ b/pkg/cli/testdata/merge_logs/results/5.redact-off-redactable-on @@ -0,0 +1,2 @@ +> I190412 10:06:00.490104 183717 server/server.go:1423 ⋮ safe ‹unsafe› +> I190412 10:06:00.490104 183717 server/server.go:1424 ⋮ ‹unknownsafe› diff --git a/pkg/cli/testdata/merge_logs/results/5.redact-on-redactable-off b/pkg/cli/testdata/merge_logs/results/5.redact-on-redactable-off new file mode 100644 index 000000000000..9a4d3ec83be5 --- /dev/null +++ b/pkg/cli/testdata/merge_logs/results/5.redact-on-redactable-off @@ -0,0 +1,2 @@ +> I190412 10:06:00.490104 183717 server/server.go:1423 safe ‹×› +> I190412 10:06:00.490104 183717 server/server.go:1424 ‹×› diff --git a/pkg/cli/testdata/merge_logs/results/5.redact-on-redactable-on b/pkg/cli/testdata/merge_logs/results/5.redact-on-redactable-on new file mode 100644 index 000000000000..5547f1c32955 --- /dev/null +++ b/pkg/cli/testdata/merge_logs/results/5.redact-on-redactable-on @@ -0,0 +1,2 @@ +> I190412 10:06:00.490104 183717 server/server.go:1423 ⋮ safe ‹×› +> I190412 10:06:00.490104 183717 server/server.go:1424 ⋮ ‹×› diff --git a/pkg/cli/zip.go b/pkg/cli/zip.go index 40b6b677e503..459ab201416e 100644 --- a/pkg/cli/zip.go +++ b/pkg/cli/zip.go @@ -574,7 +574,9 @@ func runDebugZip(cmd *cobra.Command, args []string) (retErr error) { if err := runZipRequestWithTimeout(baseCtx, fmt.Sprintf("requesting log file %s", file.Name), timeout, func(ctx context.Context) error { entries, err = status.LogFile( - ctx, &serverpb.LogFileRequest{NodeId: id, File: file.Name}) + ctx, &serverpb.LogFileRequest{ + NodeId: id, File: 
file.Name, Redact: zipCtx.redactLogs, KeepRedactable: true, + }) return err }); err != nil { if err := z.createError(name, err); err != nil { @@ -586,11 +588,34 @@ func runDebugZip(cmd *cobra.Command, args []string) (retErr error) { if err != nil { return err } + warnRedactLeak := false for _, e := range entries.Entries { + // If the user requests redaction, and some non-redactable + // data was found in the log, *despite KeepRedactable + // being set*, this means that this zip client is talking + // to a node that doesn't yet know how to redact. This + // also means that node may be leaking sensitive data. + // + // In that case, we do the redaction work ourselves in the + // most conservative way possible. (It's not great that + // possibly confidential data flew over the network, but + // at least it stops here.) + if zipCtx.redactLogs && !e.Redactable { + e.Message = "REDACTEDBYZIP" + // We're also going to print a warning at the end. + warnRedactLeak = true + } if err := e.Format(logOut); err != nil { return err } } + if warnRedactLeak { + // Defer the warning, so that it does not get "drowned" as + // part of the main zip output. 
+ defer func(fileName string) { + fmt.Fprintf(stderr, "WARNING: server-side redaction failed for %s, completed client-side (--redact-logs=true)\n", fileName) + }(file.Name) + } } } diff --git a/pkg/cli/zip_test.go b/pkg/cli/zip_test.go index c773105b37fd..4c14b268970d 100644 --- a/pkg/cli/zip_test.go +++ b/pkg/cli/zip_test.go @@ -65,7 +65,7 @@ SELECT concat('crdb_internal.', table_name) as name FROM [ SELECT table_name FROM [ SHOW TABLES FROM crdb_internal ] ] WHERE table_name NOT IN ( - -- whitelisted tables that don't need to be in debug zip + -- allowlisted tables that don't need to be in debug zip 'backward_dependencies', 'builtin_functions', 'create_statements', diff --git a/pkg/cmd/compile-builds/main.go b/pkg/cmd/compile-builds/main.go new file mode 100644 index 000000000000..39954478ce4c --- /dev/null +++ b/pkg/cmd/compile-builds/main.go @@ -0,0 +1,36 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// compile-builds attempts to compile all CRDB builds we support. 
+ +package main + +import ( + "go/build" + "log" + + "github.com/cockroachdb/cockroach/pkg/release" +) + +func main() { + pkg, err := build.Import("github.com/cockroachdb/cockroach", "", build.FindOnly) + if err != nil { + log.Fatalf("unable to locate CRDB directory: %s", err) + } + + for _, target := range release.SupportedTargets { + if err := release.MakeRelease( + target, + pkg.Dir, + ); err != nil { + log.Fatal(err) + } + } +} diff --git a/pkg/cmd/generate-binary/main.go b/pkg/cmd/generate-binary/main.go index 0f19b64d4e34..2b40ab3f0998 100644 --- a/pkg/cmd/generate-binary/main.go +++ b/pkg/cmd/generate-binary/main.go @@ -17,7 +17,7 @@ // The target postgres server must accept plaintext (non-ssl) connections from // the postgres:postgres account. A suitable server can be started with: // -// `docker run -p 127.0.0.1:5432:5432 postgres` +// `docker run -p 127.0.0.1:5432:5432 postgres:11` // // The output of this file generates pkg/sql/pgwire/testdata/encodings.json. package main @@ -197,6 +197,9 @@ var inputs = map[string][]string{ "2.2289971159100284", "3409589268520956934250.234098732045120934701239846", "42", + "42.0", + "420000", + "420000.0", }, "'%s'::float8": { diff --git a/pkg/cmd/publish-artifacts/main.go b/pkg/cmd/publish-artifacts/main.go index c87d976fc046..a1e12909d297 100644 --- a/pkg/cmd/publish-artifacts/main.go +++ b/pkg/cmd/publish-artifacts/main.go @@ -13,7 +13,6 @@ package main import ( "archive/tar" "archive/zip" - "bufio" "bytes" "compress/gzip" "flag" @@ -31,6 +30,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" + "github.com/cockroachdb/cockroach/pkg/release" "github.com/cockroachdb/cockroach/pkg/util/version" "github.com/kr/pretty" ) @@ -56,20 +56,6 @@ var testableS3 = func() (s3putter, error) { return s3.New(sess), nil } -var libsRe = func() *regexp.Regexp { - libs := strings.Join([]string{ - regexp.QuoteMeta("linux-vdso.so."), - regexp.QuoteMeta("librt.so."), 
- regexp.QuoteMeta("libpthread.so."), - regexp.QuoteMeta("libdl.so."), - regexp.QuoteMeta("libm.so."), - regexp.QuoteMeta("libc.so."), - regexp.QuoteMeta("libresolv.so."), - strings.Replace(regexp.QuoteMeta("ld-linux-ARCH.so."), "ARCH", ".*", -1), - }, "|") - return regexp.MustCompile(libs) -}() - var osVersionRe = regexp.MustCompile(`\d+(\.\d+)*-`) var isRelease = flag.Bool("release", false, "build in release mode instead of bleeding-edge mode") @@ -157,17 +143,7 @@ func main() { }) } - for _, target := range []struct { - buildType string - suffix string - }{ - // TODO(tamird): consider shifting this information into the builder - // image; it's conceivable that we'll want to target multiple versions - // of a given triple. - {buildType: "darwin", suffix: ".darwin-10.9-amd64"}, - {buildType: "linux-gnu", suffix: ".linux-2.6.32-gnu-amd64"}, - {buildType: "windows", suffix: ".windows-6.2-amd64.exe"}, - } { + for _, target := range release.SupportedTargets { for i, extraArgs := range []struct { goflags string suffix string @@ -188,9 +164,9 @@ func main() { o.VersionStr = versionStr o.BucketName = bucketName o.Branch = branch - o.BuildType = target.buildType + o.BuildType = target.BuildType o.GoFlags = extraArgs.goflags - o.Suffix = extraArgs.suffix + target.suffix + o.Suffix = extraArgs.suffix + target.Suffix o.Tags = extraArgs.tags log.Printf("building %s", pretty.Sprint(o)) @@ -256,58 +232,30 @@ func buildArchive(svc s3putter, o opts) { } func buildOneCockroach(svc s3putter, o opts) { + log.Printf("building cockroach %s", pretty.Sprint(o)) defer func() { log.Printf("done building cockroach: %s", pretty.Sprint(o)) }() - { - args := []string{o.BuildType} - args = append(args, fmt.Sprintf("%s=%s", "GOFLAGS", o.GoFlags)) - args = append(args, fmt.Sprintf("%s=%s", "SUFFIX", o.Suffix)) - args = append(args, fmt.Sprintf("%s=%s", "TAGS", o.Tags)) - args = append(args, fmt.Sprintf("%s=%s", "BUILDCHANNEL", "official-binary")) - if *isRelease { - args = append(args, 
fmt.Sprintf("%s=%s", "BUILD_TAGGED_RELEASE", "true")) - } - cmd := exec.Command("mkrelease", args...) - cmd.Dir = o.PkgDir - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - log.Printf("%s %s", cmd.Env, cmd.Args) - if err := cmd.Run(); err != nil { - log.Fatalf("%s: %s", cmd.Args, err) - } + opts := []release.MakeReleaseOption{ + release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "GOFLAGS", o.GoFlags)), + release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "SUFFIX", o.Suffix)), + release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "TAGS", o.Tags)), + release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "BUILDCHANNEL", "official-binary")), + } + if *isRelease { + opts = append(opts, release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "BUILD_TAGGED_RELEASE", "true"))) } - if strings.Contains(o.BuildType, "linux") { - binaryName := "./cockroach" + o.Suffix - - cmd := exec.Command(binaryName, "version") - cmd.Dir = o.PkgDir - cmd.Env = append(cmd.Env, "MALLOC_CONF=prof:true") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - log.Printf("%s %s", cmd.Env, cmd.Args) - if err := cmd.Run(); err != nil { - log.Fatalf("%s %s: %s", cmd.Env, cmd.Args, err) - } - - cmd = exec.Command("ldd", binaryName) - cmd.Dir = o.PkgDir - log.Printf("%s %s", cmd.Env, cmd.Args) - out, err := cmd.Output() - if err != nil { - log.Fatalf("%s: out=%q err=%s", cmd.Args, out, err) - } - scanner := bufio.NewScanner(bytes.NewReader(out)) - for scanner.Scan() { - if line := scanner.Text(); !libsRe.MatchString(line) { - log.Fatalf("%s is not properly statically linked:\n%s", binaryName, out) - } - } - if err := scanner.Err(); err != nil { - log.Fatal(err) - } + if err := release.MakeRelease( + release.SupportedTarget{ + BuildType: o.BuildType, + Suffix: o.Suffix, + }, + o.PkgDir, + opts..., + ); err != nil { + log.Fatal(err) } o.Base = "cockroach" + o.Suffix diff --git a/pkg/cmd/publish-provisional-artifacts/main.go b/pkg/cmd/publish-provisional-artifacts/main.go 
index 586dce191840..b92b62783b48 100644 --- a/pkg/cmd/publish-provisional-artifacts/main.go +++ b/pkg/cmd/publish-provisional-artifacts/main.go @@ -13,7 +13,6 @@ package main import ( "archive/tar" "archive/zip" - "bufio" "bytes" "compress/gzip" "flag" @@ -31,8 +30,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" + "github.com/cockroachdb/cockroach/pkg/release" "github.com/cockroachdb/cockroach/pkg/util/version" - "github.com/cockroachdb/errors" "github.com/kr/pretty" ) @@ -49,8 +48,6 @@ type s3I interface { PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) } -type execRunner func(*exec.Cmd) ([]byte, error) - func makeS3() (s3I, error) { sess, err := session.NewSession(&aws.Config{ Region: aws.String("us-east-1"), @@ -61,20 +58,6 @@ func makeS3() (s3I, error) { return s3.New(sess), nil } -var libsRe = func() *regexp.Regexp { - libs := strings.Join([]string{ - regexp.QuoteMeta("linux-vdso.so."), - regexp.QuoteMeta("librt.so."), - regexp.QuoteMeta("libpthread.so."), - regexp.QuoteMeta("libdl.so."), - regexp.QuoteMeta("libm.so."), - regexp.QuoteMeta("libc.so."), - regexp.QuoteMeta("libresolv.so."), - strings.Replace(regexp.QuoteMeta("ld-linux-ARCH.so."), "ARCH", ".*", -1), - }, "|") - return regexp.MustCompile(libs) -}() - var osVersionRe = regexp.MustCompile(`\d+(\.\d+)*-`) var isReleaseF = flag.Bool("release", false, "build in release mode instead of bleeding-edge mode") @@ -106,16 +89,7 @@ func main() { if err != nil { log.Fatalf("Creating AWS S3 session: %s", err) } - execFn := func(c *exec.Cmd) ([]byte, error) { - if c.Stdout != nil { - return nil, errors.New("exec: Stdout already set") - } - var stdout bytes.Buffer - c.Stdout = io.MultiWriter(&stdout, os.Stdout) - err := c.Run() - return stdout.Bytes(), err - } - + execFn := release.DefaultExecFn branch, ok := os.LookupEnv(teamcityBuildBranchKey) if !ok { log.Fatalf("VCS branch environment variable %s is not set", 
teamcityBuildBranchKey) @@ -149,7 +123,7 @@ type runFlags struct { pkgDir string } -func run(svc s3I, execFn execRunner, flags runFlags) { +func run(svc s3I, execFn release.ExecFn, flags runFlags) { // TODO(dan): non-release builds currently aren't broken into the two // phases. Instead, the provisional phase does them both. if !flags.isRelease { @@ -198,17 +172,7 @@ func run(svc s3I, execFn execRunner, flags runFlags) { log.Printf("Using S3 bucket: %s", bucketName) var cockroachBuildOpts []opts - for _, target := range []struct { - buildType string - suffix string - }{ - // TODO(tamird): consider shifting this information into the builder - // image; it's conceivable that we'll want to target multiple versions - // of a given triple. - {buildType: "darwin", suffix: ".darwin-10.9-amd64"}, - {buildType: "linux-gnu", suffix: ".linux-2.6.32-gnu-amd64"}, - {buildType: "windows", suffix: ".windows-6.2-amd64.exe"}, - } { + for _, target := range release.SupportedTargets { for i, extraArgs := range []struct { goflags string suffix string @@ -229,9 +193,9 @@ func run(svc s3I, execFn execRunner, flags runFlags) { o.Branch = flags.branch o.VersionStr = versionStr o.BucketName = bucketName - o.BuildType = target.buildType + o.BuildType = target.BuildType o.GoFlags = extraArgs.goflags - o.Suffix = extraArgs.suffix + target.suffix + o.Suffix = extraArgs.suffix + target.Suffix o.Tags = extraArgs.tags o.Base = "cockroach" + o.Suffix @@ -252,7 +216,7 @@ func run(svc s3I, execFn execRunner, flags runFlags) { if flags.doProvisional { for _, o := range cockroachBuildOpts { - buildCockroach(svc, execFn, flags, o) + buildCockroach(execFn, flags, o) absolutePath := filepath.Join(o.PkgDir, o.Base) binary, err := os.Open(absolutePath) @@ -285,7 +249,7 @@ func run(svc s3I, execFn execRunner, flags runFlags) { } } -func buildAndPutArchive(svc s3I, execFn execRunner, o opts) { +func buildAndPutArchive(svc s3I, execFn release.ExecFn, o opts) { log.Printf("building archive %s", 
pretty.Sprint(o)) defer func() { log.Printf("done building archive: %s", pretty.Sprint(o)) @@ -325,59 +289,33 @@ func buildAndPutArchive(svc s3I, execFn execRunner, o opts) { } } -func buildCockroach(svc s3I, execFn execRunner, flags runFlags, o opts) { +func buildCockroach(execFn release.ExecFn, flags runFlags, o opts) { log.Printf("building cockroach %s", pretty.Sprint(o)) defer func() { log.Printf("done building cockroach: %s", pretty.Sprint(o)) }() - { - args := []string{o.BuildType} - args = append(args, fmt.Sprintf("%s=%s", "GOFLAGS", o.GoFlags)) - args = append(args, fmt.Sprintf("%s=%s", "SUFFIX", o.Suffix)) - args = append(args, fmt.Sprintf("%s=%s", "TAGS", o.Tags)) - args = append(args, fmt.Sprintf("%s=%s", "BUILDCHANNEL", "official-binary")) - if flags.isRelease { - args = append(args, fmt.Sprintf("%s=%s", "BUILDINFO_TAG", o.VersionStr)) - args = append(args, fmt.Sprintf("%s=%s", "BUILD_TAGGED_RELEASE", "true")) - } - cmd := exec.Command("mkrelease", args...) - cmd.Dir = o.PkgDir - cmd.Stderr = os.Stderr - log.Printf("%s %s", cmd.Env, cmd.Args) - if out, err := execFn(cmd); err != nil { - log.Fatalf("%s %s: %s\n\n%s", cmd.Env, cmd.Args, err, out) - } + opts := []release.MakeReleaseOption{ + release.WithMakeReleaseOptionExecFn(execFn), + release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "GOFLAGS", o.GoFlags)), + release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "SUFFIX", o.Suffix)), + release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "TAGS", o.Tags)), + release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "BUILDCHANNEL", "official-binary")), } - - if strings.Contains(o.BuildType, "linux") { - binaryName := "./cockroach" + o.Suffix - - cmd := exec.Command(binaryName, "version") - cmd.Dir = o.PkgDir - cmd.Env = append(cmd.Env, "MALLOC_CONF=prof:true") - cmd.Stderr = os.Stderr - log.Printf("%s %s", cmd.Env, cmd.Args) - if out, err := execFn(cmd); err != nil { - log.Fatalf("%s %s: %s\n\n%s", cmd.Env, cmd.Args, err, out) - } - - 
cmd = exec.Command("ldd", binaryName) - cmd.Dir = o.PkgDir - log.Printf("%s %s", cmd.Env, cmd.Args) - out, err := execFn(cmd) - if err != nil { - log.Fatalf("%s: out=%q err=%s", cmd.Args, out, err) - } - scanner := bufio.NewScanner(bytes.NewReader(out)) - for scanner.Scan() { - if line := scanner.Text(); !libsRe.MatchString(line) { - log.Fatalf("%s is not properly statically linked:\n%s", binaryName, out) - } - } - if err := scanner.Err(); err != nil { - log.Fatal(err) - } + if flags.isRelease { + opts = append(opts, release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "BUILDINFO_TAG", o.VersionStr))) + opts = append(opts, release.WithMakeReleaseOptionBuildArg(fmt.Sprintf("%s=%s", "BUILD_TAGGED_RELEASE", "true"))) + } + + if err := release.MakeRelease( + release.SupportedTarget{ + BuildType: o.BuildType, + Suffix: o.Suffix, + }, + o.PkgDir, + opts..., + ); err != nil { + log.Fatal(err) } } diff --git a/pkg/cmd/publish-provisional-artifacts/main_test.go b/pkg/cmd/publish-provisional-artifacts/main_test.go index c9b74fe55e1f..25a1f19e0602 100644 --- a/pkg/cmd/publish-provisional-artifacts/main_test.go +++ b/pkg/cmd/publish-provisional-artifacts/main_test.go @@ -21,6 +21,7 @@ import ( "unicode/utf8" "github.com/aws/aws-sdk-go/service/s3" + "github.com/cockroachdb/cockroach/pkg/release" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" @@ -227,7 +228,7 @@ func TestBless(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { var s3 mockS3 - var execFn execRunner // bless shouldn't exec anything + var execFn release.ExecFn // bless shouldn't exec anything run(&s3, execFn, test.flags) require.Equal(t, test.expectedGets, s3.gets) require.Equal(t, test.expectedPuts, s3.puts) diff --git a/pkg/cmd/roachprod/install/cassandra_yaml.go b/pkg/cmd/roachprod/install/cassandra_yaml.go index d14346f61cf6..1676bd8c87b6 100644 --- a/pkg/cmd/roachprod/install/cassandra_yaml.go +++ 
b/pkg/cmd/roachprod/install/cassandra_yaml.go @@ -88,7 +88,7 @@ num_tokens: 256 # May either be "true" or "false" to enable globally hinted_handoff_enabled: true -# When hinted_handoff_enabled is true, a black list of data centers that will not +# When hinted_handoff_enabled is true, a blocklist of data centers that will not # perform hinted handoff # hinted_handoff_disabled_datacenters: # - DC1 diff --git a/pkg/cmd/roachtest/bank.go b/pkg/cmd/roachtest/bank.go index a817446daadb..89ae1310de41 100644 --- a/pkg/cmd/roachtest/bank.go +++ b/pkg/cmd/roachtest/bank.go @@ -336,7 +336,7 @@ func isExpectedRelocateError(err error) bool { // for more failure modes not caught here. We decided to avoid adding // to this catchall and to fix the root causes instead. // We've also seen "breaker open" errors here. - whitelist := []string{ + allowlist := []string{ "descriptor changed", "unable to remove replica .* which is not present", "unable to add replica .* which is already present", @@ -344,7 +344,7 @@ func isExpectedRelocateError(err error) bool { "failed to apply snapshot: raft group deleted", "snapshot failed:", } - pattern := "(" + strings.Join(whitelist, "|") + ")" + pattern := "(" + strings.Join(allowlist, "|") + ")" return testutils.IsError(err, pattern) } diff --git a/pkg/cmd/roachtest/blacklist_test.go b/pkg/cmd/roachtest/blocklist_test.go similarity index 79% rename from pkg/cmd/roachtest/blacklist_test.go rename to pkg/cmd/roachtest/blocklist_test.go index 1fad6dec5796..8cf6e35e59ac 100644 --- a/pkg/cmd/roachtest/blacklist_test.go +++ b/pkg/cmd/roachtest/blocklist_test.go @@ -24,22 +24,22 @@ import ( ) const githubAPITokenEnv = "GITHUB_API_TOKEN" -const runBlacklistEnv = "RUN_BLACKLIST_TEST" +const runBlocklistEnv = "RUN_BLOCKLIST_TEST" -func TestBlacklists(t *testing.T) { - if _, ok := os.LookupEnv(runBlacklistEnv); !ok { - t.Skipf("Blackist test is only run if %s is set", runBlacklistEnv) +func TestBlocklists(t *testing.T) { + if _, ok := 
os.LookupEnv(runBlocklistEnv); !ok { + t.Skipf("Blocklist test is only run if %s is set", runBlocklistEnv) } - blacklists := map[string]blacklist{ - "hibernate": hibernateBlackList20_1, - "pgjdbc": pgjdbcBlackList20_1, - "psycopg": psycopgBlackList20_1, - "django": djangoBlacklist20_1, - "sqlAlchemy": sqlAlchemyBlacklist20_1, - "libpq": libPQBlacklist20_1, - "gopg": gopgBlackList20_1, - "pgx": pgxBlacklist20_1, + blocklists := map[string]blocklist{ + "hibernate": hibernateBlockList20_1, + "pgjdbc": pgjdbcBlockList20_1, + "psycopg": psycopgBlockList20_1, + "django": djangoBlocklist20_1, + "sqlAlchemy": sqlAlchemyBlocklist20_1, + "libpq": libPQBlocklist20_1, + "gopg": gopgBlockList20_1, + "pgx": pgxBlocklist20_1, } type reasonCount struct { reason string @@ -48,7 +48,7 @@ func TestBlacklists(t *testing.T) { } var failureMap = make(map[string]*reasonCount, 200) - for suite, bl := range blacklists { + for suite, bl := range blocklists { for _, reason := range bl { if _, ok := failureMap[reason]; !ok { failureMap[reason] = &reasonCount{ @@ -106,6 +106,6 @@ func TestBlacklists(t *testing.T) { } if anyClosed { - t.Fatal("Some closed issues appear in blacklists") + t.Fatal("Some closed issues appear in blocklists") } } diff --git a/pkg/cmd/roachtest/canary.go b/pkg/cmd/roachtest/canary.go index 8c0e823b7a09..350e0bb9cc5e 100644 --- a/pkg/cmd/roachtest/canary.go +++ b/pkg/cmd/roachtest/canary.go @@ -28,35 +28,35 @@ import ( // TODO(bram): There are more common elements between all the canary tests, // factor more of them into here. -// blacklist is a lists of known test errors and failures. -type blacklist map[string]string +// blocklist is a lists of known test errors and failures. +type blocklist map[string]string -// blacklistForVersion contains both a blacklist of known test errors and +// blocklistForVersion contains both a blocklist of known test errors and // failures but also an optional ignorelist for flaky tests. 
// When the test suite is run, the results are compared to this list. -// Any passed test that is not on this blacklist is reported as PASS - expected -// Any passed test that is on this blacklist is reported as PASS - unexpected -// Any failed test that is on this blacklist is reported as FAIL - expected -// Any failed test that is not on blackthis list is reported as FAIL - unexpected -// Any test on this blacklist that is not run is reported as FAIL - not run +// Any passed test that is not on this blocklist is reported as PASS - expected +// Any passed test that is on this blocklist is reported as PASS - unexpected +// Any failed test that is on this blocklist is reported as FAIL - expected +// Any failed test that is not on this blocklist is reported as FAIL - unexpected +// Any test on this blocklist that is not run is reported as FAIL - not run // Ant test in the ignorelist is reported as SKIP if it is run -type blacklistForVersion struct { +type blocklistForVersion struct { versionPrefix string - blacklistname string - blacklist blacklist + blocklistname string + blocklist blocklist ignorelistname string - ignorelist blacklist + ignorelist blocklist } -type blacklistsForVersion []blacklistForVersion +type blocklistsForVersion []blocklistForVersion -// getLists returns the appropriate blacklist and ignorelist based on the +// getLists returns the appropriate blocklist and ignorelist based on the // cockroach version. This check only looks to ensure that the prefix that // matches. 
-func (b blacklistsForVersion) getLists(version string) (string, blacklist, string, blacklist) { +func (b blocklistsForVersion) getLists(version string) (string, blocklist, string, blocklist) { for _, info := range b { if strings.HasPrefix(version, info.versionPrefix) { - return info.blacklistname, info.blacklist, info.ignorelistname, info.ignorelist + return info.blocklistname, info.blocklist, info.ignorelistname, info.ignorelist } } return "", nil, "", nil diff --git a/pkg/cmd/roachtest/django.go b/pkg/cmd/roachtest/django.go index 67895192558a..4d5d4746d260 100644 --- a/pkg/cmd/roachtest/django.go +++ b/pkg/cmd/roachtest/django.go @@ -165,15 +165,15 @@ func registerDjango(r *testRegistry) { t.Fatal(err) } - blacklistName, expectedFailureList, ignoredlistName, ignoredlist := djangoBlacklists.getLists(version) + blocklistName, expectedFailureList, ignoredlistName, ignoredlist := djangoBlocklists.getLists(version) if expectedFailureList == nil { - t.Fatalf("No django blacklist defined for cockroach version %s", version) + t.Fatalf("No django blocklist defined for cockroach version %s", version) } if ignoredlist == nil { t.Fatalf("No django ignorelist defined for cockroach version %s", version) } - c.l.Printf("Running cockroach version %s, using blacklist %s, using ignoredlist %s", - version, blacklistName, ignoredlistName) + c.l.Printf("Running cockroach version %s, using blocklist %s, using ignoredlist %s", + version, blocklistName, ignoredlistName) // TODO (rohany): move this to a file backed buffer if the output becomes // too large. 
@@ -197,7 +197,7 @@ func registerDjango(r *testRegistry) { results := newORMTestsResults() results.parsePythonUnitTestOutput(fullTestResults, expectedFailureList, ignoredlist) results.summarizeAll( - t, "django" /* ormName */, blacklistName, + t, "django" /* ormName */, blocklistName, expectedFailureList, version, djangoLatestTag, ) } diff --git a/pkg/cmd/roachtest/django_blacklist.go b/pkg/cmd/roachtest/django_blocklist.go similarity index 94% rename from pkg/cmd/roachtest/django_blacklist.go rename to pkg/cmd/roachtest/django_blocklist.go index 26790e01ad39..bd465f57d101 100644 --- a/pkg/cmd/roachtest/django_blacklist.go +++ b/pkg/cmd/roachtest/django_blocklist.go @@ -165,18 +165,18 @@ var enabledDjangoTests = []string{ "view_tests", } -var djangoBlacklists = blacklistsForVersion{ - {"v19.2", "djangoBlacklist19_2", djangoBlacklist19_2, "djangoIgnoreList19_2", djangoIgnoreList19_2}, - {"v20.1", "djangoBlacklist20_1", djangoBlacklist20_1, "djangoIgnoreList20_1", djangoIgnoreList20_1}, - {"v20.2", "djangoBlacklist20_2", djangoBlacklist20_2, "djangoIgnoreList20_2", djangoIgnoreList20_2}, +var djangoBlocklists = blocklistsForVersion{ + {"v19.2", "djangoBlocklist19_2", djangoBlocklist19_2, "djangoIgnoreList19_2", djangoIgnoreList19_2}, + {"v20.1", "djangoBlocklist20_1", djangoBlocklist20_1, "djangoIgnoreList20_1", djangoIgnoreList20_1}, + {"v20.2", "djangoBlocklist20_2", djangoBlocklist20_2, "djangoIgnoreList20_2", djangoIgnoreList20_2}, } // Maintain that this list is alphabetized. 
-var djangoBlacklist20_2 = blacklist{} +var djangoBlocklist20_2 = blocklist{} -var djangoBlacklist20_1 = blacklist{} +var djangoBlocklist20_1 = blocklist{} -var djangoBlacklist19_2 = blacklist{ +var djangoBlocklist19_2 = blocklist{ "admin_views.tests.AdminViewBasicTest.test_date_hierarchy_timezone_dst": "unknown", "admin_views.tests.SecureViewTests.test_secure_view_shows_login_if_not_logged_in": "unknown", "admin_views.tests.SecureViewTests.test_staff_member_required_decorator_works_with_argument": "unknown", @@ -218,8 +218,8 @@ var djangoBlacklist19_2 = blacklist{ //"postgres_tests.test_array.TestOtherTypesExactQuerying.test_exact_decimals": "23468", } -var djangoIgnoreList20_2 = blacklist{} +var djangoIgnoreList20_2 = blocklist{} -var djangoIgnoreList20_1 = blacklist{} +var djangoIgnoreList20_1 = blocklist{} -var djangoIgnoreList19_2 = blacklist{} +var djangoIgnoreList19_2 = blocklist{} diff --git a/pkg/cmd/roachtest/gopg.go b/pkg/cmd/roachtest/gopg.go index a2ab7c975bdf..f40c4312d9b4 100644 --- a/pkg/cmd/roachtest/gopg.go +++ b/pkg/cmd/roachtest/gopg.go @@ -92,21 +92,21 @@ func registerGopg(r *testRegistry) { t.Fatal(err) } - blacklistName, expectedFailures, ignorelistName, ignorelist := gopgBlacklists.getLists(version) + blocklistName, expectedFailures, ignorelistName, ignorelist := gopgBlocklists.getLists(version) if expectedFailures == nil { - t.Fatalf("No gopg blacklist defined for cockroach version %s", version) + t.Fatalf("No gopg blocklist defined for cockroach version %s", version) } if ignorelist == nil { t.Fatalf("No gopg ignorelist defined for cockroach version %s", version) } - c.l.Printf("Running cockroach version %s, using blacklist %s, using ignorelist %s", - version, blacklistName, ignorelistName) + c.l.Printf("Running cockroach version %s, using blocklist %s, using ignorelist %s", + version, blocklistName, ignorelistName) _ = c.RunE(ctx, node, fmt.Sprintf("mkdir -p %s", resultsDirPath)) t.Status("running gopg test suite") // go test provides 
colorful output which - when redirected - interferes - // with matching of the blacklisted tests, so we will strip off all color + // with matching of the blocklisted tests, so we will strip off all color // code escape sequences. const removeColorCodes = `sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[mGK]//g"` // Note that this is expected to return an error, since the test suite @@ -145,7 +145,7 @@ func registerGopg(r *testRegistry) { results.parseJUnitXML(t, expectedFailures, ignorelist, xmlResults) results.summarizeFailed( - t, "gopg", blacklistName, expectedFailures, version, latestTag, + t, "gopg", blocklistName, expectedFailures, version, latestTag, 0, /* notRunCount */ ) } @@ -166,7 +166,7 @@ func registerGopg(r *testRegistry) { // test suites from gopg ORM tests. TestGinkgo is a test harness that runs // several test suites described by gopg. func gormParseTestGinkgoOutput( - r *ormTestsResults, rawResults []byte, expectedFailures, ignorelist blacklist, + r *ormTestsResults, rawResults []byte, expectedFailures, ignorelist blocklist, ) (err error) { var ( totalRunCount, totalTestCount int @@ -265,7 +265,7 @@ func gormParseTestGinkgoOutput( } } - // Blacklist contains both the expected failures for "global" tests as well + // Blocklist contains both the expected failures for "global" tests as well // as TestGinkgo's tests. We need to figure the number of the latter ones. 
testGinkgoExpectedFailures := 0 for failure := range expectedFailures { diff --git a/pkg/cmd/roachtest/gopg_blacklist.go b/pkg/cmd/roachtest/gopg_blocklist.go similarity index 93% rename from pkg/cmd/roachtest/gopg_blacklist.go rename to pkg/cmd/roachtest/gopg_blocklist.go index aae504c63ca8..7fb30add732e 100644 --- a/pkg/cmd/roachtest/gopg_blacklist.go +++ b/pkg/cmd/roachtest/gopg_blocklist.go @@ -10,10 +10,10 @@ package main -var gopgBlacklists = blacklistsForVersion{ - {"v19.2", "gopgBlackList19_2", gopgBlackList19_2, "gopgIgnoreList19_2", gopgIgnoreList19_2}, - {"v20.1", "gopgBlackList20_1", gopgBlackList20_1, "gopgIgnoreList20_1", gopgIgnoreList20_1}, - {"v20.2", "gopgBlackList20_2", gopgBlackList20_2, "gopgIgnoreList20_2", gopgIgnoreList20_2}, +var gopgBlocklists = blocklistsForVersion{ + {"v19.2", "gopgBlockList19_2", gopgBlockList19_2, "gopgIgnoreList19_2", gopgIgnoreList19_2}, + {"v20.1", "gopgBlockList20_1", gopgBlockList20_1, "gopgIgnoreList20_1", gopgIgnoreList20_1}, + {"v20.2", "gopgBlockList20_2", gopgBlockList20_2, "gopgIgnoreList20_2", gopgIgnoreList20_2}, } // These are lists of known gopg test errors and failures. @@ -22,12 +22,12 @@ var gopgBlacklists = blacklistsForVersion{ // Any failed test that is not on this list is reported as FAIL - unexpected. // // Please keep these lists alphabetized for easy diffing. -// After a failed run, an updated version of this blacklist should be available +// After a failed run, an updated version of this blocklist should be available // in the test log. 
-var gopgBlackList20_2 = gopgBlackList20_1 +var gopgBlockList20_2 = gopgBlockList20_1 -var gopgBlackList20_1 = blacklist{ +var gopgBlockList20_1 = blocklist{ "pg | CopyFrom/CopyTo | copies corrupted data to a table": "41608", "pg | CopyFrom/CopyTo | copies data from a table and to a table": "41608", "pg | CountEstimate | works": "17511", @@ -47,7 +47,7 @@ var gopgBlackList20_1 = blacklist{ "v9.TestUnixSocket": "31113", } -var gopgBlackList19_2 = blacklist{ +var gopgBlockList19_2 = blocklist{ "pg | CopyFrom/CopyTo | copies corrupted data to a table": "5807", "pg | CopyFrom/CopyTo | copies data from a table and to a table": "5807", "pg | CountEstimate | works": "17511", @@ -97,7 +97,7 @@ var gopgIgnoreList20_2 = gopgIgnoreList20_1 var gopgIgnoreList20_1 = gopgIgnoreList19_2 -var gopgIgnoreList19_2 = blacklist{ +var gopgIgnoreList19_2 = blocklist{ // These "fetching" tests assume a particular order when ORDER BY clause is // omitted from the query by the ORM itself. "pg | ORM slice model | fetches Book relations": "41690", diff --git a/pkg/cmd/roachtest/hibernate.go b/pkg/cmd/roachtest/hibernate.go index 51a9bb3a6774..5a89a932eee9 100644 --- a/pkg/cmd/roachtest/hibernate.go +++ b/pkg/cmd/roachtest/hibernate.go @@ -109,11 +109,11 @@ func registerHibernate(r *testRegistry) { t.Fatal(err) } - blacklistName, expectedFailures, _, _ := hibernateBlacklists.getLists(version) + blocklistName, expectedFailures, _, _ := hibernateBlocklists.getLists(version) if expectedFailures == nil { - t.Fatalf("No hibernate blacklist defined for cockroach version %s", version) + t.Fatalf("No hibernate blocklist defined for cockroach version %s", version) } - c.l.Printf("Running cockroach version %s, using blacklist %s", version, blacklistName) + c.l.Printf("Running cockroach version %s, using blocklist %s", version, blocklistName) t.Status("running hibernate test suite, will take at least 3 hours") // When testing, it is helpful to run only a subset of the tests. 
To do so @@ -172,7 +172,7 @@ func registerHibernate(r *testRegistry) { parseAndSummarizeJavaORMTestsResults( ctx, t, c, node, "hibernate" /* ormName */, output, - blacklistName, expectedFailures, nil /* ignorelist */, version, latestTag, + blocklistName, expectedFailures, nil /* ignorelist */, version, latestTag, ) } diff --git a/pkg/cmd/roachtest/hibernate_blacklist.go b/pkg/cmd/roachtest/hibernate_blocklist.go similarity index 99% rename from pkg/cmd/roachtest/hibernate_blacklist.go rename to pkg/cmd/roachtest/hibernate_blocklist.go index 1a65feab8bce..656fe4791b14 100644 --- a/pkg/cmd/roachtest/hibernate_blacklist.go +++ b/pkg/cmd/roachtest/hibernate_blocklist.go @@ -10,26 +10,26 @@ package main -var hibernateBlacklists = blacklistsForVersion{ - {"v2.0", "hibernateBlackList2_0", hibernateBlackList2_0, "", nil}, - {"v2.1", "hibernateBlackList2_1", hibernateBlackList2_1, "", nil}, - {"v2.2", "hibernateBlackList19_1", hibernateBlackList19_1, "", nil}, - {"v19.1", "hibernateBlackList19_1", hibernateBlackList19_1, "", nil}, - {"v19.2", "hibernateBlackList19_2", hibernateBlackList19_2, "", nil}, - {"v20.1", "hibernateBlackList20_1", hibernateBlackList20_1, "", nil}, - {"v20.2", "hibernateBlackList20_2", hibernateBlackList20_2, "", nil}, +var hibernateBlocklists = blocklistsForVersion{ + {"v2.0", "hibernateBlockList2_0", hibernateBlockList2_0, "", nil}, + {"v2.1", "hibernateBlockList2_1", hibernateBlockList2_1, "", nil}, + {"v2.2", "hibernateBlockList19_1", hibernateBlockList19_1, "", nil}, + {"v19.1", "hibernateBlockList19_1", hibernateBlockList19_1, "", nil}, + {"v19.2", "hibernateBlockList19_2", hibernateBlockList19_2, "", nil}, + {"v20.1", "hibernateBlockList20_1", hibernateBlockList20_1, "", nil}, + {"v20.2", "hibernateBlockList20_2", hibernateBlockList20_2, "", nil}, } // Please keep these lists alphabetized for easy diffing. 
-// After a failed run, an updated version of this blacklist should be available +// After a failed run, an updated version of this blocklist should be available // in the test log. -var hibernateBlackList20_2 = blacklist{} +var hibernateBlockList20_2 = blocklist{} -var hibernateBlackList20_1 = blacklist{} +var hibernateBlockList20_1 = blocklist{} -var hibernateBlackList19_2 = blacklist{} +var hibernateBlockList19_2 = blocklist{} -var hibernateBlackList19_1 = blacklist{ +var hibernateBlockList19_1 = blocklist{ "org.hibernate.jpa.test.criteria.QueryBuilderTest.testDateTimeFunctions": "31708", "org.hibernate.jpa.test.indetifier.AssignedInitialValueTableGeneratorConfiguredTest.testTheFirstGeneratedIdIsEqualToTableGeneratorInitialValuePlusOne": "6583", "org.hibernate.jpa.test.indetifier.AssignedInitialValueTableGeneratorConfiguredTest.testTheGeneratedIdValuesAreCorrect": "6583", @@ -125,7 +125,7 @@ var hibernateBlackList19_1 = blacklist{ "org.hibernate.test.tool.schema.SchemaToolTransactionHandlingTest.testValidateInExistingJtaTransaction": "16769", } -var hibernateBlackList2_1 = blacklist{ +var hibernateBlockList2_1 = blocklist{ "org.hibernate.id.hhh12973.SequenceMismatchStrategyDefaultExceptionTest.test": "unknown", "org.hibernate.id.hhh12973.SequenceMismatchStrategyExceptionEnumTest.test": "unknown", "org.hibernate.id.hhh12973.SequenceMismatchStrategyFixWithSequenceGeneratorTest.test": "unknown", @@ -246,7 +246,7 @@ var hibernateBlackList2_1 = blacklist{ "org.hibernate.test.tool.schema.SchemaToolTransactionHandlingTest.testValidateInExistingJtaTransaction": "16769", } -var hibernateBlackList2_0 = blacklist{ +var hibernateBlockList2_0 = blocklist{ "org.hibernate.engine.spi.ExtraStateTest.shouldMaintainExtraStateWhenUsingIdentityIdGenerationStrategy": "unknown", "org.hibernate.event.EmbeddableCallbackTest.test": "unknown", "org.hibernate.id.CreateDeleteTest.createAndDeleteAnEntityInTheSameTransactionTest": "unknown", diff --git a/pkg/cmd/roachtest/java_helpers.go 
b/pkg/cmd/roachtest/java_helpers.go index c313ab26c12e..2e017942645a 100644 --- a/pkg/cmd/roachtest/java_helpers.go +++ b/pkg/cmd/roachtest/java_helpers.go @@ -114,7 +114,7 @@ func extractFailureFromJUnitXML(contents []byte) ([]string, []status, map[string // parseJUnitXML parses testOutputInJUnitXMLFormat and updates the receiver // accordingly. func (r *ormTestsResults) parseJUnitXML( - t *test, expectedFailures, ignorelist blacklist, testOutputInJUnitXMLFormat []byte, + t *test, expectedFailures, ignorelist blocklist, testOutputInJUnitXMLFormat []byte, ) { tests, statuses, issueHints, err := extractFailureFromJUnitXML(testOutputInJUnitXMLFormat) if err != nil { @@ -170,7 +170,7 @@ func (r *ormTestsResults) parseJUnitXML( // parseAndSummarizeJavaORMTestsResults parses the test output of running a // test suite for some Java ORM against cockroach and summarizes it. If an // unexpected result is observed (for example, a test unexpectedly failed or -// passed), a new blacklist is populated. +// passed), a new blocklist is populated. 
func parseAndSummarizeJavaORMTestsResults( ctx context.Context, t *test, @@ -178,9 +178,9 @@ func parseAndSummarizeJavaORMTestsResults( node nodeListOption, ormName string, testOutput []byte, - blacklistName string, - expectedFailures blacklist, - ignorelist blacklist, + blocklistName string, + expectedFailures blocklist, + ignorelist blocklist, version string, latestTag string, ) { @@ -214,6 +214,6 @@ func parseAndSummarizeJavaORMTestsResults( } results.summarizeAll( - t, ormName, blacklistName, expectedFailures, version, latestTag, + t, ormName, blocklistName, expectedFailures, version, latestTag, ) } diff --git a/pkg/cmd/roachtest/libpq.go b/pkg/cmd/roachtest/libpq.go index cc3776581bb4..c71c8b829616 100644 --- a/pkg/cmd/roachtest/libpq.go +++ b/pkg/cmd/roachtest/libpq.go @@ -81,11 +81,11 @@ func registerLibPQ(r *testRegistry) { _ = c.RunE(ctx, node, fmt.Sprintf("mkdir -p %s", resultsDir)) - blacklistName, expectedFailures, ignorelistName, ignoredFailures := libPQBlacklists.getLists(version) + blocklistName, expectedFailures, ignorelistName, ignoredFailures := libPQBlocklists.getLists(version) if expectedFailures == nil { - t.Fatalf("No lib/pq blacklist defined for cockroach version %s", version) + t.Fatalf("No lib/pq blocklist defined for cockroach version %s", version) } - c.l.Printf("Running cockroach version %s, using blacklist %s, using ignorelist %s", version, blacklistName, ignorelistName) + c.l.Printf("Running cockroach version %s, using blocklist %s, using ignorelist %s", version, blocklistName, ignorelistName) t.Status("running lib/pq test suite and collecting results") @@ -98,7 +98,7 @@ func registerLibPQ(r *testRegistry) { parseAndSummarizeJavaORMTestsResults( ctx, t, c, node, "lib/pq" /* ormName */, []byte(resultsPath), - blacklistName, expectedFailures, ignoredFailures, version, latestTag, + blocklistName, expectedFailures, ignoredFailures, version, latestTag, ) } diff --git a/pkg/cmd/roachtest/libpq_blacklist.go 
b/pkg/cmd/roachtest/libpq_blocklist.go similarity index 92% rename from pkg/cmd/roachtest/libpq_blacklist.go rename to pkg/cmd/roachtest/libpq_blocklist.go index 30004c14f3ae..de54eafca864 100644 --- a/pkg/cmd/roachtest/libpq_blacklist.go +++ b/pkg/cmd/roachtest/libpq_blocklist.go @@ -10,15 +10,15 @@ package main -var libPQBlacklists = blacklistsForVersion{ - {"v19.2", "libPQBlacklist19_2", libPQBlacklist19_2, "libPQIgnorelist19_2", libPQIgnorelist19_2}, - {"v20.1", "libPQBlacklist20_1", libPQBlacklist20_1, "libPQIgnorelist20_1", libPQIgnorelist20_1}, - {"v20.2", "libPQBlacklist20_2", libPQBlacklist20_2, "libPQIgnorelist20_2", libPQIgnorelist20_2}, +var libPQBlocklists = blocklistsForVersion{ + {"v19.2", "libPQBlocklist19_2", libPQBlocklist19_2, "libPQIgnorelist19_2", libPQIgnorelist19_2}, + {"v20.1", "libPQBlocklist20_1", libPQBlocklist20_1, "libPQIgnorelist20_1", libPQIgnorelist20_1}, + {"v20.2", "libPQBlocklist20_2", libPQBlocklist20_2, "libPQIgnorelist20_2", libPQIgnorelist20_2}, } -var libPQBlacklist20_2 = libPQBlacklist20_1 +var libPQBlocklist20_2 = libPQBlocklist20_1 -var libPQBlacklist20_1 = blacklist{ +var libPQBlocklist20_1 = blocklist{ "pq.TestBinaryByteSliceToInt": "41547", "pq.TestBinaryByteSlicetoUUID": "41547", "pq.TestByteaOutputFormats": "26947", @@ -53,7 +53,7 @@ var libPQBlacklist20_1 = blacklist{ "pq.TestStringWithNul": "26366", } -var libPQBlacklist19_2 = blacklist{ +var libPQBlocklist19_2 = blocklist{ "pq.TestBinaryByteSliceToInt": "41547", "pq.TestBinaryByteSlicetoUUID": "41547", "pq.TestBindError": "5807", @@ -105,7 +105,7 @@ var libPQIgnorelist20_2 = libPQIgnorelist20_1 var libPQIgnorelist20_1 = libPQIgnorelist19_2 -var libPQIgnorelist19_2 = blacklist{ +var libPQIgnorelist19_2 = blocklist{ // TestFormatTsBacked fails due to not returning an error for accepting a // timestamp format that postgres does not. 
"pq.TestFormatTsBackend": "41690", diff --git a/pkg/cmd/roachtest/log.go b/pkg/cmd/roachtest/log.go index e1c5698c26c9..973034502e5e 100644 --- a/pkg/cmd/roachtest/log.go +++ b/pkg/cmd/roachtest/log.go @@ -250,7 +250,7 @@ func (l *logger) Printf(f string, args ...interface{}) { // which stack frame is reported as the file:line in the message. depth=1 is // equivalent to PrintfCtx. E.g. pass 2 to ignore the caller's frame. func (l *logger) PrintfCtxDepth(ctx context.Context, depth int, f string, args ...interface{}) { - msg := crdblog.MakeMessage(ctx, f, args) + msg := crdblog.FormatWithContextTags(ctx, f, args...) if err := l.stdoutL.Output(depth+1, msg); err != nil { // Changing our interface to return an Error from a logging method seems too // onerous. Let's yell to the default logger and if that fails, oh well. @@ -264,7 +264,7 @@ func (l *logger) ErrorfCtx(ctx context.Context, f string, args ...interface{}) { } func (l *logger) ErrorfCtxDepth(ctx context.Context, depth int, f string, args ...interface{}) { - msg := crdblog.MakeMessage(ctx, f, args) + msg := crdblog.FormatWithContextTags(ctx, f, args...) if err := l.stderrL.Output(depth+1, msg); err != nil { // Changing our interface to return an Error from a logging method seems too // onerous. Let's yell to the default logger and if that fails, oh well. diff --git a/pkg/cmd/roachtest/orm_helpers.go b/pkg/cmd/roachtest/orm_helpers.go index e0975156d9da..4af7759738dd 100644 --- a/pkg/cmd/roachtest/orm_helpers.go +++ b/pkg/cmd/roachtest/orm_helpers.go @@ -114,9 +114,9 @@ func newORMTestsResults() *ormTestsResults { // summarizeAll summarizes the result of running an ORM or a driver test suite // against a cockroach node. If an unexpected result is observed (for example, -// a test unexpectedly failed or passed), a new blacklist is populated. +// a test unexpectedly failed or passed), a new blocklist is populated. 
func (r *ormTestsResults) summarizeAll( - t *test, ormName, blacklistName string, expectedFailures blacklist, version, latestTag string, + t *test, ormName, blocklistName string, expectedFailures blocklist, version, latestTag string, ) { // Collect all the tests that were not run. notRunCount := 0 @@ -142,7 +142,7 @@ func (r *ormTestsResults) summarizeAll( t.l.Printf("------------------------\n") r.summarizeFailed( - t, ormName, blacklistName, expectedFailures, version, latestTag, notRunCount, + t, ormName, blocklistName, expectedFailures, version, latestTag, notRunCount, ) } @@ -152,8 +152,8 @@ func (r *ormTestsResults) summarizeAll( // If a test suite outputs only the failures, then this method should be used. func (r *ormTestsResults) summarizeFailed( t *test, - ormName, blacklistName string, - expectedFailures blacklist, + ormName, blocklistName string, + expectedFailures blocklist, version, latestTag string, notRunCount int, ) { @@ -193,11 +193,11 @@ func (r *ormTestsResults) summarizeFailed( if r.failUnexpectedCount > 0 || r.passUnexpectedCount > 0 || notRunCount > 0 || r.unexpectedSkipCount > 0 { - // Create a new blacklist so we can easily update this test. + // Create a new blocklist so we can easily update this test. 
sort.Strings(r.currentFailures) var b strings.Builder - fmt.Fprintf(&b, "Here is new %s blacklist that can be used to update the test:\n\n", ormName) - fmt.Fprintf(&b, "var %s = blacklist{\n", blacklistName) + fmt.Fprintf(&b, "Here is new %s blocklist that can be used to update the test:\n\n", ormName) + fmt.Fprintf(&b, "var %s = blocklist{\n", blocklistName) for _, test := range r.currentFailures { issue := expectedFailures[test] if len(issue) == 0 || issue == "unknown" { @@ -211,9 +211,9 @@ func (r *ormTestsResults) summarizeFailed( fmt.Fprintf(&b, "}\n\n") t.l.Printf("\n\n%s\n\n", b.String()) t.l.Printf("------------------------\n") - t.Fatalf("\n%s\nAn updated blacklist (%s) is available in the artifacts' %s log\n", + t.Fatalf("\n%s\nAn updated blocklist (%s) is available in the artifacts' %s log\n", bResults.String(), - blacklistName, + blocklistName, ormName, ) } diff --git a/pkg/cmd/roachtest/pgjdbc.go b/pkg/cmd/roachtest/pgjdbc.go index e295e4950731..5782dc8adff6 100644 --- a/pkg/cmd/roachtest/pgjdbc.go +++ b/pkg/cmd/roachtest/pgjdbc.go @@ -117,14 +117,14 @@ func registerPgjdbc(r *testRegistry) { t.Fatal(err) } - blacklistName, expectedFailures, ignorelistName, ignorelist := pgjdbcBlacklists.getLists(version) + blocklistName, expectedFailures, ignorelistName, ignorelist := pgjdbcBlocklists.getLists(version) if expectedFailures == nil { - t.Fatalf("No pgjdbc blacklist defined for cockroach version %s", version) + t.Fatalf("No pgjdbc blocklist defined for cockroach version %s", version) } - status := fmt.Sprintf("Running cockroach version %s, using blacklist %s", version, blacklistName) + status := fmt.Sprintf("Running cockroach version %s, using blocklist %s", version, blocklistName) if ignorelist != nil { - status = fmt.Sprintf("Running cockroach version %s, using blacklist %s, using ignorelist %s", - version, blacklistName, ignorelistName) + status = fmt.Sprintf("Running cockroach version %s, using blocklist %s, using ignorelist %s", + version, 
blocklistName, ignorelistName) } c.l.Printf("%s", status) @@ -173,7 +173,7 @@ func registerPgjdbc(r *testRegistry) { parseAndSummarizeJavaORMTestsResults( ctx, t, c, node, "pgjdbc" /* ormName */, output, - blacklistName, expectedFailures, ignorelist, version, latestTag, + blocklistName, expectedFailures, ignorelist, version, latestTag, ) } diff --git a/pkg/cmd/roachtest/pgjdbc_blacklist.go b/pkg/cmd/roachtest/pgjdbc_blocklist.go similarity index 99% rename from pkg/cmd/roachtest/pgjdbc_blacklist.go rename to pkg/cmd/roachtest/pgjdbc_blocklist.go index af42084cf901..4f8edb00edd6 100644 --- a/pkg/cmd/roachtest/pgjdbc_blacklist.go +++ b/pkg/cmd/roachtest/pgjdbc_blocklist.go @@ -10,21 +10,21 @@ package main -var pgjdbcBlacklists = blacklistsForVersion{ - {"v2.1", "pgjdbcBlackList2_1", pgjdbcBlackList2_1, "", nil}, - {"v2.2", "pgjdbcBlackList19_1", pgjdbcBlackList19_1, "", pgjdbcIgnoreList19_1}, - {"v19.1", "pgjdbcBlackList19_1", pgjdbcBlackList19_1, "", pgjdbcIgnoreList19_1}, - {"v19.2", "pgjdbcBlackList19_2", pgjdbcBlackList19_2, "pgjdbcIgnoreList19_2", pgjdbcIgnoreList19_2}, - {"v20.1", "pgjdbcBlackList20_1", pgjdbcBlackList20_1, "pgjdbcIgnoreList20_1", pgjdbcIgnoreList20_1}, - {"v20.2", "pgjdbcBlackList20_2", pgjdbcBlackList20_2, "pgjdbcIgnoreList20_2", pgjdbcIgnoreList20_2}, +var pgjdbcBlocklists = blocklistsForVersion{ + {"v2.1", "pgjdbcBlockList2_1", pgjdbcBlockList2_1, "", nil}, + {"v2.2", "pgjdbcBlockList19_1", pgjdbcBlockList19_1, "", pgjdbcIgnoreList19_1}, + {"v19.1", "pgjdbcBlockList19_1", pgjdbcBlockList19_1, "", pgjdbcIgnoreList19_1}, + {"v19.2", "pgjdbcBlockList19_2", pgjdbcBlockList19_2, "pgjdbcIgnoreList19_2", pgjdbcIgnoreList19_2}, + {"v20.1", "pgjdbcBlockList20_1", pgjdbcBlockList20_1, "pgjdbcIgnoreList20_1", pgjdbcIgnoreList20_1}, + {"v20.2", "pgjdbcBlockList20_2", pgjdbcBlockList20_2, "pgjdbcIgnoreList20_2", pgjdbcIgnoreList20_2}, } // Please keep these lists alphabetized for easy diffing. 
-// After a failed run, an updated version of this blacklist should be available +// After a failed run, an updated version of this blocklist should be available // in the test log. -var pgjdbcBlackList20_2 = pgjdbcBlackList20_1 +var pgjdbcBlockList20_2 = pgjdbcBlockList20_1 -var pgjdbcBlackList20_1 = blacklist{ +var pgjdbcBlockList20_1 = blocklist{ "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508", @@ -1149,7 +1149,7 @@ var pgjdbcBlackList20_1 = blacklist{ "org.postgresql.test.xa.XADataSourceTest.testWrapperEquals": "22329", } -var pgjdbcBlackList19_1 = blacklist{ +var pgjdbcBlockList19_1 = blocklist{ "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508", @@ -2666,7 +2666,7 @@ var pgjdbcBlackList19_1 = blacklist{ "org.postgresql.test.xa.XADataSourceTest.testWrapperEquals": "22329", } -var pgjdbcBlackList19_2 = blacklist{ +var pgjdbcBlockList19_2 = blocklist{ "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508", @@ -4138,7 +4138,7 @@ var pgjdbcBlackList19_2 = blacklist{ "org.postgresql.test.xa.XADataSourceTest.testWrapperEquals": "22329", } -var pgjdbcBlackList2_1 = blacklist{ +var pgjdbcBlockList2_1 = blocklist{ "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": 
"26508", "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508", @@ -5551,14 +5551,14 @@ var pgjdbcIgnoreList20_2 = pgjdbcIgnoreList20_1 var pgjdbcIgnoreList20_1 = pgjdbcIgnoreList19_2 -var pgjdbcIgnoreList19_2 = blacklist{ +var pgjdbcIgnoreList19_2 = blocklist{ "org.postgresql.replication.ReplicationTestSuite.org.postgresql.replication.ReplicationTestSuite": "expected fail - no replication", "org.postgresql.test.core.LogServerMessagePropertyTest.testWithDefaults": "expected fail - checks error message", "org.postgresql.test.core.LogServerMessagePropertyTest.testWithExplicitlyEnabled": "expected fail - checks error message", "org.postgresql.test.core.LogServerMessagePropertyTest.testWithLogServerErrorDetailDisabled": "expected fail - checks error message", } -var pgjdbcIgnoreList19_1 = blacklist{ +var pgjdbcIgnoreList19_1 = blocklist{ "org.postgresql.replication.ReplicationTestSuite.org.postgresql.replication.ReplicationTestSuite": "expected fail - no replication", "org.postgresql.test.core.LogServerMessagePropertyTest.testWithDefaults": "expected fail - checks error message", "org.postgresql.test.core.LogServerMessagePropertyTest.testWithExplicitlyEnabled": "expected fail - checks error message", diff --git a/pkg/cmd/roachtest/pgx.go b/pkg/cmd/roachtest/pgx.go index 231362d16624..c6f88dc75605 100644 --- a/pkg/cmd/roachtest/pgx.go +++ b/pkg/cmd/roachtest/pgx.go @@ -66,15 +66,15 @@ func registerPgx(r *testRegistry) { t.Fatal(err) } - t.Status("checking blacklist") - blacklistName, expectedFailures, ignorelistName, ignorelist := pgxBlacklists.getLists(version) + t.Status("checking blocklist") + blocklistName, expectedFailures, ignorelistName, ignorelist := pgxBlocklists.getLists(version) if expectedFailures == nil { - t.Fatalf("No pgx blacklist defined for cockroach version %s", version) + t.Fatalf("No pgx blocklist defined for cockroach version %s", version) } - status := fmt.Sprintf("Running cockroach version %s, using blacklist %s", 
version, blacklistName) + status := fmt.Sprintf("Running cockroach version %s, using blocklist %s", version, blocklistName) if ignorelist != nil { - status = fmt.Sprintf("Running cockroach version %s, using blacklist %s, using ignorelist %s", - version, blacklistName, ignorelistName) + status = fmt.Sprintf("Running cockroach version %s, using blocklist %s, using ignorelist %s", + version, blocklistName, ignorelistName) } c.l.Printf("%s", status) @@ -109,7 +109,7 @@ func registerPgx(r *testRegistry) { results := newORMTestsResults() results.parseJUnitXML(t, expectedFailures, ignorelist, xmlResults) results.summarizeAll( - t, "pgx", blacklistName, expectedFailures, version, latestTag, + t, "pgx", blocklistName, expectedFailures, version, latestTag, ) } diff --git a/pkg/cmd/roachtest/pgx_blacklist.go b/pkg/cmd/roachtest/pgx_blocklist.go similarity index 94% rename from pkg/cmd/roachtest/pgx_blacklist.go rename to pkg/cmd/roachtest/pgx_blocklist.go index 6d7b33325542..3f3830a19029 100644 --- a/pkg/cmd/roachtest/pgx_blacklist.go +++ b/pkg/cmd/roachtest/pgx_blocklist.go @@ -10,18 +10,18 @@ package main -var pgxBlacklists = blacklistsForVersion{ - {"v19.2", "pgxBlacklist19_2", pgxBlacklist19_2, "pgxIgnorelist19_2", pgxIgnorelist19_2}, - {"v20.1", "pgxBlacklist20_1", pgxBlacklist20_1, "pgxIgnorelist20_1", pgxIgnorelist20_1}, - {"v20.2", "pgxBlacklist20_2", pgxBlacklist20_2, "pgxIgnorelist20_2", pgxIgnorelist20_2}, +var pgxBlocklists = blocklistsForVersion{ + {"v19.2", "pgxBlocklist19_2", pgxBlocklist19_2, "pgxIgnorelist19_2", pgxIgnorelist19_2}, + {"v20.1", "pgxBlocklist20_1", pgxBlocklist20_1, "pgxIgnorelist20_1", pgxIgnorelist20_1}, + {"v20.2", "pgxBlocklist20_2", pgxBlocklist20_2, "pgxIgnorelist20_2", pgxIgnorelist20_2}, } // Please keep these lists alphabetized for easy diffing. -// After a failed run, an updated version of this blacklist should be available +// After a failed run, an updated version of this blocklist should be available // in the test log. 
-var pgxBlacklist20_2 = pgxBlacklist20_1 +var pgxBlocklist20_2 = pgxBlocklist20_1 -var pgxBlacklist20_1 = blacklist{ +var pgxBlocklist20_1 = blocklist{ "v4.Example_CustomType": "27796", "v4.TestConnBeginBatchDeferredError": "31632", "v4.TestConnCopyFromFailServerSideMidway": "19603", @@ -65,12 +65,12 @@ var pgxBlacklist20_1 = blacklist{ var pgxIgnorelist20_2 = pgxIgnorelist20_1 -var pgxIgnorelist20_1 = blacklist{ +var pgxIgnorelist20_1 = blocklist{ "v4.TestBeginIsoLevels": "We don't support isolation levels", "v4.TestQueryEncodeError": "This test checks the exact error message", } -var pgxBlacklist19_2 = blacklist{ +var pgxBlocklist19_2 = blocklist{ "v4.Example_CustomType": "27796", "v4.TestConnBeginBatchDeferredError": "31632", "v4.TestConnCopyFromCopyFromSourceErrorEnd": "5807", @@ -129,7 +129,7 @@ var pgxBlacklist19_2 = blacklist{ "v4.TestUnregisteredTypeUsableAsStringArgumentAndBaseResult": "27796", } -var pgxIgnorelist19_2 = blacklist{ +var pgxIgnorelist19_2 = blocklist{ "v4.TestBeginIsoLevels": "We don't support isolation levels", "v4.TestQueryEncodeError": "This test checks the exact error message", } diff --git a/pkg/cmd/roachtest/psycopg.go b/pkg/cmd/roachtest/psycopg.go index 8bb2271d9ebc..7384a7fcf7b8 100644 --- a/pkg/cmd/roachtest/psycopg.go +++ b/pkg/cmd/roachtest/psycopg.go @@ -90,15 +90,15 @@ func registerPsycopg(r *testRegistry) { t.Fatal(err) } - blacklistName, expectedFailures, ignoredlistName, ignoredlist := psycopgBlacklists.getLists(version) + blocklistName, expectedFailures, ignoredlistName, ignoredlist := psycopgBlocklists.getLists(version) if expectedFailures == nil { - t.Fatalf("No psycopg blacklist defined for cockroach version %s", version) + t.Fatalf("No psycopg blocklist defined for cockroach version %s", version) } if ignoredlist == nil { t.Fatalf("No psycopg ignorelist defined for cockroach version %s", version) } - c.l.Printf("Running cockroach version %s, using blacklist %s, using ignoredlist %s", - version, blacklistName, 
ignoredlistName) + c.l.Printf("Running cockroach version %s, using blocklist %s, using ignoredlist %s", + version, blocklistName, ignoredlistName) t.Status("running psycopg test suite") // Note that this is expected to return an error, since the test suite @@ -119,7 +119,7 @@ func registerPsycopg(r *testRegistry) { results := newORMTestsResults() results.parsePythonUnitTestOutput(rawResults, expectedFailures, ignoredlist) results.summarizeAll( - t, "psycopg" /* ormName */, blacklistName, expectedFailures, + t, "psycopg" /* ormName */, blocklistName, expectedFailures, version, latestTag, ) } diff --git a/pkg/cmd/roachtest/psycopg_blacklist.go b/pkg/cmd/roachtest/psycopg_blocklist.go similarity index 99% rename from pkg/cmd/roachtest/psycopg_blacklist.go rename to pkg/cmd/roachtest/psycopg_blocklist.go index 56dcea06ec8d..0417167c3432 100644 --- a/pkg/cmd/roachtest/psycopg_blacklist.go +++ b/pkg/cmd/roachtest/psycopg_blocklist.go @@ -10,12 +10,12 @@ package main -var psycopgBlacklists = blacklistsForVersion{ - {"v2.2", "psycopgBlackList19_1", psycopgBlackList19_1, "psycopgIgnoreList19_1", psycopgIgnoreList19_1}, - {"v19.1", "psycopgBlackList19_1", psycopgBlackList19_1, "psycopgIgnoreList19_1", psycopgIgnoreList19_1}, - {"v19.2", "psycopgBlackList19_2", psycopgBlackList19_2, "psycopgIgnoreList19_2", psycopgIgnoreList19_2}, - {"v20.1", "psycopgBlackList20_1", psycopgBlackList20_1, "psycopgIgnoreList20_1", psycopgIgnoreList20_1}, - {"v20.2", "psycopgBlackList20_2", psycopgBlackList20_2, "psycopgIgnoreList20_2", psycopgIgnoreList20_2}, +var psycopgBlocklists = blocklistsForVersion{ + {"v2.2", "psycopgBlockList19_1", psycopgBlockList19_1, "psycopgIgnoreList19_1", psycopgIgnoreList19_1}, + {"v19.1", "psycopgBlockList19_1", psycopgBlockList19_1, "psycopgIgnoreList19_1", psycopgIgnoreList19_1}, + {"v19.2", "psycopgBlockList19_2", psycopgBlockList19_2, "psycopgIgnoreList19_2", psycopgIgnoreList19_2}, + {"v20.1", "psycopgBlockList20_1", psycopgBlockList20_1, 
"psycopgIgnoreList20_1", psycopgIgnoreList20_1}, + {"v20.2", "psycopgBlockList20_2", psycopgBlockList20_2, "psycopgIgnoreList20_2", psycopgIgnoreList20_2}, } // These are lists of known psycopg test errors and failures. @@ -27,11 +27,11 @@ var psycopgBlacklists = blacklistsForVersion{ // Any test on this list that is not run is reported as FAIL - not run // // Please keep these lists alphabetized for easy diffing. -// After a failed run, an updated version of this blacklist should be available +// After a failed run, an updated version of this blocklist should be available // in the test log. -var psycopgBlackList20_2 = psycopgBlackList20_1 +var psycopgBlockList20_2 = psycopgBlockList20_1 -var psycopgBlackList20_1 = blacklist{ +var psycopgBlockList20_1 = blocklist{ "tests.test_async.AsyncTests.test_async_callproc": "44701", "tests.test_async.AsyncTests.test_error": "44706", "tests.test_async.AsyncTests.test_flush_on_write": "44709", @@ -240,7 +240,7 @@ var psycopgBlackList20_1 = blacklist{ "tests.test_with.WithCursorTestCase.test_named_with_noop": "30352", } -var psycopgBlackList19_2 = blacklist{ +var psycopgBlockList19_2 = blocklist{ "tests.test_async.AsyncTests.test_async_after_async": "5807", "tests.test_async.AsyncTests.test_async_callproc": "5807", "tests.test_async.AsyncTests.test_async_connection_error_message": "5807", @@ -527,7 +527,7 @@ var psycopgBlackList19_2 = blacklist{ "tests.test_with.WithCursorTestCase.test_named_with_noop": "30352", } -var psycopgBlackList19_1 = blacklist{ +var psycopgBlockList19_1 = blocklist{ "tests.test_async.AsyncTests.test_async_after_async": "5807", "tests.test_async.AsyncTests.test_async_callproc": "5807", "tests.test_async.AsyncTests.test_async_connection_error_message": "5807", @@ -825,7 +825,7 @@ var psycopgIgnoreList20_1 = psycopgIgnoreList19_2 var psycopgIgnoreList19_2 = psycopgIgnoreList19_1 -var psycopgIgnoreList19_1 = blacklist{ +var psycopgIgnoreList19_1 = blocklist{ 
"tests.test_green.GreenTestCase.test_flush_on_write": "flakey", "tests.test_connection.TestConnectionInfo.test_backend_pid": "we return -1 for pg_backend_pid()", } diff --git a/pkg/cmd/roachtest/python_helpers.go b/pkg/cmd/roachtest/python_helpers.go index 53040104536b..98d19ce9d580 100644 --- a/pkg/cmd/roachtest/python_helpers.go +++ b/pkg/cmd/roachtest/python_helpers.go @@ -20,7 +20,7 @@ import ( var pythonUnitTestOutputRegex = regexp.MustCompile(`(?P.*) \((?P.*)\) \.\.\. (?P[^ ']*)(?: u?['"](?P.*)['"])?`) func (r *ormTestsResults) parsePythonUnitTestOutput( - input []byte, expectedFailures blacklist, ignoredList blacklist, + input []byte, expectedFailures blocklist, ignoredList blocklist, ) { scanner := bufio.NewScanner(bytes.NewReader(input)) for scanner.Scan() { diff --git a/pkg/cmd/roachtest/sqlalchemy.go b/pkg/cmd/roachtest/sqlalchemy.go index bee053fd7f70..101253b29ce9 100644 --- a/pkg/cmd/roachtest/sqlalchemy.go +++ b/pkg/cmd/roachtest/sqlalchemy.go @@ -155,12 +155,12 @@ func registerSQLAlchemy(r *testRegistry) { t.Fatal(err) } - blacklistName, expectedFailures, ignoredlistName, ignoredlist := sqlAlchemyBlacklists.getLists(version) + blocklistName, expectedFailures, ignoredlistName, ignoredlist := sqlAlchemyBlocklists.getLists(version) if expectedFailures == nil { - t.Fatalf("No sqlalchemy blacklist defined for cockroach version %s", version) + t.Fatalf("No sqlalchemy blocklist defined for cockroach version %s", version) } - c.l.Printf("Running cockroach version %s, using blacklist %s, using ignoredlist %s", - version, blacklistName, ignoredlistName) + c.l.Printf("Running cockroach version %s, using blocklist %s, using ignoredlist %s", + version, blocklistName, ignoredlistName) t.Status("running sqlalchemy test suite") // Note that this is expected to return an error, since the test suite @@ -222,7 +222,7 @@ func registerSQLAlchemy(r *testRegistry) { } results.summarizeAll( - t, "sqlalchemy" /* ormName */, blacklistName, expectedFailures, version, 
latestTag) + t, "sqlalchemy" /* ormName */, blocklistName, expectedFailures, version, latestTag) } r.Add(testSpec{ diff --git a/pkg/cmd/roachtest/sqlalchemy_blacklist.go b/pkg/cmd/roachtest/sqlalchemy_blocklist.go similarity index 97% rename from pkg/cmd/roachtest/sqlalchemy_blacklist.go rename to pkg/cmd/roachtest/sqlalchemy_blocklist.go index b7c21dcf9b21..12c87d484341 100644 --- a/pkg/cmd/roachtest/sqlalchemy_blacklist.go +++ b/pkg/cmd/roachtest/sqlalchemy_blocklist.go @@ -10,17 +10,17 @@ package main -var sqlAlchemyBlacklists = blacklistsForVersion{ - {"v2.1", "sqlAlchemyBlacklist", sqlAlchemyBlacklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, - {"v19.1", "sqlAlchemyBlacklist", sqlAlchemyBlacklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, - {"v19.2", "sqlAlchemyBlacklist", sqlAlchemyBlacklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, - {"v20.1", "sqlAlchemyBlacklist20_1", sqlAlchemyBlacklist20_1, "sqlAlchemyIgnoreList20_1", sqlAlchemyIgnoreList20_1}, - {"v20.2", "sqlAlchemyBlacklist20_2", sqlAlchemyBlacklist20_2, "sqlAlchemyIgnoreList20_2", sqlAlchemyIgnoreList20_2}, +var sqlAlchemyBlocklists = blocklistsForVersion{ + {"v2.1", "sqlAlchemyBlocklist", sqlAlchemyBlocklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, + {"v19.1", "sqlAlchemyBlocklist", sqlAlchemyBlocklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, + {"v19.2", "sqlAlchemyBlocklist", sqlAlchemyBlocklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList}, + {"v20.1", "sqlAlchemyBlocklist20_1", sqlAlchemyBlocklist20_1, "sqlAlchemyIgnoreList20_1", sqlAlchemyIgnoreList20_1}, + {"v20.2", "sqlAlchemyBlocklist20_2", sqlAlchemyBlocklist20_2, "sqlAlchemyIgnoreList20_2", sqlAlchemyIgnoreList20_2}, } -var sqlAlchemyBlacklist20_2 = sqlAlchemyBlacklist20_1 +var sqlAlchemyBlocklist20_2 = sqlAlchemyBlocklist20_1 -var sqlAlchemyBlacklist20_1 = blacklist{ +var sqlAlchemyBlocklist20_1 = blocklist{ 
"test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_autoincrement_col": "5807", "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_deprecated_get_primary_keys": "5807", "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_dialect_initialize": "5807", @@ -90,7 +90,7 @@ var sqlAlchemyBlacklist20_1 = blacklist{ "test/dialect/test_suite.py::TableDDLTest_cockroachdb+psycopg2_9_5_0::test_create_table_schema": "unknown", } -var sqlAlchemyBlacklist = blacklist{ +var sqlAlchemyBlocklist = blocklist{ "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_autoincrement_col": "5807", "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_deprecated_get_primary_keys": "5807", "test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_dialect_initialize": "5807", @@ -164,7 +164,7 @@ var sqlAlchemyIgnoreList20_2 = sqlAlchemyIgnoreList var sqlAlchemyIgnoreList20_1 = sqlAlchemyIgnoreList -var sqlAlchemyIgnoreList = blacklist{ +var sqlAlchemyIgnoreList = blocklist{ "test/dialect/test_suite.py::ExceptionTest_cockroachdb+psycopg2_9_5_0::test_integrity_error": "passes, but can't parse result", "test/dialect/test_suite.py::TableDDLTest_cockroachdb+psycopg2_9_5_0::test_create_table": "flaky", } diff --git a/pkg/config/system.go b/pkg/config/system.go index bbffa493092e..b4fb6654eaf9 100644 --- a/pkg/config/system.go +++ b/pkg/config/system.go @@ -147,8 +147,9 @@ func (s *SystemConfig) getSystemTenantDesc(key roachpb.Key) *roachpb.Value { // configs through proper channels. // // Getting here outside tests is impossible. 
+ desc := sqlbase.NewImmutableTableDescriptor(sqlbase.TableDescriptor{}).DescriptorProto() var val roachpb.Value - if err := val.SetProto(sqlbase.WrapDescriptor(&sqlbase.TableDescriptor{})); err != nil { + if err := val.SetProto(desc); err != nil { panic(err) } return &val diff --git a/pkg/config/system_test.go b/pkg/config/system_test.go index 6d69f5d56509..7e29b0a7cb6a 100644 --- a/pkg/config/system_test.go +++ b/pkg/config/system_test.go @@ -58,9 +58,9 @@ func sqlKV(tableID uint32, indexID, descID uint64) roachpb.KeyValue { func descriptor(descID uint64) roachpb.KeyValue { k := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(descID)) - v := sqlbase.WrapDescriptor(&sqlbase.TableDescriptor{}) + v := sqlbase.TableDescriptor{} kv := roachpb.KeyValue{Key: k} - if err := kv.Value.SetProto(v); err != nil { + if err := kv.Value.SetProto(v.DescriptorProto()); err != nil { panic(err) } return kv diff --git a/pkg/geo/encode.go b/pkg/geo/encode.go index 229ffcfee1df..4536bda8e71e 100644 --- a/pkg/geo/encode.go +++ b/pkg/geo/encode.go @@ -17,6 +17,9 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/geo/geopb" + "github.com/cockroachdb/cockroach/pkg/geo/geoprojbase" + "github.com/cockroachdb/errors" + "github.com/twpayne/go-geom" "github.com/twpayne/go-geom/encoding/ewkb" "github.com/twpayne/go-geom/encoding/geojson" "github.com/twpayne/go-geom/encoding/kml" @@ -26,7 +29,7 @@ import ( ) // EWKBToWKT transforms a given EWKB to WKT. -func EWKBToWKT(b geopb.EWKB) (geopb.WKT, error) { +func EWKBToWKT(b geopb.EWKB, maxDecimalDigits int) (geopb.WKT, error) { // twpayne/go-geom doesn't seem to handle POINT EMPTY just yet. Add this hack in. // Remove after #49209 is resolved. 
if bytes.Equal(b, []byte{0x01, 0x01, 0x00, 0x00, 0x00}) { @@ -36,12 +39,12 @@ func EWKBToWKT(b geopb.EWKB) (geopb.WKT, error) { if err != nil { return "", err } - ret, err := wkt.Marshal(t) + ret, err := wkt.Marshal(t, wkt.EncodeOptionWithMaxDecimalDigits(maxDecimalDigits)) return geopb.WKT(ret), err } // EWKBToEWKT transforms a given EWKB to EWKT. -func EWKBToEWKT(b geopb.EWKB) (geopb.EWKT, error) { +func EWKBToEWKT(b geopb.EWKB, maxDecimalDigits int) (geopb.EWKT, error) { // twpayne/go-geom doesn't seem to handle POINT EMPTY just yet. Add this hack in. // Remove after #49209 is resolved. if bytes.Equal(b, []byte{0x01, 0x01, 0x00, 0x00, 0x00}) { @@ -51,7 +54,7 @@ func EWKBToEWKT(b geopb.EWKB) (geopb.EWKT, error) { if err != nil { return "", err } - ret, err := wkt.Marshal(t) + ret, err := wkt.Marshal(t, wkt.EncodeOptionWithMaxDecimalDigits(maxDecimalDigits)) if err != nil { return "", err } @@ -71,17 +74,86 @@ func EWKBToWKB(b geopb.EWKB, byteOrder binary.ByteOrder) (geopb.WKB, error) { return geopb.WKB(ret), err } +// EWKBToGeoJSONFlag maps to the ST_AsGeoJSON flags for PostGIS. +type EWKBToGeoJSONFlag int + +// These should be kept with ST_AsGeoJSON in PostGIS. +// 0: means no option +// 1: GeoJSON BBOX +// 2: GeoJSON Short CRS (e.g EPSG:4326) +// 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326) +// 8: GeoJSON Short CRS if not EPSG:4326 (default) +const ( + EWKBToGeoJSONFlagIncludeBBox EWKBToGeoJSONFlag = 1 << (iota) + EWKBToGeoJSONFlagShortCRS + EWKBToGeoJSONFlagLongCRS + EWKBToGeoJSONFlagShortCRSIfNot4326 + + EWKBToGeoJSONFlagZero = 0 +) + +// geomToGeoJSONCRS converts a geom to its CRS GeoJSON form. 
+func geomToGeoJSONCRS(t geom.T, long bool) (*geojson.CRS, error) { + projection, ok := geoprojbase.Projection(geopb.SRID(t.SRID())) + if !ok { + return nil, errors.Newf("unknown SRID: %d", t.SRID()) + } + var prop string + if long { + prop = fmt.Sprintf("urn:ogc:def:crs:%s::%d", projection.AuthName, projection.AuthSRID) + } else { + prop = fmt.Sprintf("%s:%d", projection.AuthName, projection.AuthSRID) + } + crs := &geojson.CRS{ + Type: "name", + Properties: map[string]interface{}{ + "name": prop, + }, + } + return crs, nil +} + // EWKBToGeoJSON transforms a given EWKB to GeoJSON. -func EWKBToGeoJSON(b geopb.EWKB) ([]byte, error) { +func EWKBToGeoJSON(b geopb.EWKB, maxDecimalDigits int, flag EWKBToGeoJSONFlag) ([]byte, error) { t, err := ewkb.Unmarshal([]byte(b)) if err != nil { return nil, err } - f := geojson.Feature{ - // TODO(otan): add features once we have spatial_ref_sys. - Geometry: t, + options := []geojson.EncodeGeometryOption{ + geojson.EncodeGeometryWithMaxDecimalDigits(maxDecimalDigits), + } + if flag&EWKBToGeoJSONFlagIncludeBBox != 0 { + options = append( + options, + geojson.EncodeGeometryWithBBox(), + ) } - return f.MarshalJSON() + // Take CRS flag in order of precedence. + if t.SRID() != 0 { + if flag&EWKBToGeoJSONFlagLongCRS != 0 { + crs, err := geomToGeoJSONCRS(t, true /* long */) + if err != nil { + return nil, err + } + options = append(options, geojson.EncodeGeometryWithCRS(crs)) + } else if flag&EWKBToGeoJSONFlagShortCRS != 0 { + crs, err := geomToGeoJSONCRS(t, false /* long */) + if err != nil { + return nil, err + } + options = append(options, geojson.EncodeGeometryWithCRS(crs)) + } else if flag&EWKBToGeoJSONFlagShortCRSIfNot4326 != 0 { + if t.SRID() != 4326 { + crs, err := geomToGeoJSONCRS(t, false /* long */) + if err != nil { + return nil, err + } + options = append(options, geojson.EncodeGeometryWithCRS(crs)) + } + } + } + + return geojson.Marshal(t, options...) } // EWKBToWKBHex transforms a given EWKB to WKBHex. 
diff --git a/pkg/geo/encode_test.go b/pkg/geo/encode_test.go index 06d2c79a381f..662d2a5b19bb 100644 --- a/pkg/geo/encode_test.go +++ b/pkg/geo/encode_test.go @@ -19,18 +19,20 @@ import ( func TestEWKBToWKT(t *testing.T) { testCases := []struct { - ewkt geopb.EWKT - expected geopb.WKT + ewkt geopb.EWKT + maxDecimalDigits int + expected geopb.WKT }{ - {"POINT(1.0 1.0)", "POINT (1 1)"}, - {"SRID=4;POINT(1.0 1.0)", "POINT (1 1)"}, + {"POINT(1.01 1.01)", 15, "POINT (1.01 1.01)"}, + {"POINT(1.01 1.01)", 1, "POINT (1 1)"}, + {"SRID=4;POINT(1.0 1.0)", 15, "POINT (1 1)"}, } for _, tc := range testCases { t.Run(string(tc.ewkt), func(t *testing.T) { so, err := parseEWKT(tc.ewkt, geopb.DefaultGeometrySRID, DefaultSRIDIsHint) require.NoError(t, err) - encoded, err := EWKBToWKT(so.EWKB) + encoded, err := EWKBToWKT(so.EWKB, tc.maxDecimalDigits) require.NoError(t, err) require.Equal(t, tc.expected, encoded) }) @@ -39,18 +41,20 @@ func TestEWKBToWKT(t *testing.T) { func TestEWKBToEWKT(t *testing.T) { testCases := []struct { - ewkt geopb.EWKT - expected geopb.EWKT + ewkt geopb.EWKT + maxDecimalDigits int + expected geopb.EWKT }{ - {"POINT(1.0 1.0)", "POINT (1 1)"}, - {"SRID=4;POINT(1.0 1.0)", "SRID=4;POINT (1 1)"}, + {"POINT(1.01 1.01)", 15, "POINT (1.01 1.01)"}, + {"POINT(1.01 1.01)", 1, "POINT (1 1)"}, + {"SRID=4;POINT(1.0 1.0)", 15, "SRID=4;POINT (1 1)"}, } for _, tc := range testCases { t.Run(string(tc.ewkt), func(t *testing.T) { so, err := parseEWKT(tc.ewkt, geopb.DefaultGeometrySRID, DefaultSRIDIsHint) require.NoError(t, err) - encoded, err := EWKBToEWKT(so.EWKB) + encoded, err := EWKBToEWKT(so.EWKB, tc.maxDecimalDigits) require.NoError(t, err) require.Equal(t, tc.expected, encoded) }) @@ -80,17 +84,31 @@ func TestEWKBToWKB(t *testing.T) { func TestEWKBToGeoJSON(t *testing.T) { testCases := []struct { ewkt geopb.EWKT + flag EWKBToGeoJSONFlag expected string }{ - {"POINT(1.0 1.0)", `{"type":"Feature","geometry":{"type":"Point","coordinates":[1,1]},"properties":null}`}, - 
{"SRID=4;POINT(1.0 1.0)", `{"type":"Feature","geometry":{"type":"Point","coordinates":[1,1]},"properties":null}`}, + {"POINT(1.0 1.0)", EWKBToGeoJSONFlagZero, `{"type":"Point","coordinates":[1,1]}`}, + {"POINT(1.0 1.0)", EWKBToGeoJSONFlagIncludeBBox, `{"type":"Point","bbox":[1,1,1,1],"coordinates":[1,1]}`}, + {"POINT(1.0 1.0)", EWKBToGeoJSONFlagShortCRS | EWKBToGeoJSONFlagIncludeBBox, `{"type":"Point","bbox":[1,1,1,1],"coordinates":[1,1]}`}, + {"POINT(1.0 1.0)", EWKBToGeoJSONFlagShortCRS, `{"type":"Point","coordinates":[1,1]}`}, + {"POINT(1.0 1.0)", EWKBToGeoJSONFlagLongCRS, `{"type":"Point","coordinates":[1,1]}`}, + {"POINT(1.0 1.0)", EWKBToGeoJSONFlagShortCRSIfNot4326, `{"type":"Point","coordinates":[1,1]}`}, + {"POINT(1.1234567 1.9876543)", EWKBToGeoJSONFlagShortCRSIfNot4326, `{"type":"Point","coordinates":[1.123457,1.987654]}`}, + {"SRID=4326;POINT(1.0 1.0)", EWKBToGeoJSONFlagZero, `{"type":"Point","coordinates":[1,1]}`}, + {"SRID=4326;POINT(1.0 1.0)", EWKBToGeoJSONFlagIncludeBBox, `{"type":"Point","bbox":[1,1,1,1],"coordinates":[1,1]}`}, + {"SRID=4326;POINT(1.0 1.0)", EWKBToGeoJSONFlagLongCRS, `{"type":"Point","crs":{"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::4326"}},"coordinates":[1,1]}`}, + {"SRID=4326;POINT(1.0 1.0)", EWKBToGeoJSONFlagShortCRS, `{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},"coordinates":[1,1]}`}, + {"SRID=4004;POINT(1.0 1.0)", EWKBToGeoJSONFlagShortCRS, `{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4004"}},"coordinates":[1,1]}`}, + {"SRID=4004;POINT(1.0 1.0)", EWKBToGeoJSONFlagShortCRS | EWKBToGeoJSONFlagIncludeBBox, `{"type":"Point","bbox":[1,1,1,1],"crs":{"type":"name","properties":{"name":"EPSG:4004"}},"coordinates":[1,1]}`}, + {"SRID=4326;POINT(1.0 1.0)", EWKBToGeoJSONFlagShortCRSIfNot4326, `{"type":"Point","coordinates":[1,1]}`}, + {"SRID=4004;POINT(1.0 1.0)", EWKBToGeoJSONFlagShortCRSIfNot4326, 
`{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4004"}},"coordinates":[1,1]}`}, } for _, tc := range testCases { t.Run(string(tc.ewkt), func(t *testing.T) { so, err := parseEWKT(tc.ewkt, geopb.DefaultGeometrySRID, DefaultSRIDIsHint) require.NoError(t, err) - encoded, err := EWKBToGeoJSON(so.EWKB) + encoded, err := EWKBToGeoJSON(so.EWKB, 6, tc.flag) require.NoError(t, err) require.Equal(t, tc.expected, string(encoded)) }) diff --git a/pkg/geo/geo_test.go b/pkg/geo/geo_test.go index 4cf21e09577d..7b142b0b5a8d 100644 --- a/pkg/geo/geo_test.go +++ b/pkg/geo/geo_test.go @@ -16,9 +16,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/geo/geopb" - "github.com/cockroachdb/cockroach/pkg/geo/geos" - "github.com/cockroachdb/cockroach/pkg/util/leaktest" - "github.com/cockroachdb/datadriven" "github.com/golang/geo/s2" "github.com/stretchr/testify/require" "github.com/twpayne/go-geom" @@ -474,42 +471,3 @@ func TestGeographyAsS2(t *testing.T) { }) } } - -func TestClipEWKBByRect(t *testing.T) { - defer leaktest.AfterTest(t)() - - var g *Geometry - var err error - datadriven.RunTest(t, "testdata/clip", func(t *testing.T, d *datadriven.TestData) string { - switch d.Cmd { - case "geometry": - g, err = ParseGeometry(d.Input) - if err != nil { - return err.Error() - } - return "" - case "clip": - var xMin, yMin, xMax, yMax int - d.ScanArgs(t, "xmin", &xMin) - d.ScanArgs(t, "ymin", &yMin) - d.ScanArgs(t, "xmax", &xMax) - d.ScanArgs(t, "ymax", &yMax) - ewkb, err := geos.ClipEWKBByRect( - g.EWKB(), float64(xMin), float64(yMin), float64(xMax), float64(yMax)) - if err != nil { - return err.Error() - } - // TODO(sumeer): - // - add WKB to WKT and print exact output - // - expand test with more inputs - return fmt.Sprintf( - "%d => %d (srid: %d)", - len(g.EWKB()), - len(ewkb), - g.SRID(), - ) - default: - return fmt.Sprintf("unknown command: %s", d.Cmd) - } - }) -} diff --git a/pkg/geo/geogfn/unary_operators.go b/pkg/geo/geogfn/unary_operators.go index 
e5ce3ccf7c05..128c7f76a175 100644 --- a/pkg/geo/geogfn/unary_operators.go +++ b/pkg/geo/geogfn/unary_operators.go @@ -11,9 +11,12 @@ package geogfn import ( + "math" + "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/cockroach/pkg/geo/geographiclib" "github.com/cockroachdb/errors" + "github.com/golang/geo/s1" "github.com/golang/geo/s2" "github.com/twpayne/go-geom" ) @@ -90,6 +93,43 @@ func Length(g *geo.Geography, useSphereOrSpheroid UseSphereOrSpheroid) (float64, return length(regions, useSphereOrSpheroid) } +// Project returns calculate a projected point given a source point, a distance and a azimuth. +func Project(point *geom.Point, distance float64, azimuth s1.Angle) (*geom.Point, error) { + spheroid := geographiclib.WGS84Spheroid + + // Normalize distance to be positive. + if distance < 0.0 { + distance = -distance + azimuth += math.Pi + } + + // Normalize azimuth + azimuth = azimuth.Normalized() + + // Check the distance validity. + if distance > (math.Pi * spheroid.Radius) { + return nil, errors.Newf("distance must not be greater than %f", math.Pi*spheroid.Radius) + } + + // Convert to ta geodetic point. + x := point.X() + y := point.Y() + + projected := spheroid.Project( + s2.LatLngFromDegrees(x, y), + distance, + azimuth, + ) + + return geom.NewPointFlat( + geom.XY, + []float64{ + float64(projected.Lng.Normalized()) * 180.0 / math.Pi, + normalizeLatitude(float64(projected.Lat)) * 180.0 / math.Pi, + }, + ), nil +} + // length returns the sum of the lengtsh and perimeters in the shapes of the Geography. // In OGC parlance, length returns both LineString lengths _and_ Polygon perimeters. func length(regions []s2.Region, useSphereOrSpheroid UseSphereOrSpheroid) (float64, error) { @@ -128,3 +168,32 @@ func length(regions []s2.Region, useSphereOrSpheroid UseSphereOrSpheroid) (float } return totalLength, nil } + +// normalizeLatitude convert a latitude to the range of -Pi/2, Pi/2. 
+func normalizeLatitude(lat float64) float64 { + if lat > 2.0*math.Pi { + lat = math.Remainder(lat, 2.0*math.Pi) + } + + if lat < -2.0*math.Pi { + lat = math.Remainder(lat, -2.0*math.Pi) + } + + if lat > math.Pi { + lat = math.Pi - lat + } + + if lat < -1.0*math.Pi { + lat = -1.0*math.Pi - lat + } + + if lat > math.Pi*2 { + lat = math.Pi - lat + } + + if lat < -1.0*math.Pi*2 { + lat = -1.0*math.Pi - lat + } + + return lat +} diff --git a/pkg/geo/geogfn/unary_operators_test.go b/pkg/geo/geogfn/unary_operators_test.go index 6149ad6762cb..a6918407ce32 100644 --- a/pkg/geo/geogfn/unary_operators_test.go +++ b/pkg/geo/geogfn/unary_operators_test.go @@ -15,7 +15,9 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/geo" + "github.com/golang/geo/s1" "github.com/stretchr/testify/require" + "github.com/twpayne/go-geom" ) type unaryOperatorExpectedResult struct { @@ -219,3 +221,35 @@ func TestLength(t *testing.T) { }) } } + +func TestProject(t *testing.T) { + var testCases = []struct { + desc string + point *geom.Point + distance float64 + azimuth float64 + projected *geom.Point + }{ + { + "POINT(0 0), 100000, radians(45)", + geom.NewPointFlat(geom.XY, []float64{0, 0}), + 100000, + 45 * math.Pi / 180.0, + geom.NewPointFlat(geom.XY, []float64{0.6352310291255374, 0.6394723347291977}), + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + projected, err := Project(tc.point, tc.distance, s1.Angle(tc.azimuth)) + require.NoError(t, err) + require.Equalf( + t, + tc.projected, + projected, + "expected %f, found %f", + &tc.projected, + projected) + }) + } +} diff --git a/pkg/geo/geographiclib/geographiclib.go b/pkg/geo/geographiclib/geographiclib.go index e1fc0a67eb34..c63ba65747c8 100644 --- a/pkg/geo/geographiclib/geographiclib.go +++ b/pkg/geo/geographiclib/geographiclib.go @@ -18,7 +18,12 @@ package geographiclib // #include "geographiclib.h" import "C" -import "github.com/golang/geo/s2" +import ( + "math" + + "github.com/golang/geo/s1" + 
"github.com/golang/geo/s2" +) var ( // WGS84Spheroid represents the default WGS84 ellipsoid. @@ -109,3 +114,23 @@ func (s *Spheroid) AreaAndPerimeter(points []s2.Point) (area float64, perimeter ) return float64(areaDouble), float64(perimeterDouble) } + +// Project returns computes the location of the projected point. +// +// Using the direct geodesic problem from GeographicLib (Karney 2013). +func (s *Spheroid) Project(point s2.LatLng, distance float64, azimuth s1.Angle) s2.LatLng { + var lat, lng C.double + + C.geod_direct( + &s.cRepr, + C.double(point.Lat.Degrees()), + C.double(point.Lng.Degrees()), + C.double(azimuth*180.0/math.Pi), + C.double(distance), + &lat, + &lng, + nil, + ) + + return s2.LatLngFromDegrees(float64(lat), float64(lng)) +} diff --git a/pkg/geo/geographiclib/geographiclib_test.go b/pkg/geo/geographiclib/geographiclib_test.go index 0d051b86365f..b383db8921e7 100644 --- a/pkg/geo/geographiclib/geographiclib_test.go +++ b/pkg/geo/geographiclib/geographiclib_test.go @@ -11,8 +11,10 @@ package geographiclib import ( + "math" "testing" + "github.com/golang/geo/s1" "github.com/golang/geo/s2" "github.com/stretchr/testify/require" ) @@ -101,3 +103,30 @@ func TestAreaAndPerimeter(t *testing.T) { }) } } + +func TestProject(t *testing.T) { + testCases := []struct { + desc string + spheroid Spheroid + point s2.LatLng + distance float64 + azimuth float64 + project s2.LatLng + }{ + { + desc: "{0,0} project to 100000, radians(45.0) on WGS84Spheroid", + spheroid: WGS84Spheroid, + point: s2.LatLng{Lat: 0, Lng: 0}, + distance: 100000, + azimuth: 45 * math.Pi / 180.0, + project: s2.LatLng{Lat: 0.011160897716439782, Lng: 0.011086872969072624}, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + project := tc.spheroid.Project(tc.point, tc.distance, s1.Angle(tc.azimuth)) + require.Equal(t, tc.project, project) + }) + } +} diff --git a/pkg/geo/geoindex/s2_geometry_index_test.go b/pkg/geo/geoindex/s2_geometry_index_test.go index 
b64c575c7f68..3611054ae8f2 100644 --- a/pkg/geo/geoindex/s2_geometry_index_test.go +++ b/pkg/geo/geoindex/s2_geometry_index_test.go @@ -16,6 +16,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/geo" + "github.com/cockroachdb/cockroach/pkg/geo/geos" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/datadriven" ) @@ -65,3 +66,42 @@ func TestS2GeometryIndexBasic(t *testing.T) { } }) } + +func TestClipEWKBByRect(t *testing.T) { + defer leaktest.AfterTest(t)() + + var g *geo.Geometry + var err error + datadriven.RunTest(t, "testdata/clip", func(t *testing.T, d *datadriven.TestData) string { + switch d.Cmd { + case "geometry": + g, err = geo.ParseGeometry(d.Input) + if err != nil { + return err.Error() + } + return "" + case "clip": + var xMin, yMin, xMax, yMax int + d.ScanArgs(t, "xmin", &xMin) + d.ScanArgs(t, "ymin", &yMin) + d.ScanArgs(t, "xmax", &xMax) + d.ScanArgs(t, "ymax", &yMax) + ewkb, err := geos.ClipEWKBByRect( + g.EWKB(), float64(xMin), float64(yMin), float64(xMax), float64(yMax)) + if err != nil { + return err.Error() + } + // TODO(sumeer): + // - add WKB to WKT and print exact output + // - expand test with more inputs + return fmt.Sprintf( + "%d => %d (srid: %d)", + len(g.EWKB()), + len(ewkb), + g.SRID(), + ) + default: + return fmt.Sprintf("unknown command: %s", d.Cmd) + } + }) +} diff --git a/pkg/geo/testdata/clip b/pkg/geo/geoindex/testdata/clip similarity index 100% rename from pkg/geo/testdata/clip rename to pkg/geo/geoindex/testdata/clip diff --git a/pkg/geo/geopb/geopb.go b/pkg/geo/geopb/geopb.go index 0de9965b58e1..119eee414706 100644 --- a/pkg/geo/geopb/geopb.go +++ b/pkg/geo/geopb/geopb.go @@ -10,12 +10,9 @@ package geopb -import ( - "encoding/hex" - "strings" -) +import "fmt" // EWKBHex returns the EWKB-hex version of this data type func (b *SpatialObject) EWKBHex() string { - return strings.ToUpper(hex.EncodeToString(b.EWKB)) + return fmt.Sprintf("%X", b.EWKB) } diff --git a/pkg/geo/geoproj/geoproj.go 
b/pkg/geo/geoproj/geoproj.go index 9d2bdd7345bf..044dbf6255e4 100644 --- a/pkg/geo/geoproj/geoproj.go +++ b/pkg/geo/geoproj/geoproj.go @@ -13,9 +13,9 @@ package geoproj // #cgo CXXFLAGS: -std=c++14 // #cgo CPPFLAGS: -I../../../c-deps/proj/src -// #cgo LDFLAGS: -lproj +// #cgo !windows LDFLAGS: -lproj // #cgo linux LDFLAGS: -lrt -lm -lpthread -// #cgo windows LDFLAGS: -lshlwapi -lrpcrt4 +// #cgo windows LDFLAGS: -lproj_4_9 -lshlwapi -lrpcrt4 // // #include "proj.h" import "C" diff --git a/pkg/geo/geoprojbase/geoprojbase.go b/pkg/geo/geoprojbase/geoprojbase.go index 36d6e890b17c..68c41ecd2cd8 100644 --- a/pkg/geo/geoprojbase/geoprojbase.go +++ b/pkg/geo/geoprojbase/geoprojbase.go @@ -27,10 +27,15 @@ type Proj4Text struct { // MakeProj4Text returns a new Proj4Text with spec based on the given string. func MakeProj4Text(str string) Proj4Text { return Proj4Text{ - cStr: []byte(str + `\0`), + cStr: []byte(str + "\u0000"), } } +// String returns the string representation of the given proj text. +func (p *Proj4Text) String() string { + return string(p.cStr[:len(p.cStr)-1]) +} + // Bytes returns the raw bytes for the given proj text. func (p *Proj4Text) Bytes() []byte { return p.cStr @@ -62,6 +67,6 @@ type ProjInfo struct { // Projection returns the ProjInfo identifier for the given SRID, as well as an bool // indicating whether the projection exists. func Projection(srid geopb.SRID) (ProjInfo, bool) { - p, exists := projections[srid] + p, exists := Projections[srid] return p, exists } diff --git a/pkg/geo/geoprojbase/projections.go b/pkg/geo/geoprojbase/projections.go index cef98c17fe69..0f9f5e2450ed 100644 --- a/pkg/geo/geoprojbase/projections.go +++ b/pkg/geo/geoprojbase/projections.go @@ -12,9 +12,10 @@ package geoprojbase import "github.com/cockroachdb/cockroach/pkg/geo/geopb" -// projections is a mapping of SRID to projections. +// Projections is a mapping of SRID to projections. +// Use the `Projection` function to obtain one. // This file is not spell checked. 
-var projections = map[geopb.SRID]ProjInfo{ +var Projections = map[geopb.SRID]ProjInfo{ 4326: { SRID: 4326, AuthName: "EPSG", diff --git a/pkg/geo/parse.go b/pkg/geo/parse.go index 4101c16faf39..3f05493c2f5f 100644 --- a/pkg/geo/parse.go +++ b/pkg/geo/parse.go @@ -99,12 +99,10 @@ func parseWKB(b []byte, defaultSRID geopb.SRID) (geopb.SpatialObject, error) { // parseGeoJSON takes given bytes assumed to be GeoJSON and transforms it into a SpatialObject. func parseGeoJSON(b []byte, defaultSRID geopb.SRID) (geopb.SpatialObject, error) { - var f geojson.Feature - if err := f.UnmarshalJSON(b); err != nil { + var t geom.T + if err := geojson.Unmarshal(b, &t); err != nil { return geopb.SpatialObject{}, err } - t := f.Geometry - // TODO(otan): check SRID from properties. if defaultSRID != 0 && t.SRID() == 0 { adjustGeomSRID(t, defaultSRID) } diff --git a/pkg/geo/parse_test.go b/pkg/geo/parse_test.go index 3427e6f90252..bd0d55c22766 100644 --- a/pkg/geo/parse_test.go +++ b/pkg/geo/parse_test.go @@ -225,7 +225,7 @@ func TestParseGeometry(t *testing.T) { "", }, { - `{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [1.0, 1.0] }, "properties": { "name": "┳━┳ ヽ(ಠل͜ಠ)ノ" } }`, + `{ "type": "Point", "coordinates": [1.0, 1.0] }`, &Geometry{ SpatialObject: geopb.SpatialObject{ EWKB: []byte("\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x3f\x00\x00\x00\x00\x00\x00\xf0\x3f"), @@ -367,7 +367,7 @@ func TestParseGeography(t *testing.T) { "", }, { - `{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [1.0, 1.0] }, "properties": { "name": "┳━┳ ヽ(ಠل͜ಠ)ノ" } }`, + `{ "type": "Point", "coordinates": [1.0, 1.0] }`, &Geography{ SpatialObject: geopb.SpatialObject{ EWKB: []byte("\x01\x01\x00\x00\x20\xe6\x10\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x3f\x00\x00\x00\x00\x00\x00\xf0\x3f"), diff --git a/pkg/gossip/gossip.go b/pkg/gossip/gossip.go index ca6423effe58..e4f257b21c30 100644 --- a/pkg/gossip/gossip.go +++ b/pkg/gossip/gossip.go @@ -72,6 +72,7 @@ import ( 
"github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/cockroach/pkg/util/redact" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -275,7 +276,7 @@ type Gossip struct { localityTierMap map[string]struct{} - lastConnectivity string + lastConnectivity redact.RedactableString defaultZoneConfig *zonepb.ZoneConfig } @@ -561,21 +562,23 @@ func (g *Gossip) GetNodeDescriptor(nodeID roachpb.NodeID) (*roachpb.NodeDescript func (g *Gossip) LogStatus() { g.mu.RLock() n := len(g.nodeDescs) - status := "ok" + status := redact.SafeString("ok") if g.mu.is.getInfo(KeySentinel) == nil { - status = "stalled" + status = redact.SafeString("stalled") } g.mu.RUnlock() - var connectivity string - if s := g.Connectivity().String(); s != g.lastConnectivity { + var connectivity redact.RedactableString + if s := redact.Sprint(g.Connectivity()); s != g.lastConnectivity { g.lastConnectivity = s connectivity = s } ctx := g.AnnotateCtx(context.TODO()) log.Infof(ctx, "gossip status (%s, %d node%s)\n%s%s%s", - status, n, util.Pluralize(int64(n)), g.clientStatus(), g.server.status(), connectivity) + status, n, util.Pluralize(int64(n)), + g.clientStatus(), g.server.status(), + connectivity) } func (g *Gossip) clientStatus() ClientStatus { diff --git a/pkg/gossip/status.go b/pkg/gossip/status.go index 5c0ae33338b0..eb34cf8d8b6e 100644 --- a/pkg/gossip/status.go +++ b/pkg/gossip/status.go @@ -11,11 +11,10 @@ package gossip import ( - "bytes" - "fmt" "time" "github.com/cockroachdb/cockroach/pkg/util/metric" + "github.com/cockroachdb/cockroach/pkg/util/redact" ) // Metrics contains gossip metrics used per node and server. 
@@ -38,7 +37,7 @@ func makeMetrics() Metrics { } func (m Metrics) String() string { - return m.Snapshot().String() + return redact.StringWithoutMarkers(m.Snapshot()) } // Snapshot returns a snapshot of the metrics. @@ -53,54 +52,81 @@ func (m Metrics) Snapshot() MetricSnap { } func (m MetricSnap) String() string { - s := fmt.Sprintf("infos %d/%d sent/received, bytes %dB/%dB sent/received", - m.InfosSent, m.InfosReceived, m.BytesSent, m.BytesReceived) + return redact.StringWithoutMarkers(m) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (m MetricSnap) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("infos %d/%d sent/received, bytes %dB/%dB sent/received", + m.InfosSent, m.InfosReceived, + m.BytesSent, m.BytesReceived) if m.ConnsRefused > 0 { - s += fmt.Sprintf(", refused %d conns", m.ConnsRefused) + w.Printf(", refused %d conns", m.ConnsRefused) } - return s } func (c OutgoingConnStatus) String() string { - return fmt.Sprintf("%d: %s (%s: %s)", - c.NodeID, c.Address, roundSecs(time.Duration(c.AgeNanos)), c.MetricSnap) + return redact.StringWithoutMarkers(c) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (c OutgoingConnStatus) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("%d: %s (%s: %s)", + c.NodeID, c.Address, + roundSecs(time.Duration(c.AgeNanos)), c.MetricSnap) } func (c ClientStatus) String() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "gossip client (%d/%d cur/max conns)\n", len(c.ConnStatus), c.MaxConns) + return redact.StringWithoutMarkers(c) +} + +// SafeFormat implements the redact.SafeFormatter interface. 
+func (c ClientStatus) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("gossip client (%d/%d cur/max conns)\n", + len(c.ConnStatus), c.MaxConns) for _, conn := range c.ConnStatus { - fmt.Fprintf(&buf, " %s\n", conn) + w.Printf(" %s\n", conn) } - return buf.String() } func (c ConnStatus) String() string { - return fmt.Sprintf("%d: %s (%s)", c.NodeID, c.Address, roundSecs(time.Duration(c.AgeNanos))) + return redact.StringWithoutMarkers(c) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (c ConnStatus) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("%d: %s (%s)", c.NodeID, c.Address, + roundSecs(time.Duration(c.AgeNanos))) } func (s ServerStatus) String() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "gossip server (%d/%d cur/max conns, %s)\n", + return redact.StringWithoutMarkers(s) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (s ServerStatus) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("gossip server (%d/%d cur/max conns, %s)\n", len(s.ConnStatus), s.MaxConns, s.MetricSnap) for _, conn := range s.ConnStatus { - fmt.Fprintf(&buf, " %s\n", conn) + w.Printf(" %s\n", conn) } - return buf.String() } func (c Connectivity) String() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "gossip connectivity\n") + return redact.StringWithoutMarkers(c) +} + +// SafeFormat implements the redact.SafeFormatter interface. 
+func (c Connectivity) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("gossip connectivity\n") if c.SentinelNodeID != 0 { - fmt.Fprintf(&buf, " n%d [sentinel];\n", c.SentinelNodeID) + w.Printf(" n%d [sentinel];\n", c.SentinelNodeID) } if len(c.ClientConns) > 0 { - fmt.Fprintf(&buf, " ") + w.SafeRune(' ') for _, conn := range c.ClientConns { - fmt.Fprintf(&buf, " n%d -> n%d;", conn.SourceID, conn.TargetID) + w.Printf(" n%d -> n%d;", conn.SourceID, conn.TargetID) } - fmt.Fprintf(&buf, "\n") + w.SafeRune('\n') } - return buf.String() } diff --git a/pkg/jobs/jobs.go b/pkg/jobs/jobs.go index a227bff7eac3..4e8e6009f490 100644 --- a/pkg/jobs/jobs.go +++ b/pkg/jobs/jobs.go @@ -361,11 +361,11 @@ func (j *Job) paused(ctx context.Context, fn func(context.Context, *kv.Txn) erro }) } -// resumed sets the status of the tracked job to running or reverting iff the +// unpaused sets the status of the tracked job to running or reverting iff the // job is currently paused. It does not directly resume the job; rather, it // expires the job's lease so that a Registry adoption loop detects it and // resumes it. -func (j *Job) resumed(ctx context.Context) error { +func (j *Job) unpaused(ctx context.Context) error { return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusRunning || md.Status == StatusReverting { // Already resumed - do nothing. diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go index d1c4ca5eca1b..97337a7fc7af 100644 --- a/pkg/jobs/jobs_test.go +++ b/pkg/jobs/jobs_test.go @@ -1094,11 +1094,11 @@ func TestJobLifecycle(t *testing.T) { if err := exp.verify(job.ID(), jobs.StatusPaused); err != nil { t.Fatal(err) } - if err := registry.Resume(ctx, nil, *job.ID()); err != nil { + if err := registry.Unpause(ctx, nil, *job.ID()); err != nil { t.Fatal(err) } // Resume the job again to ensure that the resumption is idempotent. 
- if err := registry.Resume(ctx, nil, *job.ID()); err != nil { + if err := registry.Unpause(ctx, nil, *job.ID()); err != nil { t.Fatal(err) } if err := exp.verify(job.ID(), jobs.StatusRunning); err != nil { @@ -1172,7 +1172,7 @@ func TestJobLifecycle(t *testing.T) { if err := registry.CancelRequested(ctx, nil, *job.ID()); err != nil { t.Fatal(err) } - if err := registry.Resume(ctx, nil, *job.ID()); !testutils.IsError(err, "cannot be resumed") { + if err := registry.Unpause(ctx, nil, *job.ID()); !testutils.IsError(err, "cannot be resumed") { t.Errorf("got unexpected status '%v'", err) } } @@ -1183,7 +1183,7 @@ func TestJobLifecycle(t *testing.T) { t.Fatal(err) } expectedErr := fmt.Sprintf("job with status %s cannot be resumed", jobs.StatusSucceeded) - if err := registry.Resume(ctx, nil, *job.ID()); !testutils.IsError(err, expectedErr) { + if err := registry.Unpause(ctx, nil, *job.ID()); !testutils.IsError(err, expectedErr) { t.Errorf("expected '%s', but got '%v'", expectedErr, err) } } diff --git a/pkg/jobs/registry.go b/pkg/jobs/registry.go index 5abe7f03ce33..9849a053115a 100644 --- a/pkg/jobs/registry.go +++ b/pkg/jobs/registry.go @@ -709,13 +709,14 @@ func (r *Registry) Failed(ctx context.Context, txn *kv.Txn, id int64, causingErr return job.WithTxn(txn).failed(ctx, causingError, nil) } -// Resume resumes the paused job with id using the specified txn (may be nil). -func (r *Registry) Resume(ctx context.Context, txn *kv.Txn, id int64) error { +// Unpause changes the paused job with id to running or reverting using the +// specified txn (may be nil). +func (r *Registry) Unpause(ctx context.Context, txn *kv.Txn, id int64) error { job, _, err := r.getJobFn(ctx, txn, id) if err != nil { return err } - return job.WithTxn(txn).resumed(ctx) + return job.WithTxn(txn).unpaused(ctx) } // Resumer is a resumable job, and is associated with a Job object. 
Jobs can be diff --git a/pkg/jobs/registry_test.go b/pkg/jobs/registry_test.go index 330a26ad6d6e..12cb7a89270c 100644 --- a/pkg/jobs/registry_test.go +++ b/pkg/jobs/registry_test.go @@ -218,12 +218,13 @@ func TestRegistryGC(t *testing.T) { muchEarlier := ts.Add(-2 * time.Hour) setMutations := func(mutations []sqlbase.DescriptorMutation) sqlbase.ID { - desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "to_be_mutated") + desc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "to_be_mutated") desc.Mutations = mutations if err := kvDB.Put( context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()), - sqlbase.WrapDescriptor(desc), + desc.DescriptorProto(), ); err != nil { t.Fatal(err) } @@ -231,12 +232,13 @@ func TestRegistryGC(t *testing.T) { } setGCMutations := func(gcMutations []sqlbase.TableDescriptor_GCDescriptorMutation) sqlbase.ID { - desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "to_be_mutated") + desc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "to_be_mutated") desc.GCMutations = gcMutations if err := kvDB.Put( context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()), - sqlbase.WrapDescriptor(desc), + desc.DescriptorProto(), ); err != nil { t.Fatal(err) } @@ -244,7 +246,8 @@ func TestRegistryGC(t *testing.T) { } setDropJob := func(shouldDrop bool) sqlbase.ID { - desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "to_be_mutated") + desc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "to_be_mutated") if shouldDrop { desc.DropJobID = 123 } else { @@ -254,7 +257,7 @@ func TestRegistryGC(t *testing.T) { if err := kvDB.Put( context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()), - sqlbase.WrapDescriptor(desc), + desc.DescriptorProto(), ); err != nil { t.Fatal(err) } diff --git 
a/pkg/kv/kvserver/client_rangefeed_test.go b/pkg/kv/kvserver/client_rangefeed_test.go index 649727e62ef6..a8571eb4fcbe 100644 --- a/pkg/kv/kvserver/client_rangefeed_test.go +++ b/pkg/kv/kvserver/client_rangefeed_test.go @@ -63,15 +63,12 @@ func TestRangefeedWorksOnSystemRangesUnconditionally(t *testing.T) { const junkDescriptorID = 42 require.GreaterOrEqual(t, keys.MaxReservedDescID, junkDescriptorID) junkDescriptorKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, junkDescriptorID) - junkDescriptor := sqlbase.WrapDescriptor(&sqlbase.DatabaseDescriptor{ - Name: "junk", - ID: junkDescriptorID, - }) + junkDescriptor := sqlbase.NewInitialDatabaseDescriptor(junkDescriptorID, "junk") require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } - return txn.Put(ctx, junkDescriptorKey, junkDescriptor) + return txn.Put(ctx, junkDescriptorKey, junkDescriptor.DescriptorProto()) })) after := db.Clock().Now() for { @@ -83,7 +80,7 @@ func TestRangefeedWorksOnSystemRangesUnconditionally(t *testing.T) { if ev.Val != nil && ev.Val.Key.Equal(junkDescriptorKey) { var gotProto sqlbase.Descriptor require.NoError(t, ev.Val.Value.GetProto(&gotProto)) - require.EqualValues(t, junkDescriptor, &gotProto) + require.EqualValues(t, junkDescriptor.DescriptorProto(), &gotProto) break } } diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index a69c3709b579..5ab1f0df8630 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -1303,7 +1303,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { for i := keys.MinUserDescID; i <= userTableMax; i++ { // We don't care about the value, just the key. 
key := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(i)) - if err := txn.Put(ctx, key, sqlbase.WrapDescriptor(&sqlbase.TableDescriptor{})); err != nil { + if err := txn.Put(ctx, key, (&sqlbase.TableDescriptor{}).DescriptorProto()); err != nil { return err } } @@ -1370,7 +1370,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { // This time, only write the last table descriptor. Splits only occur for // the descriptor we add. We don't care about the value, just the key. k := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(userTableMax)) - return txn.Put(ctx, k, sqlbase.WrapDescriptor(&sqlbase.TableDescriptor{})) + return txn.Put(ctx, k, (&sqlbase.TableDescriptor{}).DescriptorProto()) }); err != nil { t.Fatal(err) } @@ -1705,9 +1705,9 @@ func TestStoreSplitTimestampCacheDifferentLeaseHolder(t *testing.T) { } return replica } - blacklistedLeaseHolder := leaseHolder(leftKey) - log.Infof(ctx, "blacklisting replica %+v for leases", blacklistedLeaseHolder) - noLeaseForDesc.Store(&blacklistedLeaseHolder) + blocklistedLeaseHolder := leaseHolder(leftKey) + log.Infof(ctx, "blocklisting replica %+v for leases", blocklistedLeaseHolder) + noLeaseForDesc.Store(&blocklistedLeaseHolder) // Pull the trigger. This actually also reads the RHS descriptor after the // split, so when this returns, we've got the leases set up already. 
@@ -1727,9 +1727,9 @@ func TestStoreSplitTimestampCacheDifferentLeaseHolder(t *testing.T) { } if currentLHSLeaseHolder := leaseHolder(leftKey); !reflect.DeepEqual( - currentLHSLeaseHolder, blacklistedLeaseHolder) { + currentLHSLeaseHolder, blocklistedLeaseHolder) { t.Fatalf("lease holder changed from %+v to %+v, should de-flake this test", - blacklistedLeaseHolder, currentLHSLeaseHolder) + blocklistedLeaseHolder, currentLHSLeaseHolder) } // This write (to the right-hand side of the split) should hit the @@ -1755,7 +1755,7 @@ func TestStoreSplitTimestampCacheDifferentLeaseHolder(t *testing.T) { // that it's the same ReplicaID, which is not required but should always // hold). if rhsLease := leaseHolder(rightKey); !reflect.DeepEqual( - rhsLease, blacklistedLeaseHolder, + rhsLease, blocklistedLeaseHolder, ) { t.Errorf("expected LHS and RHS to have same lease holder") } diff --git a/pkg/kv/kvserver/concurrency/concurrency_manager_test.go b/pkg/kv/kvserver/concurrency/concurrency_manager_test.go index aba3ca1b4ebc..df078d108ec5 100644 --- a/pkg/kv/kvserver/concurrency/concurrency_manager_test.go +++ b/pkg/kv/kvserver/concurrency/concurrency_manager_test.go @@ -16,6 +16,7 @@ import ( "fmt" "io/ioutil" "reflect" + "regexp" "runtime" "sort" "strconv" @@ -925,11 +926,18 @@ func (m *monitor) collectRecordings() string { if log.g.opSeq != 0 { seq = strconv.Itoa(log.g.opSeq) } - fmt.Fprintf(&buf, "[%s] %s: %s", seq, log.g.opName, log.value) + logValue := stripFileLinePrefix(log.value) + fmt.Fprintf(&buf, "[%s] %s: %s", seq, log.g.opName, logValue) } return buf.String() } +func stripFileLinePrefix(s string) string { + return reFileLinePrefix.ReplaceAllString(s, "") +} + +var reFileLinePrefix = regexp.MustCompile(`^[^:]+:\d+ `) + func (m *monitor) hasNewEvents(g *monitoredGoroutine) bool { events := 0 rec := g.collect() diff --git a/pkg/kv/kvserver/gossip_test.go b/pkg/kv/kvserver/gossip_test.go index 93de691aa152..937950fe51fe 100644 --- a/pkg/kv/kvserver/gossip_test.go +++ 
b/pkg/kv/kvserver/gossip_test.go @@ -212,12 +212,16 @@ func TestGossipAfterAbortOfSystemConfigTransactionAfterFailureDueToIntents(t *te txB := db.NewTxn(ctx, "b") require.NoError(t, txA.SetSystemConfigTrigger()) - require.NoError(t, txA.Put( - ctx, keys.SystemSQLCodec.DescMetadataKey(1000), sqlbase.WrapDescriptor(&sqlbase.DatabaseDescriptor{}))) + db1000 := sqlbase.NewInitialDatabaseDescriptor(1000, "1000") + require.NoError(t, txA.Put(ctx, + keys.SystemSQLCodec.DescMetadataKey(1000), + db1000.DescriptorProto())) require.NoError(t, txB.SetSystemConfigTrigger()) - require.NoError(t, txB.Put( - ctx, keys.SystemSQLCodec.DescMetadataKey(2000), sqlbase.WrapDescriptor(&sqlbase.DatabaseDescriptor{}))) + db2000 := sqlbase.NewInitialDatabaseDescriptor(2000, "2000") + require.NoError(t, txB.Put(ctx, + keys.SystemSQLCodec.DescMetadataKey(2000), + db2000.DescriptorProto())) const someTime = 10 * time.Millisecond clearNotifictions := func(ch <-chan struct{}) { diff --git a/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go b/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go index 795824d31b70..a71549a51188 100644 --- a/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go +++ b/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go @@ -465,7 +465,8 @@ func TestCorruptData(t *testing.T) { return err })) log.Flush() - entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 100, msg) + entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 100, msg, + log.WithFlattenedSensitiveData) require.NoError(t, err) require.Len(t, entries, 1) for _, e := range entries { @@ -516,7 +517,8 @@ func TestCorruptData(t *testing.T) { })) log.Flush() - entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 100, msg) + entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 100, msg, + log.WithFlattenedSensitiveData) require.NoError(t, err) require.Len(t, entries, 1) for _, e := range entries { diff --git a/pkg/kv/kvserver/raft.go b/pkg/kv/kvserver/raft.go index 
6b29eed5ac75..73571545a4da 100644 --- a/pkg/kv/kvserver/raft.go +++ b/pkg/kv/kvserver/raft.go @@ -293,3 +293,6 @@ func traceProposals(r *Replica, ids []kvserverbase.CmdIDKey, event string) { log.Eventf(ctx, "%v", event) } } + +// SafeValue implements the redact.SafeValue interface. +func (SnapshotRequest_Type) SafeValue() {} diff --git a/pkg/kv/kvserver/raft_snapshot_queue.go b/pkg/kv/kvserver/raft_snapshot_queue.go index 3bba46fecf00..2911dab29e89 100644 --- a/pkg/kv/kvserver/raft_snapshot_queue.go +++ b/pkg/kv/kvserver/raft_snapshot_queue.go @@ -123,7 +123,9 @@ func (rq *raftSnapshotQueue) processRaftSnapshot( // bail for now and try again later. err := errors.Errorf( "skipping snapshot; replica is likely a learner in the process of being added: %s", repDesc) - log.Infof(ctx, "%v", err) + // TODO(knz): print the error instead when the error package + // knows how to expose redactable strings. + log.Infof(ctx, "skipping snapshot; replica is likely a learner in the process of being added: %s", repDesc) // TODO(dan): This is super brittle and non-obvious. In the common case, // this check avoids duplicate work, but in rare cases, we send the // learner snap at an index before the one raft wanted here. 
The raft diff --git a/pkg/kv/kvserver/replica.go b/pkg/kv/kvserver/replica.go index 2539ed91fbb4..4052d271d310 100644 --- a/pkg/kv/kvserver/replica.go +++ b/pkg/kv/kvserver/replica.go @@ -13,7 +13,6 @@ package kvserver import ( "context" "fmt" - "strings" "sync/atomic" "time" "unsafe" @@ -46,6 +45,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/quotapool" + "github.com/cockroachdb/cockroach/pkg/util/redact" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" @@ -133,30 +133,41 @@ type atomicDescString struct { // store atomically updates d.strPtr with the string representation of desc. func (d *atomicDescString) store(replicaID roachpb.ReplicaID, desc *roachpb.RangeDescriptor) { - var buf strings.Builder - fmt.Fprintf(&buf, "%d/", desc.RangeID) - if replicaID == 0 { - fmt.Fprintf(&buf, "?:") - } else { - fmt.Fprintf(&buf, "%d:", replicaID) - } + str := redact.Sprintfn(func(w redact.SafePrinter) { + w.Printf("%d/", desc.RangeID) + if replicaID == 0 { + w.SafeString("?:") + } else { + w.Printf("%d:", replicaID) + } - if !desc.IsInitialized() { - buf.WriteString("{-}") - } else { - const maxRangeChars = 30 - rngStr := keys.PrettyPrintRange(roachpb.Key(desc.StartKey), roachpb.Key(desc.EndKey), maxRangeChars) - buf.WriteString(rngStr) - } + if !desc.IsInitialized() { + w.SafeString("{-}") + } else { + const maxRangeChars = 30 + rngStr := keys.PrettyPrintRange(roachpb.Key(desc.StartKey), roachpb.Key(desc.EndKey), maxRangeChars) + w.UnsafeString(rngStr) + } + }) - str := buf.String() atomic.StorePointer(&d.strPtr, unsafe.Pointer(&str)) } // String returns the string representation of the range; since we are not // using a lock, the copy might be inconsistent. 
func (d *atomicDescString) String() string { - return *(*string)(atomic.LoadPointer(&d.strPtr)) + return d.get().StripMarkers() +} + +// SafeFormat renders the string safely. +func (d *atomicDescString) SafeFormat(w redact.SafePrinter, _ rune) { + w.Print(d.get()) +} + +// get returns the string representation of the range; since we are not +// using a lock, the copy might be inconsistent. +func (d *atomicDescString) get() redact.RedactableString { + return *(*redact.RedactableString)(atomic.LoadPointer(&d.strPtr)) } // atomicConnectionClass stores an rpc.ConnectionClass atomically. @@ -581,7 +592,13 @@ var _ kv.Sender = &Replica{} // require a lock and its output may not be atomic with other ongoing work in // the replica. This is done to prevent deadlocks in logging sites. func (r *Replica) String() string { - return fmt.Sprintf("[n%d,s%d,r%s]", r.store.Ident.NodeID, r.store.Ident.StoreID, &r.rangeStr) + return redact.StringWithoutMarkers(r) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (r *Replica) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("[n%d,s%d,r%s]", + r.store.Ident.NodeID, r.store.Ident.StoreID, r.rangeStr.get()) } // ReplicaID returns the ID for the Replica. It may be zero if the replica does diff --git a/pkg/kv/kvserver/replica_application_result.go b/pkg/kv/kvserver/replica_application_result.go index 801c0a09ab6e..95ad607c0e02 100644 --- a/pkg/kv/kvserver/replica_application_result.go +++ b/pkg/kv/kvserver/replica_application_result.go @@ -46,7 +46,7 @@ import ( func isTrivial(r *kvserverpb.ReplicatedEvalResult) bool { // Check if there are any non-trivial State updates. if r.State != nil { - stateWhitelist := *r.State + stateAllowlist := *r.State // ReplicaState.Stats was previously non-nullable which caused nodes to // send a zero-value MVCCStats structure. 
If the proposal was generated by // an old node, we'll have decoded that zero-value structure setting @@ -54,23 +54,23 @@ func isTrivial(r *kvserverpb.ReplicatedEvalResult) bool { // field in ReplicatedEvalResult" assertion to fire if we didn't clear it. // TODO(ajwerner): eliminate this case that likely can no longer occur as of // at least 19.1. - if stateWhitelist.Stats != nil && (*stateWhitelist.Stats == enginepb.MVCCStats{}) { - stateWhitelist.Stats = nil + if stateAllowlist.Stats != nil && (*stateAllowlist.Stats == enginepb.MVCCStats{}) { + stateAllowlist.Stats = nil } - if stateWhitelist != (kvserverpb.ReplicaState{}) { + if stateAllowlist != (kvserverpb.ReplicaState{}) { return false } } - // Set whitelist to the value of r and clear the whitelisted fields. - // If whitelist is zero-valued after clearing the whitelisted fields then + // Set allowlist to the value of r and clear the allowlisted fields. + // If allowlist is zero-valued after clearing the allowlisted fields then // it is trivial. 
- whitelist := *r - whitelist.Delta = enginepb.MVCCStatsDelta{} - whitelist.Timestamp = hlc.Timestamp{} - whitelist.DeprecatedDelta = nil - whitelist.PrevLeaseProposal = nil - whitelist.State = nil - return whitelist.Equal(kvserverpb.ReplicatedEvalResult{}) + allowlist := *r + allowlist.Delta = enginepb.MVCCStatsDelta{} + allowlist.Timestamp = hlc.Timestamp{} + allowlist.DeprecatedDelta = nil + allowlist.PrevLeaseProposal = nil + allowlist.State = nil + return allowlist.Equal(kvserverpb.ReplicatedEvalResult{}) } // clearTrivialReplicatedEvalResultFields is used to zero out the fields of a diff --git a/pkg/kv/kvserver/replica_command.go b/pkg/kv/kvserver/replica_command.go index 5eea9f930352..10aabe75219b 100644 --- a/pkg/kv/kvserver/replica_command.go +++ b/pkg/kv/kvserver/replica_command.go @@ -2037,12 +2037,12 @@ func (s *Store) AdminRelocateRange( rangeDesc = *newDesc canRetry := func(err error) bool { - whitelist := []string{ + allowlist := []string{ snapshotApplySemBusyMsg, IntersectingSnapshotMsg, } errStr := err.Error() - for _, substr := range whitelist { + for _, substr := range allowlist { if strings.Contains(errStr, substr) { return true } diff --git a/pkg/kv/kvserver/replica_evaluate.go b/pkg/kv/kvserver/replica_evaluate.go index 1058e5f8c061..20989bc074d3 100644 --- a/pkg/kv/kvserver/replica_evaluate.go +++ b/pkg/kv/kvserver/replica_evaluate.go @@ -187,7 +187,7 @@ func evaluateBatch( // has already been aborted. // - heartbeats don't check the abort span. If the txn is aborted, they'll // return an aborted proto in their otherwise successful response. - // TODO(nvanbenschoten): Let's remove heartbeats from this whitelist when + // TODO(nvanbenschoten): Let's remove heartbeats from this allowlist when // we rationalize the TODO in txnHeartbeater.heartbeat. 
if !ba.IsSingleAbortTxnRequest() && !ba.IsSingleHeartbeatTxnRequest() { if pErr := checkIfTxnAborted(ctx, rec, readWriter, *baHeader.Txn); pErr != nil { diff --git a/pkg/kv/kvserver/replica_learner_test.go b/pkg/kv/kvserver/replica_learner_test.go index de572196dbca..fb2b3997cc27 100644 --- a/pkg/kv/kvserver/replica_learner_test.go +++ b/pkg/kv/kvserver/replica_learner_test.go @@ -629,7 +629,7 @@ func TestLearnerReplicateQueueRace(t *testing.T) { } formattedTrace := trace.String() expectedMessages := []string{ - `could not promote .*n3,s3.* to voter, rolling back: change replicas of r\d+ failed: descriptor changed`, + `could not promote .*n3,s3.* to voter, rolling back:.*change replicas of r\d+ failed: descriptor changed`, `learner to roll back not found`, } return testutils.MatchInOrder(formattedTrace, expectedMessages...) diff --git a/pkg/kv/kvserver/replica_proposal.go b/pkg/kv/kvserver/replica_proposal.go index 3bb2c64accaf..105c329e09a8 100644 --- a/pkg/kv/kvserver/replica_proposal.go +++ b/pkg/kv/kvserver/replica_proposal.go @@ -750,7 +750,7 @@ func (r *Replica) evaluateProposal( } // Failed proposals can't have any Result except for what's - // whitelisted here. + // allowlisted here. 
res.Local = result.LocalResult{ EncounteredIntents: res.Local.DetachEncounteredIntents(), EndTxns: res.Local.DetachEndTxns(true /* alwaysOnly */), diff --git a/pkg/kv/kvserver/replica_raftstorage.go b/pkg/kv/kvserver/replica_raftstorage.go index d615bb6bc228..324fe2c7f5e2 100644 --- a/pkg/kv/kvserver/replica_raftstorage.go +++ b/pkg/kv/kvserver/replica_raftstorage.go @@ -29,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/cockroach/pkg/util/redact" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" @@ -462,7 +463,12 @@ type OutgoingSnapshot struct { } func (s *OutgoingSnapshot) String() string { - return fmt.Sprintf("%s snapshot %s at applied index %d", s.snapType, s.SnapUUID.Short(), s.State.RaftAppliedIndex) + return redact.StringWithoutMarkers(s) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (s *OutgoingSnapshot) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("%s snapshot %s at applied index %d", s.snapType, s.SnapUUID.Short(), s.State.RaftAppliedIndex) } // Close releases the resources associated with the snapshot. @@ -496,7 +502,12 @@ type IncomingSnapshot struct { } func (s *IncomingSnapshot) String() string { - return fmt.Sprintf("%s snapshot %s at applied index %d", s.snapType, s.SnapUUID.Short(), s.State.RaftAppliedIndex) + return redact.StringWithoutMarkers(s) +} + +// SafeFormat implements the redact.SafeFormatter interface. 
+func (s *IncomingSnapshot) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("%s snapshot %s at applied index %d", s.snapType, s.SnapUUID.Short(), s.State.RaftAppliedIndex) } // snapshot creates an OutgoingSnapshot containing a rocksdb snapshot for the diff --git a/pkg/kv/kvserver/replica_read.go b/pkg/kv/kvserver/replica_read.go index 464d92401357..6d04e0d884ac 100644 --- a/pkg/kv/kvserver/replica_read.go +++ b/pkg/kv/kvserver/replica_read.go @@ -137,7 +137,7 @@ func (r *Replica) executeReadOnlyBatchWithServersideRefreshes( if pErr != nil { // Failed read-only batches can't have any Result except for what's - // whitelisted here. + // allowlisted here. res.Local = result.LocalResult{ EncounteredIntents: res.Local.DetachEncounteredIntents(), Metrics: res.Local.Metrics, diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index 68b73212f409..02e34dc4f5ae 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -12086,7 +12086,7 @@ func TestProposalNotAcknowledgedOrReproposedAfterApplication(t *testing.T) { stopper.Quiesce(ctx) entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, - regexp.MustCompile("net/trace")) + regexp.MustCompile("net/trace"), log.WithFlattenedSensitiveData) if err != nil { t.Fatal(err) } @@ -12188,7 +12188,7 @@ func TestLaterReproposalsDoNotReuseContext(t *testing.T) { // Check and see if the trace package logged an error. 
log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, - regexp.MustCompile("net/trace")) + regexp.MustCompile("net/trace"), log.WithFlattenedSensitiveData) if err != nil { t.Fatal(err) } diff --git a/pkg/kv/kvserver/reports/constraint_stats_report_test.go b/pkg/kv/kvserver/reports/constraint_stats_report_test.go index ebcd739c5d00..41783a38db6c 100644 --- a/pkg/kv/kvserver/reports/constraint_stats_report_test.go +++ b/pkg/kv/kvserver/reports/constraint_stats_report_test.go @@ -754,10 +754,8 @@ func compileTestCase(tc baseReportTestCase) (compiledTestCase, error) { return compiledTestCase{}, err } } - sysCfgBuilder.addDBDesc(dbID, sqlbase.DatabaseDescriptor{ - Name: db.name, - ID: sqlbase.ID(dbID), - }) + sysCfgBuilder.addDBDesc(dbID, + sqlbase.NewInitialDatabaseDescriptor(sqlbase.ID(dbID), db.name)) for _, table := range db.tables { tableID := objectCounter @@ -1090,16 +1088,11 @@ func (b *systemConfigBuilder) addTableDesc(id int, tableDesc sqlbase.TableDescri } // addTableDesc adds a database descriptor to the SystemConfig. -func (b *systemConfigBuilder) addDBDesc(id int, dbDesc sqlbase.DatabaseDescriptor) { +func (b *systemConfigBuilder) addDBDesc(id int, dbDesc *sqlbase.ImmutableDatabaseDescriptor) { // Write the table to the SystemConfig, in the descriptors table. 
k := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(id)) - desc := &sqlbase.Descriptor{ - Union: &sqlbase.Descriptor_Database{ - Database: &dbDesc, - }, - } var v roachpb.Value - if err := v.SetProto(desc); err != nil { + if err := v.SetProto(dbDesc.DescriptorProto()); err != nil { panic(err) } b.kv = append(b.kv, roachpb.KeyValue{Key: k, Value: v}) diff --git a/pkg/kv/kvserver/track_raft_protos.go b/pkg/kv/kvserver/track_raft_protos.go index 6cb428fc0333..b77b1927b412 100644 --- a/pkg/kv/kvserver/track_raft_protos.go +++ b/pkg/kv/kvserver/track_raft_protos.go @@ -37,7 +37,7 @@ func TrackRaftProtos() func() []reflect.Type { applyRaftEntryFunc := funcName((*apply.Task).ApplyCommittedEntries) // We only need to track protos that could cause replica divergence // by being written to disk downstream of raft. - whitelist := []string{ + allowlist := []string{ // Some raft operations trigger gossip, but we don't require // strict consistency there. funcName((*gossip.Gossip).AddInfoProto), @@ -92,14 +92,14 @@ func TrackRaftProtos() func() []reflect.Type { for { f, more := frames.Next() - whitelisted := false - for _, s := range whitelist { + allowlisted := false + for _, s := range allowlist { if strings.Contains(f.Function, s) { - whitelisted = true + allowlisted = true break } } - if whitelisted { + if allowlisted { break } diff --git a/pkg/kv/txn_test.go b/pkg/kv/txn_test.go index 189b609f6c1d..87f688f3c745 100644 --- a/pkg/kv/txn_test.go +++ b/pkg/kv/txn_test.go @@ -62,7 +62,11 @@ func TestTxnSnowballTrace(t *testing.T) { found, err := regexp.MatchString( // The (?s) makes "." match \n. This makes the test resilient to other log // lines being interspersed. - "(?s).*event:inside txn\n.*event:client.Txn did AutoCommit. err: \n.*\n.*event:txn complete.*", + `(?s)`+ + `.*event:[^:]*:\d+ inside txn\n`+ + `.*event:[^:]*:\d+ client\.Txn did AutoCommit\. 
err: \n`+ + `.*\n`+ + `.*event:[^:]*:\d+ txn complete.*`, dump) if err != nil { t.Fatal(err) diff --git a/pkg/release/release.go b/pkg/release/release.go new file mode 100644 index 000000000000..f0e75313983c --- /dev/null +++ b/pkg/release/release.go @@ -0,0 +1,144 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package release contains utilities for assisting with the release process. +// This is intended for use for the release commands. +package release + +import ( + "bufio" + "bytes" + "io" + "log" + "os" + "os/exec" + "regexp" + "strings" + + "github.com/cockroachdb/errors" +) + +// linuxStaticLibsRe returns the regexp of all static libraries. +var linuxStaticLibsRe = func() *regexp.Regexp { + libs := strings.Join([]string{ + regexp.QuoteMeta("linux-vdso.so."), + regexp.QuoteMeta("librt.so."), + regexp.QuoteMeta("libpthread.so."), + regexp.QuoteMeta("libdl.so."), + regexp.QuoteMeta("libm.so."), + regexp.QuoteMeta("libc.so."), + regexp.QuoteMeta("libresolv.so."), + strings.Replace(regexp.QuoteMeta("ld-linux-ARCH.so."), "ARCH", ".*", -1), + }, "|") + return regexp.MustCompile(libs) +}() + +// SupportedTarget contains metadata about a supported target. +type SupportedTarget struct { + BuildType string + Suffix string +} + +// SupportedTargets contains the supported targets that we build. +var SupportedTargets = []SupportedTarget{ + {BuildType: "darwin", Suffix: ".darwin-10.9-amd64"}, + {BuildType: "linux-gnu", Suffix: ".linux-2.6.32-gnu-amd64"}, + {BuildType: "windows", Suffix: ".windows-6.2-amd64.exe"}, +} + +// makeReleaseAndVerifyOptions are options for MakeRelease. 
+type makeReleaseAndVerifyOptions struct { + args []string + execFn ExecFn +} + +// ExecFn is a mockable wrapper that executes the given command. +type ExecFn func(*exec.Cmd) ([]byte, error) + +// DefaultExecFn is the default exec function. +var DefaultExecFn ExecFn = func(c *exec.Cmd) ([]byte, error) { + if c.Stdout != nil { + return nil, errors.New("exec: Stdout already set") + } + var stdout bytes.Buffer + c.Stdout = io.MultiWriter(&stdout, os.Stdout) + err := c.Run() + return stdout.Bytes(), err +} + +// MakeReleaseOption as an option for the MakeRelease function. +type MakeReleaseOption func(makeReleaseAndVerifyOptions) makeReleaseAndVerifyOptions + +// WithMakeReleaseOptionBuildArg adds a build argument to release. +func WithMakeReleaseOptionBuildArg(arg string) MakeReleaseOption { + return func(m makeReleaseAndVerifyOptions) makeReleaseAndVerifyOptions { + m.args = append(m.args, arg) + return m + } +} + +// WithMakeReleaseOptionExecFn changes the exec function of the given execFn. +func WithMakeReleaseOptionExecFn(r ExecFn) MakeReleaseOption { + return func(m makeReleaseAndVerifyOptions) makeReleaseAndVerifyOptions { + m.execFn = r + return m + } +} + +// MakeRelease makes the release binary. +func MakeRelease(b SupportedTarget, pkgDir string, opts ...MakeReleaseOption) error { + params := makeReleaseAndVerifyOptions{ + execFn: DefaultExecFn, + } + for _, opt := range opts { + params = opt(params) + } + + { + args := append([]string{b.BuildType}, params.args...) + cmd := exec.Command("mkrelease", args...) 
+ cmd.Dir = pkgDir + cmd.Stderr = os.Stderr + log.Printf("%s %s", cmd.Env, cmd.Args) + if out, err := params.execFn(cmd); err != nil { + return errors.Newf("%s %s: %s\n\n%s", cmd.Env, cmd.Args, err, out) + } + } + if strings.Contains(b.BuildType, "linux") { + binaryName := "./cockroach" + b.Suffix + + cmd := exec.Command(binaryName, "version") + cmd.Dir = pkgDir + cmd.Env = append(cmd.Env, "MALLOC_CONF=prof:true") + cmd.Stderr = os.Stderr + log.Printf("%s %s", cmd.Env, cmd.Args) + if out, err := params.execFn(cmd); err != nil { + return errors.Newf("%s %s: %s\n\n%s", cmd.Env, cmd.Args, err, out) + } + + cmd = exec.Command("ldd", binaryName) + cmd.Dir = pkgDir + log.Printf("%s %s", cmd.Env, cmd.Args) + out, err := params.execFn(cmd) + if err != nil { + log.Fatalf("%s: out=%q err=%s", cmd.Args, out, err) + } + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + if line := scanner.Text(); !linuxStaticLibsRe.MatchString(line) { + return errors.Newf("%s is not properly statically linked:\n%s", binaryName, out) + } + } + if err := scanner.Err(); err != nil { + return err + } + } + return nil +} diff --git a/pkg/roachpb/data.go b/pkg/roachpb/data.go index ff09c219d246..e01ac2424cae 100644 --- a/pkg/roachpb/data.go +++ b/pkg/roachpb/data.go @@ -38,6 +38,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/interval" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/cockroach/pkg/util/redact" "github.com/cockroachdb/cockroach/pkg/util/timetz" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" @@ -1603,6 +1604,11 @@ func confChangeImpl( var _ fmt.Stringer = &ChangeReplicasTrigger{} func (crt ChangeReplicasTrigger) String() string { + return redact.StringWithoutMarkers(crt) +} + +// SafeFormat implements the redact.SafeFormatter interface. 
+func (crt ChangeReplicasTrigger) SafeFormat(w redact.SafePrinter, _ rune) { var nextReplicaID ReplicaID var afterReplicas []ReplicaDescriptor added, removed := crt.Added(), crt.Removed() @@ -1617,10 +1623,9 @@ func (crt ChangeReplicasTrigger) String() string { nextReplicaID = crt.DeprecatedNextReplicaID afterReplicas = crt.DeprecatedUpdatedReplicas } - var chgS strings.Builder cc, err := crt.ConfChange(nil) if err != nil { - fmt.Fprintf(&chgS, "", err) + w.Printf("", err) } else { ccv2 := cc.AsV2() if ccv2.LeaveJoint() { @@ -1628,24 +1633,48 @@ func (crt ChangeReplicasTrigger) String() string { // // TODO(tbg): could list the replicas that will actually leave the // voter set. - fmt.Fprintf(&chgS, "LEAVE_JOINT") + w.SafeString("LEAVE_JOINT") } else if _, ok := ccv2.EnterJoint(); ok { - fmt.Fprintf(&chgS, "ENTER_JOINT(%s) ", raftpb.ConfChangesToString(ccv2.Changes)) + w.Printf("ENTER_JOINT(%s) ", confChangesToRedactableString(ccv2.Changes)) } else { - fmt.Fprintf(&chgS, "SIMPLE(%s) ", raftpb.ConfChangesToString(ccv2.Changes)) + w.Printf("SIMPLE(%s) ", confChangesToRedactableString(ccv2.Changes)) } } if len(added) > 0 { - fmt.Fprintf(&chgS, "%s%s", ADD_REPLICA, added) + w.Printf("%s%s", ADD_REPLICA, added) } if len(removed) > 0 { if len(added) > 0 { - chgS.WriteString(", ") + w.SafeString(", ") } - fmt.Fprintf(&chgS, "%s%s", REMOVE_REPLICA, removed) + w.Printf("%s%s", REMOVE_REPLICA, removed) } - fmt.Fprintf(&chgS, ": after=%s next=%d", afterReplicas, nextReplicaID) - return chgS.String() + w.Printf(": after=%s next=%d", afterReplicas, nextReplicaID) +} + +// confChangesToRedactableString produces a safe representation for +// the configuration changes. 
+func confChangesToRedactableString(ccs []raftpb.ConfChangeSingle) redact.RedactableString { + return redact.Sprintfn(func(w redact.SafePrinter) { + for i, cc := range ccs { + if i > 0 { + w.SafeRune(' ') + } + switch cc.Type { + case raftpb.ConfChangeAddNode: + w.SafeRune('v') + case raftpb.ConfChangeAddLearnerNode: + w.SafeRune('l') + case raftpb.ConfChangeRemoveNode: + w.SafeRune('r') + case raftpb.ConfChangeUpdateNode: + w.SafeRune('u') + default: + w.SafeString("unknown") + } + w.Print(cc.NodeID) + } + }) } func (crt ChangeReplicasTrigger) legacy() (ReplicaDescriptor, bool) { @@ -2247,3 +2276,6 @@ func init() { enginepb.FormatBytesAsKey = func(k []byte) string { return Key(k).String() } enginepb.FormatBytesAsValue = func(v []byte) string { return Value{RawBytes: v}.PrettyPrint() } } + +// SafeValue implements the redact.SafeValue interface. +func (ReplicaChangeType) SafeValue() {} diff --git a/pkg/roachpb/metadata.go b/pkg/roachpb/metadata.go index e0d82e30d46b..70a34a1cf24e 100644 --- a/pkg/roachpb/metadata.go +++ b/pkg/roachpb/metadata.go @@ -11,7 +11,6 @@ package roachpb import ( - "bytes" "fmt" "sort" "strconv" @@ -19,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" + "github.com/cockroachdb/cockroach/pkg/util/redact" "github.com/cockroachdb/errors" ) @@ -32,6 +32,9 @@ func (n NodeID) String() string { return strconv.FormatInt(int64(n), 10) } +// SafeValue implements the redact.SafeValue interface. +func (n NodeID) SafeValue() {} + // StoreID is a custom type for a cockroach store ID. type StoreID int32 @@ -48,6 +51,9 @@ func (n StoreID) String() string { return strconv.FormatInt(int64(n), 10) } +// SafeValue implements the redact.SafeValue interface. +func (n StoreID) SafeValue() {} + // A RangeID is a unique ID associated to a Raft consensus group. 
type RangeID int64 @@ -56,6 +62,9 @@ func (r RangeID) String() string { return strconv.FormatInt(int64(r), 10) } +// SafeValue implements the redact.SafeValue interface. +func (r RangeID) SafeValue() {} + // RangeIDSlice implements sort.Interface. type RangeIDSlice []RangeID @@ -71,6 +80,9 @@ func (r ReplicaID) String() string { return strconv.FormatInt(int64(r), 10) } +// SafeValue implements the redact.SafeValue interface. +func (r ReplicaID) SafeValue() {} + // Equals returns whether the Attributes lists are equivalent. Attributes lists // are treated as sets, meaning that ordering and duplicates are ignored. func (a Attributes) Equals(b Attributes) bool { @@ -281,100 +293,60 @@ func (r *RangeDescriptor) Validate() error { } func (r RangeDescriptor) String() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "r%d:", r.RangeID) + return redact.StringWithoutMarkers(r) +} +// SafeFormat implements the redact.SafeFormatter interface. +func (r RangeDescriptor) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("r%d:", r.RangeID) if !r.IsInitialized() { - buf.WriteString("{-}") + w.SafeString("{-}") } else { - buf.WriteString(r.RSpan().String()) + w.Print(r.RSpan()) } - buf.WriteString(" [") + w.SafeString(" [") if allReplicas := r.Replicas().All(); len(allReplicas) > 0 { for i, rep := range allReplicas { if i > 0 { - buf.WriteString(", ") + w.SafeString(", ") } - buf.WriteString(rep.String()) + w.Print(rep) } } else { - buf.WriteString("") + w.SafeString("") } - fmt.Fprintf(&buf, ", next=%d, gen=%d", r.NextReplicaID, r.Generation) + w.Printf(", next=%d, gen=%d", r.NextReplicaID, r.Generation) if s := r.GetStickyBit(); !s.IsEmpty() { - fmt.Fprintf(&buf, ", sticky=%s", s) + w.Printf(", sticky=%s", s) } - buf.WriteString("]") - - return buf.String() + w.SafeString("]") } -// SafeMessage implements the SafeMessager interface. -// -// This method should be kept in sync with the String() method, except for the Start/End keys, which are customer data. 
-func (r RangeDescriptor) SafeMessage() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "r%d:", r.RangeID) - if !r.IsInitialized() { - buf.WriteString("{-}") - } - buf.WriteString(" [") - - if allReplicas := r.Replicas().All(); len(allReplicas) > 0 { - for i, rep := range allReplicas { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(rep.SafeMessage()) - } - } else { - buf.WriteString("") - } - fmt.Fprintf(&buf, ", next=%d, gen=%d", r.NextReplicaID, r.Generation) - if s := r.GetStickyBit(); !s.IsEmpty() { - fmt.Fprintf(&buf, ", sticky=%s", s) - } - buf.WriteString("]") - - return buf.String() +func (r ReplicationTarget) String() string { + return redact.StringWithoutMarkers(r) } -func (r ReplicationTarget) String() string { - return fmt.Sprintf("n%d,s%d", r.NodeID, r.StoreID) +// SafeFormat implements the redact.SafeFormatter interface. +func (r ReplicationTarget) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("n%d,s%d", r.NodeID, r.StoreID) } func (r ReplicaDescriptor) String() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "(n%d,s%d):", r.NodeID, r.StoreID) - if r.ReplicaID == 0 { - buf.WriteString("?") - } else { - fmt.Fprintf(&buf, "%d", r.ReplicaID) - } - if typ := r.GetType(); typ != VOTER_FULL { - buf.WriteString(typ.String()) - } - return buf.String() + return redact.StringWithoutMarkers(r) } -// SafeMessage implements the SafeMessager interface. -// -// This method should be kept in sync with the String() method, while there is no customer data in the ReplicaDescriptor -// today, we maintain this method for future compatibility, since its used from other places -// such as RangeDescriptor#SafeMessage() -func (r ReplicaDescriptor) SafeMessage() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "(n%d,s%d):", r.NodeID, r.StoreID) +// SafeFormat implements the redact.SafeFormatter interface. 
+func (r ReplicaDescriptor) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("(n%d,s%d):", r.NodeID, r.StoreID) if r.ReplicaID == 0 { - buf.WriteString("?") + w.SafeRune('?') } else { - fmt.Fprintf(&buf, "%d", r.ReplicaID) + w.Print(r.ReplicaID) } if typ := r.GetType(); typ != VOTER_FULL { - buf.WriteString(typ.String()) + w.Print(typ) } - return buf.String() } // Validate performs some basic validation of the contents of a replica descriptor. @@ -399,6 +371,9 @@ func (r ReplicaDescriptor) GetType() ReplicaType { return *r.Type } +// SafeValue implements the redact.SafeValue interface. +func (r ReplicaType) SafeValue() {} + // PercentilesFromData derives percentiles from a slice of data points. // Sorts the input data if it isn't already sorted. func PercentilesFromData(data []float64) Percentiles { @@ -431,13 +406,23 @@ func percentileFromSortedData(data []float64, percent float64) float64 { // String returns a string representation of the Percentiles. func (p Percentiles) String() string { - return fmt.Sprintf("p10=%.2f p25=%.2f p50=%.2f p75=%.2f p90=%.2f pMax=%.2f", + return redact.StringWithoutMarkers(p) +} + +// SafeFormat implements the redact.SafeFormatter interface. +func (p Percentiles) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("p10=%.2f p25=%.2f p50=%.2f p75=%.2f p90=%.2f pMax=%.2f", p.P10, p.P25, p.P50, p.P75, p.P90, p.PMax) } // String returns a string representation of the StoreCapacity. func (sc StoreCapacity) String() string { - return fmt.Sprintf("disk (capacity=%s, available=%s, used=%s, logicalBytes=%s), "+ + return redact.StringWithoutMarkers(sc) +} + +// SafeFormat implements the redact.SafeFormatter interface. 
+func (sc StoreCapacity) SafeFormat(w redact.SafePrinter, _ rune) { + w.Printf("disk (capacity=%s, available=%s, used=%s, logicalBytes=%s), "+ "ranges=%d, leases=%d, queries=%.2f, writes=%.2f, "+ "bytesPerReplica={%s}, writesPerReplica={%s}", humanizeutil.IBytes(sc.Capacity), humanizeutil.IBytes(sc.Available), diff --git a/pkg/roachpb/metadata_test.go b/pkg/roachpb/metadata_test.go index 4a7f52702cfb..e7357ddcb292 100644 --- a/pkg/roachpb/metadata_test.go +++ b/pkg/roachpb/metadata_test.go @@ -16,6 +16,7 @@ import ( "strings" "testing" + "github.com/cockroachdb/cockroach/pkg/util/redact" "github.com/cockroachdb/errors" ) @@ -89,9 +90,9 @@ func TestRangeDescriptorSafeMessage(t *testing.T) { }, } - const expStr = `r1: [(n1,s1):?, (n2,s2):?, (n3,s3):?, next=0, gen=0]` + const expStr = `r1:‹{c-g}› [(n1,s1):?, (n2,s2):?, (n3,s3):?, next=0, gen=0]` - if str := desc.SafeMessage(); str != expStr { + if str := redact.Sprint(desc); str != expStr { t.Errorf( "expected meta: %s\n"+ "got: %s", diff --git a/pkg/server/authentication_test.go b/pkg/server/authentication_test.go index ac540ccfef43..4e8867a4c40b 100644 --- a/pkg/server/authentication_test.go +++ b/pkg/server/authentication_test.go @@ -73,7 +73,7 @@ func (insecureCtx) HTTPRequestScheme() string { return "https" } -// Verify client certificate enforcement and user whitelisting. +// Verify client certificate enforcement and user allowlisting. 
func TestSSLEnforcement(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{ diff --git a/pkg/server/debug/logspy.go b/pkg/server/debug/logspy.go index 4434224f4c78..b0c1bd67f9b3 100644 --- a/pkg/server/debug/logspy.go +++ b/pkg/server/debug/logspy.go @@ -13,7 +13,6 @@ package debug import ( "context" "encoding/json" - "fmt" "io" "net/http" "net/url" @@ -22,7 +21,6 @@ import ( "sync/atomic" "time" - "github.com/cockroachdb/cockroach/pkg/util/caller" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" @@ -165,10 +163,9 @@ func (spy *logSpy) run(ctx context.Context, w io.Writer, opts logSpyOptions) (er defer func() { if err == nil { if dropped := atomic.LoadInt32(&countDropped); dropped > 0 { - f, l, _ := caller.Lookup(0) entry := log.MakeEntry( - log.Severity_WARNING, timeutil.Now().UnixNano(), f, l, - fmt.Sprintf("%d messages were dropped", dropped)) + ctx, log.Severity_WARNING, nil /* LogCounter */, 0 /* depth */, false, /* redactable */ + "%d messages were dropped", log.Safe(dropped)) err = entry.Format(w) // modify return value } } @@ -181,16 +178,16 @@ func (spy *logSpy) run(ctx context.Context, w io.Writer, opts logSpyOptions) (er entries := make(chan log.Entry, logSpyChanCap) { - f, l, _ := caller.Lookup(0) entry := log.MakeEntry( - log.Severity_INFO, timeutil.Now().UnixNano(), f, l, - fmt.Sprintf("intercepting logs with options %+v", opts)) + ctx, log.Severity_INFO, nil /* LogCounter */, 0 /* depth */, false, /* redactable */ + "intercepting logs with options %+v", opts) entries <- entry } spy.setIntercept(ctx, func(entry log.Entry) { if re := opts.Grep.re; re != nil { switch { + case re.MatchString(entry.Tags): case re.MatchString(entry.Message): case re.MatchString(entry.File): case opts.Grep.i != 0 && opts.Grep.i == entry.Goroutine: diff --git a/pkg/server/server_sql_test.go b/pkg/server/server_sql_test.go index 
99e1932d1809..b6bc9d4ec3ca 100644 --- a/pkg/server/server_sql_test.go +++ b/pkg/server/server_sql_test.go @@ -34,7 +34,7 @@ import ( // and must not rely on having a NodeID/NodeDescriptor/NodeLiveness/... // // In short, it should not rely on the test server through anything other than a -// `*kv.DB` and a small number of whitelisted RPCs. +// `*kv.DB` and a small number of allowlisted RPCs. func TestSQLServer(t *testing.T) { defer leaktest.AfterTest(t)() ctx := context.Background() diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index 59a9fd513735..19a79e60ee24 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -550,10 +550,9 @@ func TestSystemConfigGossip(t *testing.T) { key := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, keys.MaxReservedDescID) valAt := func(i int) *sqlbase.Descriptor { - return sqlbase.WrapDescriptor(&sqlbase.DatabaseDescriptor{ - ID: sqlbase.ID(i), - Name: "foo", - }) + return sqlbase.NewInitialDatabaseDescriptor( + sqlbase.ID(i), "foo", + ).DescriptorProto() } // Register a callback for gossip updates. diff --git a/pkg/server/serverpb/status.pb.go b/pkg/server/serverpb/status.pb.go index ac311fcb22a1..af5149b1cd89 100644 --- a/pkg/server/serverpb/status.pb.go +++ b/pkg/server/serverpb/status.pb.go @@ -65,7 +65,7 @@ func (x StacksType) String() string { return proto.EnumName(StacksType_name, int32(x)) } func (StacksType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{0} + return fileDescriptor_status_731fb2d638c68f09, []int{0} } // Represents the type of file. 
@@ -92,7 +92,7 @@ func (x FileType) String() string { return proto.EnumName(FileType_name, int32(x)) } func (FileType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{1} + return fileDescriptor_status_731fb2d638c68f09, []int{1} } // We use an enum to allow reporting of client certs and potential others (eg: @@ -129,7 +129,7 @@ func (x CertificateDetails_CertificateType) String() string { return proto.EnumName(CertificateDetails_CertificateType_name, int32(x)) } func (CertificateDetails_CertificateType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{1, 0} + return fileDescriptor_status_731fb2d638c68f09, []int{1, 0} } type ProfileRequest_Type int32 @@ -149,7 +149,7 @@ func (x ProfileRequest_Type) String() string { return proto.EnumName(ProfileRequest_Type_name, int32(x)) } func (ProfileRequest_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{36, 0} + return fileDescriptor_status_731fb2d638c68f09, []int{36, 0} } // Enum for phase of execution. 
@@ -173,7 +173,7 @@ func (x ActiveQuery_Phase) String() string { return proto.EnumName(ActiveQuery_Phase_name, int32(x)) } func (ActiveQuery_Phase) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{44, 0} + return fileDescriptor_status_731fb2d638c68f09, []int{44, 0} } type CertificatesRequest struct { @@ -186,7 +186,7 @@ func (m *CertificatesRequest) Reset() { *m = CertificatesRequest{} } func (m *CertificatesRequest) String() string { return proto.CompactTextString(m) } func (*CertificatesRequest) ProtoMessage() {} func (*CertificatesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{0} + return fileDescriptor_status_731fb2d638c68f09, []int{0} } func (m *CertificatesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -225,7 +225,7 @@ func (m *CertificateDetails) Reset() { *m = CertificateDetails{} } func (m *CertificateDetails) String() string { return proto.CompactTextString(m) } func (*CertificateDetails) ProtoMessage() {} func (*CertificateDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{1} + return fileDescriptor_status_731fb2d638c68f09, []int{1} } func (m *CertificateDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -266,7 +266,7 @@ func (m *CertificateDetails_Fields) Reset() { *m = CertificateDetails_Fi func (m *CertificateDetails_Fields) String() string { return proto.CompactTextString(m) } func (*CertificateDetails_Fields) ProtoMessage() {} func (*CertificateDetails_Fields) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{1, 0} + return fileDescriptor_status_731fb2d638c68f09, []int{1, 0} } func (m *CertificateDetails_Fields) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +299,7 @@ func (m *CertificatesResponse) Reset() { *m = CertificatesResponse{} } func (m *CertificatesResponse) String() string { return proto.CompactTextString(m) } func 
(*CertificatesResponse) ProtoMessage() {} func (*CertificatesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{2} + return fileDescriptor_status_731fb2d638c68f09, []int{2} } func (m *CertificatesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -336,7 +336,7 @@ func (m *DetailsRequest) Reset() { *m = DetailsRequest{} } func (m *DetailsRequest) String() string { return proto.CompactTextString(m) } func (*DetailsRequest) ProtoMessage() {} func (*DetailsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{3} + return fileDescriptor_status_731fb2d638c68f09, []int{3} } func (m *DetailsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -373,7 +373,7 @@ func (m *SystemInfo) Reset() { *m = SystemInfo{} } func (m *SystemInfo) String() string { return proto.CompactTextString(m) } func (*SystemInfo) ProtoMessage() {} func (*SystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{4} + return fileDescriptor_status_731fb2d638c68f09, []int{4} } func (m *SystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +410,7 @@ func (m *DetailsResponse) Reset() { *m = DetailsResponse{} } func (m *DetailsResponse) String() string { return proto.CompactTextString(m) } func (*DetailsResponse) ProtoMessage() {} func (*DetailsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{5} + return fileDescriptor_status_731fb2d638c68f09, []int{5} } func (m *DetailsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +442,7 @@ func (m *NodesRequest) Reset() { *m = NodesRequest{} } func (m *NodesRequest) String() string { return proto.CompactTextString(m) } func (*NodesRequest) ProtoMessage() {} func (*NodesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{6} + return fileDescriptor_status_731fb2d638c68f09, []int{6} } 
func (m *NodesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -476,7 +476,7 @@ func (m *NodesResponse) Reset() { *m = NodesResponse{} } func (m *NodesResponse) String() string { return proto.CompactTextString(m) } func (*NodesResponse) ProtoMessage() {} func (*NodesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{7} + return fileDescriptor_status_731fb2d638c68f09, []int{7} } func (m *NodesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -511,7 +511,7 @@ func (m *NodeRequest) Reset() { *m = NodeRequest{} } func (m *NodeRequest) String() string { return proto.CompactTextString(m) } func (*NodeRequest) ProtoMessage() {} func (*NodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{8} + return fileDescriptor_status_731fb2d638c68f09, []int{8} } func (m *NodeRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +556,7 @@ func (m *RaftState) Reset() { *m = RaftState{} } func (m *RaftState) String() string { return proto.CompactTextString(m) } func (*RaftState) ProtoMessage() {} func (*RaftState) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{9} + return fileDescriptor_status_731fb2d638c68f09, []int{9} } func (m *RaftState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -593,7 +593,7 @@ func (m *RaftState_Progress) Reset() { *m = RaftState_Progress{} } func (m *RaftState_Progress) String() string { return proto.CompactTextString(m) } func (*RaftState_Progress) ProtoMessage() {} func (*RaftState_Progress) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{9, 0} + return fileDescriptor_status_731fb2d638c68f09, []int{9, 0} } func (m *RaftState_Progress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -638,7 +638,7 @@ func (m *RangeProblems) Reset() { *m = RangeProblems{} } func (m *RangeProblems) String() string { return 
proto.CompactTextString(m) } func (*RangeProblems) ProtoMessage() {} func (*RangeProblems) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{10} + return fileDescriptor_status_731fb2d638c68f09, []int{10} } func (m *RangeProblems) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -674,7 +674,7 @@ func (m *RangeStatistics) Reset() { *m = RangeStatistics{} } func (m *RangeStatistics) String() string { return proto.CompactTextString(m) } func (*RangeStatistics) ProtoMessage() {} func (*RangeStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{11} + return fileDescriptor_status_731fb2d638c68f09, []int{11} } func (m *RangeStatistics) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -708,7 +708,7 @@ func (m *PrettySpan) Reset() { *m = PrettySpan{} } func (m *PrettySpan) String() string { return proto.CompactTextString(m) } func (*PrettySpan) ProtoMessage() {} func (*PrettySpan) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{12} + return fileDescriptor_status_731fb2d638c68f09, []int{12} } func (m *PrettySpan) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -754,7 +754,7 @@ func (m *RangeInfo) Reset() { *m = RangeInfo{} } func (m *RangeInfo) String() string { return proto.CompactTextString(m) } func (*RangeInfo) ProtoMessage() {} func (*RangeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{13} + return fileDescriptor_status_731fb2d638c68f09, []int{13} } func (m *RangeInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -790,7 +790,7 @@ func (m *RangesRequest) Reset() { *m = RangesRequest{} } func (m *RangesRequest) String() string { return proto.CompactTextString(m) } func (*RangesRequest) ProtoMessage() {} func (*RangesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{14} + return fileDescriptor_status_731fb2d638c68f09, []int{14} } 
func (m *RangesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -823,7 +823,7 @@ func (m *RangesResponse) Reset() { *m = RangesResponse{} } func (m *RangesResponse) String() string { return proto.CompactTextString(m) } func (*RangesResponse) ProtoMessage() {} func (*RangesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{15} + return fileDescriptor_status_731fb2d638c68f09, []int{15} } func (m *RangesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -858,7 +858,7 @@ func (m *GossipRequest) Reset() { *m = GossipRequest{} } func (m *GossipRequest) String() string { return proto.CompactTextString(m) } func (*GossipRequest) ProtoMessage() {} func (*GossipRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{16} + return fileDescriptor_status_731fb2d638c68f09, []int{16} } func (m *GossipRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -893,7 +893,7 @@ func (m *EngineStatsInfo) Reset() { *m = EngineStatsInfo{} } func (m *EngineStatsInfo) String() string { return proto.CompactTextString(m) } func (*EngineStatsInfo) ProtoMessage() {} func (*EngineStatsInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{17} + return fileDescriptor_status_731fb2d638c68f09, []int{17} } func (m *EngineStatsInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -928,7 +928,7 @@ func (m *EngineStatsRequest) Reset() { *m = EngineStatsRequest{} } func (m *EngineStatsRequest) String() string { return proto.CompactTextString(m) } func (*EngineStatsRequest) ProtoMessage() {} func (*EngineStatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{18} + return fileDescriptor_status_731fb2d638c68f09, []int{18} } func (m *EngineStatsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -961,7 +961,7 @@ func (m *EngineStatsResponse) Reset() { *m = EngineStatsResponse{} } 
func (m *EngineStatsResponse) String() string { return proto.CompactTextString(m) } func (*EngineStatsResponse) ProtoMessage() {} func (*EngineStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{19} + return fileDescriptor_status_731fb2d638c68f09, []int{19} } func (m *EngineStatsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -995,7 +995,7 @@ func (m *TraceEvent) Reset() { *m = TraceEvent{} } func (m *TraceEvent) String() string { return proto.CompactTextString(m) } func (*TraceEvent) ProtoMessage() {} func (*TraceEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{20} + return fileDescriptor_status_731fb2d638c68f09, []int{20} } func (m *TraceEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1029,7 +1029,7 @@ func (m *AllocatorDryRun) Reset() { *m = AllocatorDryRun{} } func (m *AllocatorDryRun) String() string { return proto.CompactTextString(m) } func (*AllocatorDryRun) ProtoMessage() {} func (*AllocatorDryRun) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{21} + return fileDescriptor_status_731fb2d638c68f09, []int{21} } func (m *AllocatorDryRun) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1062,7 +1062,7 @@ func (m *AllocatorRangeRequest) Reset() { *m = AllocatorRangeRequest{} } func (m *AllocatorRangeRequest) String() string { return proto.CompactTextString(m) } func (*AllocatorRangeRequest) ProtoMessage() {} func (*AllocatorRangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{22} + return fileDescriptor_status_731fb2d638c68f09, []int{22} } func (m *AllocatorRangeRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1098,7 +1098,7 @@ func (m *AllocatorRangeResponse) Reset() { *m = AllocatorRangeResponse{} func (m *AllocatorRangeResponse) String() string { return proto.CompactTextString(m) } func (*AllocatorRangeResponse) 
ProtoMessage() {} func (*AllocatorRangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{23} + return fileDescriptor_status_731fb2d638c68f09, []int{23} } func (m *AllocatorRangeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1132,7 +1132,7 @@ func (m *AllocatorRequest) Reset() { *m = AllocatorRequest{} } func (m *AllocatorRequest) String() string { return proto.CompactTextString(m) } func (*AllocatorRequest) ProtoMessage() {} func (*AllocatorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{24} + return fileDescriptor_status_731fb2d638c68f09, []int{24} } func (m *AllocatorRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1165,7 +1165,7 @@ func (m *AllocatorResponse) Reset() { *m = AllocatorResponse{} } func (m *AllocatorResponse) String() string { return proto.CompactTextString(m) } func (*AllocatorResponse) ProtoMessage() {} func (*AllocatorResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{25} + return fileDescriptor_status_731fb2d638c68f09, []int{25} } func (m *AllocatorResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1198,7 +1198,7 @@ func (m *JSONResponse) Reset() { *m = JSONResponse{} } func (m *JSONResponse) String() string { return proto.CompactTextString(m) } func (*JSONResponse) ProtoMessage() {} func (*JSONResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{26} + return fileDescriptor_status_731fb2d638c68f09, []int{26} } func (m *JSONResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1232,13 +1232,25 @@ type LogsRequest struct { EndTime string `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` Max string `protobuf:"bytes,5,opt,name=max,proto3" json:"max,omitempty"` Pattern string `protobuf:"bytes,6,opt,name=pattern,proto3" json:"pattern,omitempty"` + // redact, if true, 
requests redaction of sensitive data away + // from the retrieved log entries. + // Only admin users can send a request with redact = false. + Redact bool `protobuf:"varint,7,opt,name=redact,proto3" json:"redact,omitempty"` + // keep_redactable, if true, requests that retrieved entries preserve + // the redaction markers if any were present in the log files. + // If false, redaction markers are stripped away. + // Note that redact = false && redactable = false implies + // "flat" entries with all sensitive information enclosed and + // no markers; this is suitable for backward-compatibility with + // RPC clients from prior the introduction of redactable logs. + KeepRedactable bool `protobuf:"varint,8,opt,name=keep_redactable,json=keepRedactable,proto3" json:"keep_redactable,omitempty"` } func (m *LogsRequest) Reset() { *m = LogsRequest{} } func (m *LogsRequest) String() string { return proto.CompactTextString(m) } func (*LogsRequest) ProtoMessage() {} func (*LogsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{27} + return fileDescriptor_status_731fb2d638c68f09, []int{27} } func (m *LogsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1271,7 +1283,7 @@ func (m *LogEntriesResponse) Reset() { *m = LogEntriesResponse{} } func (m *LogEntriesResponse) String() string { return proto.CompactTextString(m) } func (*LogEntriesResponse) ProtoMessage() {} func (*LogEntriesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{28} + return fileDescriptor_status_731fb2d638c68f09, []int{28} } func (m *LogEntriesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1306,7 +1318,7 @@ func (m *LogFilesListRequest) Reset() { *m = LogFilesListRequest{} } func (m *LogFilesListRequest) String() string { return proto.CompactTextString(m) } func (*LogFilesListRequest) ProtoMessage() {} func (*LogFilesListRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_status_fee345378c170d2b, []int{29} + return fileDescriptor_status_731fb2d638c68f09, []int{29} } func (m *LogFilesListRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1339,7 +1351,7 @@ func (m *LogFilesListResponse) Reset() { *m = LogFilesListResponse{} } func (m *LogFilesListResponse) String() string { return proto.CompactTextString(m) } func (*LogFilesListResponse) ProtoMessage() {} func (*LogFilesListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{30} + return fileDescriptor_status_731fb2d638c68f09, []int{30} } func (m *LogFilesListResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1369,13 +1381,25 @@ type LogFileRequest struct { // forwarding is necessary. NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` File string `protobuf:"bytes,2,opt,name=file,proto3" json:"file,omitempty"` + // redact, if true, requests redaction of sensitive data away + // from the retrieved log entries. + // Only admin users can send a request with redact = false. + Redact bool `protobuf:"varint,3,opt,name=redact,proto3" json:"redact,omitempty"` + // keep_redactable, if true, requests that retrieved entries preserve + // the redaction markers if any were present in the log files. + // If false, redaction markers are stripped away. + // Note that redact = false && redactable = false implies + // "flat" entries with all sensitive information enclosed and + // no markers; this is suitable for backward-compatibility with + // RPC clients from prior the introduction of redactable logs. 
+ KeepRedactable bool `protobuf:"varint,4,opt,name=keep_redactable,json=keepRedactable,proto3" json:"keep_redactable,omitempty"` } func (m *LogFileRequest) Reset() { *m = LogFileRequest{} } func (m *LogFileRequest) String() string { return proto.CompactTextString(m) } func (*LogFileRequest) ProtoMessage() {} func (*LogFileRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{31} + return fileDescriptor_status_731fb2d638c68f09, []int{31} } func (m *LogFileRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1411,7 +1435,7 @@ func (m *StacksRequest) Reset() { *m = StacksRequest{} } func (m *StacksRequest) String() string { return proto.CompactTextString(m) } func (*StacksRequest) ProtoMessage() {} func (*StacksRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{32} + return fileDescriptor_status_731fb2d638c68f09, []int{32} } func (m *StacksRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1447,7 +1471,7 @@ func (m *File) Reset() { *m = File{} } func (m *File) String() string { return proto.CompactTextString(m) } func (*File) ProtoMessage() {} func (*File) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{33} + return fileDescriptor_status_731fb2d638c68f09, []int{33} } func (m *File) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1491,7 +1515,7 @@ func (m *GetFilesRequest) Reset() { *m = GetFilesRequest{} } func (m *GetFilesRequest) String() string { return proto.CompactTextString(m) } func (*GetFilesRequest) ProtoMessage() {} func (*GetFilesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{34} + return fileDescriptor_status_731fb2d638c68f09, []int{34} } func (m *GetFilesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1524,7 +1548,7 @@ func (m *GetFilesResponse) Reset() { *m = GetFilesResponse{} } func (m *GetFilesResponse) String() string { 
return proto.CompactTextString(m) } func (*GetFilesResponse) ProtoMessage() {} func (*GetFilesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{35} + return fileDescriptor_status_731fb2d638c68f09, []int{35} } func (m *GetFilesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1561,7 +1585,7 @@ func (m *ProfileRequest) Reset() { *m = ProfileRequest{} } func (m *ProfileRequest) String() string { return proto.CompactTextString(m) } func (*ProfileRequest) ProtoMessage() {} func (*ProfileRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{36} + return fileDescriptor_status_731fb2d638c68f09, []int{36} } func (m *ProfileRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1596,7 +1620,7 @@ func (m *MetricsRequest) Reset() { *m = MetricsRequest{} } func (m *MetricsRequest) String() string { return proto.CompactTextString(m) } func (*MetricsRequest) ProtoMessage() {} func (*MetricsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{37} + return fileDescriptor_status_731fb2d638c68f09, []int{37} } func (m *MetricsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1630,7 +1654,7 @@ func (m *RaftRangeNode) Reset() { *m = RaftRangeNode{} } func (m *RaftRangeNode) String() string { return proto.CompactTextString(m) } func (*RaftRangeNode) ProtoMessage() {} func (*RaftRangeNode) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{38} + return fileDescriptor_status_731fb2d638c68f09, []int{38} } func (m *RaftRangeNode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1663,7 +1687,7 @@ func (m *RaftRangeError) Reset() { *m = RaftRangeError{} } func (m *RaftRangeError) String() string { return proto.CompactTextString(m) } func (*RaftRangeError) ProtoMessage() {} func (*RaftRangeError) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, 
[]int{39} + return fileDescriptor_status_731fb2d638c68f09, []int{39} } func (m *RaftRangeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1698,7 +1722,7 @@ func (m *RaftRangeStatus) Reset() { *m = RaftRangeStatus{} } func (m *RaftRangeStatus) String() string { return proto.CompactTextString(m) } func (*RaftRangeStatus) ProtoMessage() {} func (*RaftRangeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{40} + return fileDescriptor_status_731fb2d638c68f09, []int{40} } func (m *RaftRangeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1731,7 +1755,7 @@ func (m *RaftDebugRequest) Reset() { *m = RaftDebugRequest{} } func (m *RaftDebugRequest) String() string { return proto.CompactTextString(m) } func (*RaftDebugRequest) ProtoMessage() {} func (*RaftDebugRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{41} + return fileDescriptor_status_731fb2d638c68f09, []int{41} } func (m *RaftDebugRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1765,7 +1789,7 @@ func (m *RaftDebugResponse) Reset() { *m = RaftDebugResponse{} } func (m *RaftDebugResponse) String() string { return proto.CompactTextString(m) } func (*RaftDebugResponse) ProtoMessage() {} func (*RaftDebugResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{42} + return fileDescriptor_status_731fb2d638c68f09, []int{42} } func (m *RaftDebugResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1804,7 +1828,7 @@ func (m *TxnInfo) Reset() { *m = TxnInfo{} } func (m *TxnInfo) String() string { return proto.CompactTextString(m) } func (*TxnInfo) ProtoMessage() {} func (*TxnInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{43} + return fileDescriptor_status_731fb2d638c68f09, []int{43} } func (m *TxnInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1850,7 +1874,7 @@ func (m 
*ActiveQuery) Reset() { *m = ActiveQuery{} } func (m *ActiveQuery) String() string { return proto.CompactTextString(m) } func (*ActiveQuery) ProtoMessage() {} func (*ActiveQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{44} + return fileDescriptor_status_731fb2d638c68f09, []int{44} } func (m *ActiveQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1885,7 +1909,7 @@ func (m *ListSessionsRequest) Reset() { *m = ListSessionsRequest{} } func (m *ListSessionsRequest) String() string { return proto.CompactTextString(m) } func (*ListSessionsRequest) ProtoMessage() {} func (*ListSessionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{45} + return fileDescriptor_status_731fb2d638c68f09, []int{45} } func (m *ListSessionsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1944,7 +1968,7 @@ func (m *Session) Reset() { *m = Session{} } func (m *Session) String() string { return proto.CompactTextString(m) } func (*Session) ProtoMessage() {} func (*Session) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{46} + return fileDescriptor_status_731fb2d638c68f09, []int{46} } func (m *Session) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1981,7 +2005,7 @@ func (m *ListSessionsError) Reset() { *m = ListSessionsError{} } func (m *ListSessionsError) String() string { return proto.CompactTextString(m) } func (*ListSessionsError) ProtoMessage() {} func (*ListSessionsError) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{47} + return fileDescriptor_status_731fb2d638c68f09, []int{47} } func (m *ListSessionsError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2018,7 +2042,7 @@ func (m *ListSessionsResponse) Reset() { *m = ListSessionsResponse{} } func (m *ListSessionsResponse) String() string { return proto.CompactTextString(m) } func (*ListSessionsResponse) ProtoMessage() {} 
func (*ListSessionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{48} + return fileDescriptor_status_731fb2d638c68f09, []int{48} } func (m *ListSessionsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2063,7 +2087,7 @@ func (m *CancelQueryRequest) Reset() { *m = CancelQueryRequest{} } func (m *CancelQueryRequest) String() string { return proto.CompactTextString(m) } func (*CancelQueryRequest) ProtoMessage() {} func (*CancelQueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{49} + return fileDescriptor_status_731fb2d638c68f09, []int{49} } func (m *CancelQueryRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2100,7 +2124,7 @@ func (m *CancelQueryResponse) Reset() { *m = CancelQueryResponse{} } func (m *CancelQueryResponse) String() string { return proto.CompactTextString(m) } func (*CancelQueryResponse) ProtoMessage() {} func (*CancelQueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{50} + return fileDescriptor_status_731fb2d638c68f09, []int{50} } func (m *CancelQueryResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2140,7 +2164,7 @@ func (m *CancelSessionRequest) Reset() { *m = CancelSessionRequest{} } func (m *CancelSessionRequest) String() string { return proto.CompactTextString(m) } func (*CancelSessionRequest) ProtoMessage() {} func (*CancelSessionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{51} + return fileDescriptor_status_731fb2d638c68f09, []int{51} } func (m *CancelSessionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2174,7 +2198,7 @@ func (m *CancelSessionResponse) Reset() { *m = CancelSessionResponse{} } func (m *CancelSessionResponse) String() string { return proto.CompactTextString(m) } func (*CancelSessionResponse) ProtoMessage() {} func (*CancelSessionResponse) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_status_fee345378c170d2b, []int{52} + return fileDescriptor_status_731fb2d638c68f09, []int{52} } func (m *CancelSessionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2209,7 +2233,7 @@ func (m *SpanStatsRequest) Reset() { *m = SpanStatsRequest{} } func (m *SpanStatsRequest) String() string { return proto.CompactTextString(m) } func (*SpanStatsRequest) ProtoMessage() {} func (*SpanStatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{53} + return fileDescriptor_status_731fb2d638c68f09, []int{53} } func (m *SpanStatsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2244,7 +2268,7 @@ func (m *SpanStatsResponse) Reset() { *m = SpanStatsResponse{} } func (m *SpanStatsResponse) String() string { return proto.CompactTextString(m) } func (*SpanStatsResponse) ProtoMessage() {} func (*SpanStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{54} + return fileDescriptor_status_731fb2d638c68f09, []int{54} } func (m *SpanStatsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2278,7 +2302,7 @@ func (m *ProblemRangesRequest) Reset() { *m = ProblemRangesRequest{} } func (m *ProblemRangesRequest) String() string { return proto.CompactTextString(m) } func (*ProblemRangesRequest) ProtoMessage() {} func (*ProblemRangesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{55} + return fileDescriptor_status_731fb2d638c68f09, []int{55} } func (m *ProblemRangesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2313,7 +2337,7 @@ func (m *ProblemRangesResponse) Reset() { *m = ProblemRangesResponse{} } func (m *ProblemRangesResponse) String() string { return proto.CompactTextString(m) } func (*ProblemRangesResponse) ProtoMessage() {} func (*ProblemRangesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{56} + 
return fileDescriptor_status_731fb2d638c68f09, []int{56} } func (m *ProblemRangesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2354,7 +2378,7 @@ func (m *ProblemRangesResponse_NodeProblems) Reset() { *m = ProblemRange func (m *ProblemRangesResponse_NodeProblems) String() string { return proto.CompactTextString(m) } func (*ProblemRangesResponse_NodeProblems) ProtoMessage() {} func (*ProblemRangesResponse_NodeProblems) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{56, 0} + return fileDescriptor_status_731fb2d638c68f09, []int{56, 0} } func (m *ProblemRangesResponse_NodeProblems) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2388,7 +2412,7 @@ func (m *HotRangesRequest) Reset() { *m = HotRangesRequest{} } func (m *HotRangesRequest) String() string { return proto.CompactTextString(m) } func (*HotRangesRequest) ProtoMessage() {} func (*HotRangesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{57} + return fileDescriptor_status_731fb2d638c68f09, []int{57} } func (m *HotRangesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2423,7 +2447,7 @@ func (m *HotRangesResponse) Reset() { *m = HotRangesResponse{} } func (m *HotRangesResponse) String() string { return proto.CompactTextString(m) } func (*HotRangesResponse) ProtoMessage() {} func (*HotRangesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{58} + return fileDescriptor_status_731fb2d638c68f09, []int{58} } func (m *HotRangesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2457,7 +2481,7 @@ func (m *HotRangesResponse_HotRange) Reset() { *m = HotRangesResponse_Ho func (m *HotRangesResponse_HotRange) String() string { return proto.CompactTextString(m) } func (*HotRangesResponse_HotRange) ProtoMessage() {} func (*HotRangesResponse_HotRange) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, 
[]int{58, 0} + return fileDescriptor_status_731fb2d638c68f09, []int{58, 0} } func (m *HotRangesResponse_HotRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2491,7 +2515,7 @@ func (m *HotRangesResponse_StoreResponse) Reset() { *m = HotRangesRespon func (m *HotRangesResponse_StoreResponse) String() string { return proto.CompactTextString(m) } func (*HotRangesResponse_StoreResponse) ProtoMessage() {} func (*HotRangesResponse_StoreResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{58, 1} + return fileDescriptor_status_731fb2d638c68f09, []int{58, 1} } func (m *HotRangesResponse_StoreResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2525,7 +2549,7 @@ func (m *HotRangesResponse_NodeResponse) Reset() { *m = HotRangesRespons func (m *HotRangesResponse_NodeResponse) String() string { return proto.CompactTextString(m) } func (*HotRangesResponse_NodeResponse) ProtoMessage() {} func (*HotRangesResponse_NodeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{58, 2} + return fileDescriptor_status_731fb2d638c68f09, []int{58, 2} } func (m *HotRangesResponse_NodeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2558,7 +2582,7 @@ func (m *RangeRequest) Reset() { *m = RangeRequest{} } func (m *RangeRequest) String() string { return proto.CompactTextString(m) } func (*RangeRequest) ProtoMessage() {} func (*RangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{59} + return fileDescriptor_status_731fb2d638c68f09, []int{59} } func (m *RangeRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2594,7 +2618,7 @@ func (m *RangeResponse) Reset() { *m = RangeResponse{} } func (m *RangeResponse) String() string { return proto.CompactTextString(m) } func (*RangeResponse) ProtoMessage() {} func (*RangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{60} 
+ return fileDescriptor_status_731fb2d638c68f09, []int{60} } func (m *RangeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2629,7 +2653,7 @@ func (m *RangeResponse_NodeResponse) Reset() { *m = RangeResponse_NodeRe func (m *RangeResponse_NodeResponse) String() string { return proto.CompactTextString(m) } func (*RangeResponse_NodeResponse) ProtoMessage() {} func (*RangeResponse_NodeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{60, 0} + return fileDescriptor_status_731fb2d638c68f09, []int{60, 0} } func (m *RangeResponse_NodeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2665,7 +2689,7 @@ func (m *DiagnosticsRequest) Reset() { *m = DiagnosticsRequest{} } func (m *DiagnosticsRequest) String() string { return proto.CompactTextString(m) } func (*DiagnosticsRequest) ProtoMessage() {} func (*DiagnosticsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{61} + return fileDescriptor_status_731fb2d638c68f09, []int{61} } func (m *DiagnosticsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2700,7 +2724,7 @@ func (m *StoresRequest) Reset() { *m = StoresRequest{} } func (m *StoresRequest) String() string { return proto.CompactTextString(m) } func (*StoresRequest) ProtoMessage() {} func (*StoresRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{62} + return fileDescriptor_status_731fb2d638c68f09, []int{62} } func (m *StoresRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2743,7 +2767,7 @@ func (m *StoreDetails) Reset() { *m = StoreDetails{} } func (m *StoreDetails) String() string { return proto.CompactTextString(m) } func (*StoreDetails) ProtoMessage() {} func (*StoreDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{63} + return fileDescriptor_status_731fb2d638c68f09, []int{63} } func (m *StoreDetails) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -2776,7 +2800,7 @@ func (m *StoresResponse) Reset() { *m = StoresResponse{} } func (m *StoresResponse) String() string { return proto.CompactTextString(m) } func (*StoresResponse) ProtoMessage() {} func (*StoresResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{64} + return fileDescriptor_status_731fb2d638c68f09, []int{64} } func (m *StoresResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2809,7 +2833,7 @@ func (m *StatementsRequest) Reset() { *m = StatementsRequest{} } func (m *StatementsRequest) String() string { return proto.CompactTextString(m) } func (*StatementsRequest) ProtoMessage() {} func (*StatementsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{65} + return fileDescriptor_status_731fb2d638c68f09, []int{65} } func (m *StatementsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2847,7 +2871,7 @@ func (m *StatementsResponse) Reset() { *m = StatementsResponse{} } func (m *StatementsResponse) String() string { return proto.CompactTextString(m) } func (*StatementsResponse) ProtoMessage() {} func (*StatementsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{66} + return fileDescriptor_status_731fb2d638c68f09, []int{66} } func (m *StatementsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2885,7 +2909,7 @@ func (m *StatementsResponse_ExtendedStatementStatisticsKey) String() string { } func (*StatementsResponse_ExtendedStatementStatisticsKey) ProtoMessage() {} func (*StatementsResponse_ExtendedStatementStatisticsKey) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{66, 0} + return fileDescriptor_status_731fb2d638c68f09, []int{66, 0} } func (m *StatementsResponse_ExtendedStatementStatisticsKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2923,7 +2947,7 @@ func (m 
*StatementsResponse_CollectedStatementStatistics) String() string { } func (*StatementsResponse_CollectedStatementStatistics) ProtoMessage() {} func (*StatementsResponse_CollectedStatementStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{66, 1} + return fileDescriptor_status_731fb2d638c68f09, []int{66, 1} } func (m *StatementsResponse_CollectedStatementStatistics) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2960,7 +2984,7 @@ func (m *StatementDiagnosticsReport) Reset() { *m = StatementDiagnostics func (m *StatementDiagnosticsReport) String() string { return proto.CompactTextString(m) } func (*StatementDiagnosticsReport) ProtoMessage() {} func (*StatementDiagnosticsReport) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{67} + return fileDescriptor_status_731fb2d638c68f09, []int{67} } func (m *StatementDiagnosticsReport) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2995,7 +3019,7 @@ func (m *CreateStatementDiagnosticsReportRequest) Reset() { func (m *CreateStatementDiagnosticsReportRequest) String() string { return proto.CompactTextString(m) } func (*CreateStatementDiagnosticsReportRequest) ProtoMessage() {} func (*CreateStatementDiagnosticsReportRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{68} + return fileDescriptor_status_731fb2d638c68f09, []int{68} } func (m *CreateStatementDiagnosticsReportRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3030,7 +3054,7 @@ func (m *CreateStatementDiagnosticsReportResponse) Reset() { func (m *CreateStatementDiagnosticsReportResponse) String() string { return proto.CompactTextString(m) } func (*CreateStatementDiagnosticsReportResponse) ProtoMessage() {} func (*CreateStatementDiagnosticsReportResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{69} + return fileDescriptor_status_731fb2d638c68f09, []int{69} } func 
(m *CreateStatementDiagnosticsReportResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3062,7 +3086,7 @@ func (m *StatementDiagnosticsReportsRequest) Reset() { *m = StatementDia func (m *StatementDiagnosticsReportsRequest) String() string { return proto.CompactTextString(m) } func (*StatementDiagnosticsReportsRequest) ProtoMessage() {} func (*StatementDiagnosticsReportsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{70} + return fileDescriptor_status_731fb2d638c68f09, []int{70} } func (m *StatementDiagnosticsReportsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3095,7 +3119,7 @@ func (m *StatementDiagnosticsReportsResponse) Reset() { *m = StatementDi func (m *StatementDiagnosticsReportsResponse) String() string { return proto.CompactTextString(m) } func (*StatementDiagnosticsReportsResponse) ProtoMessage() {} func (*StatementDiagnosticsReportsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{71} + return fileDescriptor_status_731fb2d638c68f09, []int{71} } func (m *StatementDiagnosticsReportsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3131,7 +3155,7 @@ func (m *StatementDiagnostics) Reset() { *m = StatementDiagnostics{} } func (m *StatementDiagnostics) String() string { return proto.CompactTextString(m) } func (*StatementDiagnostics) ProtoMessage() {} func (*StatementDiagnostics) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{72} + return fileDescriptor_status_731fb2d638c68f09, []int{72} } func (m *StatementDiagnostics) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3164,7 +3188,7 @@ func (m *StatementDiagnosticsRequest) Reset() { *m = StatementDiagnostic func (m *StatementDiagnosticsRequest) String() string { return proto.CompactTextString(m) } func (*StatementDiagnosticsRequest) ProtoMessage() {} func (*StatementDiagnosticsRequest) Descriptor() ([]byte, []int) { - 
return fileDescriptor_status_fee345378c170d2b, []int{73} + return fileDescriptor_status_731fb2d638c68f09, []int{73} } func (m *StatementDiagnosticsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3197,7 +3221,7 @@ func (m *StatementDiagnosticsResponse) Reset() { *m = StatementDiagnosti func (m *StatementDiagnosticsResponse) String() string { return proto.CompactTextString(m) } func (*StatementDiagnosticsResponse) ProtoMessage() {} func (*StatementDiagnosticsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{74} + return fileDescriptor_status_731fb2d638c68f09, []int{74} } func (m *StatementDiagnosticsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3230,7 +3254,7 @@ func (m *JobRegistryStatusRequest) Reset() { *m = JobRegistryStatusReque func (m *JobRegistryStatusRequest) String() string { return proto.CompactTextString(m) } func (*JobRegistryStatusRequest) ProtoMessage() {} func (*JobRegistryStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{75} + return fileDescriptor_status_731fb2d638c68f09, []int{75} } func (m *JobRegistryStatusRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3264,7 +3288,7 @@ func (m *JobRegistryStatusResponse) Reset() { *m = JobRegistryStatusResp func (m *JobRegistryStatusResponse) String() string { return proto.CompactTextString(m) } func (*JobRegistryStatusResponse) ProtoMessage() {} func (*JobRegistryStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{76} + return fileDescriptor_status_731fb2d638c68f09, []int{76} } func (m *JobRegistryStatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3297,7 +3321,7 @@ func (m *JobRegistryStatusResponse_Job) Reset() { *m = JobRegistryStatus func (m *JobRegistryStatusResponse_Job) String() string { return proto.CompactTextString(m) } func (*JobRegistryStatusResponse_Job) ProtoMessage() {} 
func (*JobRegistryStatusResponse_Job) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{76, 0} + return fileDescriptor_status_731fb2d638c68f09, []int{76, 0} } func (m *JobRegistryStatusResponse_Job) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3330,7 +3354,7 @@ func (m *JobStatusRequest) Reset() { *m = JobStatusRequest{} } func (m *JobStatusRequest) String() string { return proto.CompactTextString(m) } func (*JobStatusRequest) ProtoMessage() {} func (*JobStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{77} + return fileDescriptor_status_731fb2d638c68f09, []int{77} } func (m *JobStatusRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3363,7 +3387,7 @@ func (m *JobStatusResponse) Reset() { *m = JobStatusResponse{} } func (m *JobStatusResponse) String() string { return proto.CompactTextString(m) } func (*JobStatusResponse) ProtoMessage() {} func (*JobStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_status_fee345378c170d2b, []int{78} + return fileDescriptor_status_731fb2d638c68f09, []int{78} } func (m *JobStatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,6 +5956,26 @@ func (m *LogsRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintStatus(dAtA, i, uint64(len(m.Pattern))) i += copy(dAtA[i:], m.Pattern) } + if m.Redact { + dAtA[i] = 0x38 + i++ + if m.Redact { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.KeepRedactable { + dAtA[i] = 0x40 + i++ + if m.KeepRedactable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -6046,6 +6090,26 @@ func (m *LogFileRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintStatus(dAtA, i, uint64(len(m.File))) i += copy(dAtA[i:], m.File) } + if m.Redact { + dAtA[i] = 0x18 + i++ + if m.Redact { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.KeepRedactable { + dAtA[i] = 0x20 + i++ + if m.KeepRedactable { + 
dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -8770,6 +8834,12 @@ func (m *LogsRequest) Size() (n int) { if l > 0 { n += 1 + l + sovStatus(uint64(l)) } + if m.Redact { + n += 2 + } + if m.KeepRedactable { + n += 2 + } return n } @@ -8830,6 +8900,12 @@ func (m *LogFileRequest) Size() (n int) { if l > 0 { n += 1 + l + sovStatus(uint64(l)) } + if m.Redact { + n += 2 + } + if m.KeepRedactable { + n += 2 + } return n } @@ -13924,6 +14000,46 @@ func (m *LogsRequest) Unmarshal(dAtA []byte) error { } m.Pattern = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Redact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Redact = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepRedactable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.KeepRedactable = bool(v != 0) default: iNdEx = preIndex skippy, err := skipStatus(dAtA[iNdEx:]) @@ -14273,6 +14389,46 @@ func (m *LogFileRequest) Unmarshal(dAtA []byte) error { } m.File = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Redact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Redact = bool(v != 0) + case 4: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field KeepRedactable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.KeepRedactable = bool(v != 0) default: iNdEx = preIndex skippy, err := skipStatus(dAtA[iNdEx:]) @@ -21311,374 +21467,376 @@ var ( ) func init() { - proto.RegisterFile("server/serverpb/status.proto", fileDescriptor_status_fee345378c170d2b) -} - -var fileDescriptor_status_fee345378c170d2b = []byte{ - // 5831 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x7c, 0xdb, 0x6f, 0x1c, 0xc9, - 0x75, 0xb7, 0x7a, 0x6e, 0x9c, 0x39, 0xc3, 0xcb, 0xb0, 0x44, 0x52, 0xa3, 0x91, 0x96, 0x23, 0xb7, - 0x76, 0x75, 0xdb, 0xdd, 0x99, 0x5d, 0xed, 0xca, 0x2b, 0xef, 0xe7, 0x5d, 0x9b, 0x37, 0x49, 0x94, - 0xb8, 0xba, 0x34, 0xa9, 0xcf, 0x1f, 0xd6, 0xfe, 0xb6, 0xbf, 0x9e, 0xe9, 0xe2, 0xa8, 0xc5, 0x61, - 0xf7, 0xa8, 0xbb, 0x87, 0x1f, 0xc7, 0x1b, 0xd9, 0xce, 0xe6, 0xe6, 0x38, 0x8e, 0x6f, 0x71, 0x02, - 0x3f, 0x24, 0x40, 0xe0, 0x87, 0x38, 0x2f, 0x09, 0x1c, 0xe4, 0x25, 0x09, 0x90, 0x04, 0xb9, 0x20, - 0x31, 0x10, 0x20, 0x30, 0x90, 0x3c, 0x18, 0x09, 0x40, 0x27, 0x74, 0x1e, 0x02, 0xe4, 0x3f, 0x30, - 0x90, 0x20, 0xa8, 0x53, 0xd5, 0x3d, 0xd5, 0x33, 0xc3, 0x9e, 0x21, 0xb9, 0x5a, 0xe4, 0x61, 0x57, - 0xd3, 0x55, 0x75, 0x4e, 0xfd, 0xea, 0xd4, 0xa9, 0x53, 0xa7, 0x4e, 0x9d, 0x22, 0x9c, 0xf5, 0xa8, - 0xbb, 0x43, 0xdd, 0x2a, 0xff, 0xa7, 0x55, 0xab, 0x7a, 0xbe, 0xe1, 0xb7, 0xbd, 0x4a, 0xcb, 0x75, - 0x7c, 0x87, 0x9c, 0xae, 0x3b, 0xf5, 0x2d, 0xd7, 0x31, 0xea, 0x8f, 0x2a, 0xbc, 0x41, 0x25, 0x68, - 0x57, 0x2a, 0xd4, 0xda, 0x56, 0xd3, 0xac, 0x5a, 0xf6, 0xa6, 0xc3, 0x1b, 0x97, 0x4e, 0x36, 0x1c, - 0xcf, 0xb3, 0x5a, 0x55, 0xfe, 0x8f, 0x28, 0x9c, 0x7b, 0xec, 0xd4, 0xbc, 0x2a, 0xfb, 0x5f, 0xab, - 0x86, 0xff, 0x88, 0xf2, 0x53, 0xc8, 
0xb5, 0x55, 0xab, 0x1a, 0xad, 0x96, 0xce, 0xfa, 0x0c, 0x2a, - 0x48, 0x50, 0x61, 0x1a, 0xbe, 0x11, 0x30, 0x09, 0xca, 0xb6, 0xa9, 0x6f, 0x48, 0xe5, 0x17, 0x04, - 0x78, 0xd3, 0x32, 0x1a, 0xb6, 0xe3, 0xf9, 0x56, 0x9d, 0xf5, 0x22, 0x7d, 0x89, 0x76, 0xe7, 0x83, - 0x41, 0xe2, 0xd8, 0xc4, 0x3f, 0x3d, 0x63, 0x2d, 0x3d, 0xe7, 0xf9, 0x8e, 0x6b, 0x34, 0x68, 0x95, - 0xda, 0x0d, 0xcb, 0xa6, 0xad, 0x9a, 0xf8, 0x21, 0xaa, 0xcf, 0xf4, 0x55, 0x6f, 0xef, 0xd4, 0xeb, - 0xa2, 0x72, 0xbe, 0xaf, 0xd2, 0x75, 0xea, 0x5b, 0x9e, 0x59, 0x13, 0xf5, 0x97, 0xb7, 0x76, 0xaa, - 0x5b, 0x3b, 0x02, 0x45, 0xf0, 0xa3, 0x55, 0xab, 0x36, 0xa9, 0xe1, 0x51, 0x3d, 0x02, 0x43, 0x3d, - 0xa0, 0x29, 0x6b, 0x14, 0x60, 0x79, 0xe1, 0x20, 0x76, 0xd6, 0x0e, 0xb5, 0xa9, 0x17, 0x8a, 0xb2, - 0xed, 0x5b, 0xcd, 0x6a, 0xd3, 0x69, 0xb0, 0xff, 0x44, 0x59, 0x09, 0xcb, 0xda, 0xb6, 0x4b, 0x3d, - 0xa7, 0xb9, 0x43, 0x4d, 0xdd, 0x30, 0x4d, 0x37, 0x18, 0x22, 0xf5, 0xeb, 0x66, 0xd5, 0x35, 0x36, - 0x7d, 0xfc, 0x1f, 0x1b, 0x85, 0xb1, 0xe9, 0x8b, 0xca, 0x99, 0x86, 0xd3, 0x70, 0xf0, 0x67, 0x95, - 0xfd, 0x12, 0xa5, 0x67, 0x1b, 0x8e, 0xd3, 0x68, 0xd2, 0xaa, 0xd1, 0xb2, 0xaa, 0x86, 0x6d, 0x3b, - 0xbe, 0xe1, 0x5b, 0x8e, 0x1d, 0x00, 0x28, 0x8b, 0x5a, 0xfc, 0xaa, 0xb5, 0x37, 0xab, 0xbe, 0xb5, - 0x4d, 0x3d, 0xdf, 0xd8, 0x16, 0xda, 0xa1, 0x56, 0xe0, 0xe4, 0x12, 0x75, 0x7d, 0x6b, 0xd3, 0xaa, - 0x1b, 0x3e, 0xf5, 0x34, 0xfa, 0xa4, 0x4d, 0x3d, 0x9f, 0x9c, 0x82, 0x31, 0xdb, 0x31, 0xa9, 0x6e, - 0x99, 0x45, 0xe5, 0x9c, 0x72, 0x29, 0xa7, 0x65, 0xd8, 0xe7, 0xaa, 0xa9, 0xfe, 0x67, 0x0a, 0x88, - 0x44, 0xb0, 0x4c, 0x7d, 0xc3, 0x6a, 0x7a, 0xe4, 0x01, 0xa4, 0xfc, 0x4e, 0x8b, 0x62, 0xe3, 0xc9, - 0xab, 0x6f, 0x55, 0x0e, 0xd4, 0xda, 0x4a, 0x3f, 0xb1, 0x5c, 0xb4, 0xd1, 0x69, 0x51, 0x0d, 0x59, - 0x91, 0xf3, 0x30, 0x41, 0x5d, 0xd7, 0x71, 0xf5, 0x6d, 0xea, 0x79, 0x46, 0x83, 0x16, 0x13, 0x08, - 0x64, 0x1c, 0x0b, 0xdf, 0xe1, 0x65, 0x84, 0x40, 0x8a, 0x69, 0x63, 0x31, 0x79, 0x4e, 0xb9, 0x34, - 0xae, 0xe1, 0x6f, 0xa2, 0x41, 0x66, 0xd3, 0xa2, 0x4d, 0xd3, 0x2b, 0xa6, 
0xce, 0x25, 0x2f, 0xe5, - 0xaf, 0xbe, 0x7e, 0x38, 0x34, 0x37, 0x90, 0x76, 0x31, 0xf5, 0x83, 0xbd, 0xf2, 0x09, 0x4d, 0x70, - 0x2a, 0xfd, 0x61, 0x02, 0x32, 0xbc, 0x82, 0xcc, 0x41, 0xc6, 0xf2, 0xbc, 0x36, 0x75, 0x03, 0xc9, - 0xf0, 0x2f, 0x52, 0x84, 0x31, 0xaf, 0x5d, 0x7b, 0x4c, 0xeb, 0xbe, 0x40, 0x1a, 0x7c, 0x92, 0xe7, - 0x00, 0x76, 0x8c, 0xa6, 0x65, 0xea, 0x9b, 0xae, 0xb3, 0x8d, 0x50, 0x93, 0x5a, 0x0e, 0x4b, 0x6e, - 0xb8, 0xce, 0x36, 0x29, 0x43, 0x9e, 0x57, 0xb7, 0x6d, 0xdf, 0x6a, 0x16, 0x53, 0x58, 0xcf, 0x29, - 0x1e, 0xb2, 0x12, 0x72, 0x16, 0x72, 0x4c, 0x47, 0xa8, 0xe7, 0x51, 0xaf, 0x98, 0x3e, 0x97, 0xbc, - 0x94, 0xd3, 0xba, 0x05, 0xa4, 0x0a, 0x27, 0x3d, 0xab, 0x61, 0x1b, 0x7e, 0xdb, 0xa5, 0xba, 0xd1, - 0x6c, 0x38, 0xae, 0xe5, 0x3f, 0xda, 0x2e, 0x66, 0x10, 0x03, 0x09, 0xab, 0x16, 0x82, 0x1a, 0x06, - 0xa7, 0xd5, 0xae, 0x35, 0xad, 0xba, 0xbe, 0x45, 0x3b, 0xc5, 0x31, 0x6c, 0x97, 0xe3, 0x25, 0x77, - 0x68, 0x87, 0x9c, 0x81, 0xdc, 0x16, 0xed, 0xe8, 0x6d, 0x94, 0x79, 0x16, 0x7b, 0xcb, 0x6e, 0xd1, - 0xce, 0x43, 0x94, 0xf7, 0x4b, 0x40, 0xe8, 0xae, 0x4f, 0x6d, 0x93, 0x9a, 0x7a, 0xb7, 0x55, 0x0e, - 0x5b, 0x15, 0x82, 0x9a, 0x3b, 0xa2, 0xb5, 0xfa, 0x00, 0xa6, 0x7a, 0xe6, 0x96, 0x64, 0x20, 0xb1, - 0xb4, 0x50, 0x38, 0x41, 0xb2, 0x90, 0xba, 0x7b, 0x6f, 0x79, 0xa5, 0xa0, 0x90, 0x09, 0xc8, 0x2d, - 0xad, 0xad, 0xae, 0xdc, 0xdd, 0xd0, 0x97, 0x16, 0x0a, 0x09, 0x02, 0x90, 0xe1, 0x9f, 0x85, 0x24, - 0xc9, 0x41, 0xfa, 0xe1, 0x2a, 0x2b, 0x4e, 0x31, 0xba, 0x87, 0xab, 0x85, 0xb4, 0xea, 0xc0, 0x4c, - 0x54, 0x5f, 0xbd, 0x96, 0x63, 0x7b, 0x94, 0x7c, 0x06, 0xc6, 0xeb, 0x52, 0x79, 0x51, 0xc1, 0xa9, - 0x7f, 0xf9, 0x50, 0x53, 0x2f, 0xe6, 0x3c, 0xc2, 0x48, 0xad, 0xc2, 0xa4, 0xa8, 0x1e, 0xb6, 0x36, - 0x6e, 0xa7, 0xb2, 0x89, 0x42, 0x52, 0xbd, 0x0b, 0xb0, 0xde, 0xf1, 0x7c, 0xba, 0xbd, 0x6a, 0x6f, - 0x3a, 0x6c, 0x72, 0x3d, 0xfc, 0xd2, 0x99, 0x9d, 0x16, 0x04, 0xe0, 0x45, 0x1a, 0x6c, 0x51, 0xd7, - 0xa6, 0x4d, 0xde, 0x80, 0xab, 0x0e, 0xf0, 0x22, 0xd6, 0x40, 0xfd, 0x6a, 0x12, 0xa6, 0x42, 0x04, - 0x62, 0xb4, 
0xef, 0x46, 0x21, 0xa4, 0x17, 0x17, 0xf6, 0xf7, 0xca, 0x99, 0xbb, 0x0c, 0xc6, 0xf2, - 0x4f, 0xf7, 0xca, 0xaf, 0x35, 0x2c, 0xff, 0x51, 0xbb, 0x56, 0xa9, 0x3b, 0xdb, 0xd5, 0x50, 0x00, - 0x66, 0xad, 0xfb, 0xbb, 0xda, 0xda, 0x6a, 0x54, 0x85, 0x49, 0xaf, 0x70, 0xb2, 0x60, 0x14, 0xe4, - 0x6d, 0x18, 0x13, 0xca, 0x85, 0x60, 0xf2, 0x57, 0xe7, 0x25, 0x21, 0x32, 0xdb, 0x55, 0x79, 0x18, - 0xda, 0xae, 0x05, 0xd3, 0x74, 0x85, 0xd4, 0x02, 0x22, 0xf2, 0x26, 0x00, 0x6e, 0x4c, 0x7c, 0x3c, - 0x49, 0x64, 0x31, 0x2b, 0xb1, 0xc0, 0xca, 0x0a, 0x1b, 0x9a, 0xa0, 0xcc, 0x61, 0x09, 0x0a, 0x63, - 0x2d, 0x2a, 0xad, 0x14, 0x12, 0xbf, 0x10, 0x33, 0x89, 0x5d, 0x49, 0x0b, 0x66, 0xb2, 0x68, 0xd7, - 0x21, 0xef, 0x3d, 0x69, 0xea, 0xc1, 0x68, 0xd2, 0x23, 0x8d, 0x86, 0x30, 0x36, 0xfb, 0x7b, 0x65, - 0x58, 0x7f, 0xb0, 0xb6, 0xc0, 0x29, 0x35, 0xf0, 0x9e, 0x34, 0xc5, 0x6f, 0x75, 0x12, 0xc6, 0x99, - 0xc0, 0x02, 0x6d, 0x50, 0xbf, 0x9d, 0x84, 0x09, 0x51, 0x20, 0x26, 0xe7, 0x16, 0xa4, 0x99, 0x28, - 0x03, 0x1d, 0x7c, 0x69, 0x00, 0x7c, 0xbe, 0xdd, 0x04, 0xbb, 0x20, 0xce, 0xc0, 0x3a, 0x7e, 0x88, - 0x51, 0x70, 0x06, 0xe4, 0xcf, 0x14, 0x38, 0x19, 0xec, 0x28, 0x7a, 0xad, 0xa3, 0x07, 0x73, 0x9e, - 0x40, 0xc6, 0x6f, 0xc7, 0xc8, 0x25, 0x82, 0xa8, 0xb2, 0x26, 0x78, 0x2c, 0x76, 0x70, 0xae, 0xcd, - 0x15, 0xdb, 0x77, 0x3b, 0x8b, 0xf7, 0xc4, 0x48, 0x0b, 0x3d, 0xd5, 0xcb, 0x1f, 0xfc, 0xf8, 0x68, - 0x1a, 0x54, 0x68, 0xf6, 0xf4, 0x53, 0xda, 0x85, 0xd9, 0x81, 0x7d, 0x93, 0x02, 0x24, 0x99, 0xf1, - 0x41, 0xe5, 0xd5, 0xd8, 0x4f, 0xb2, 0x0a, 0xe9, 0x1d, 0xa3, 0xd9, 0xe6, 0x66, 0x7e, 0xf2, 0xea, - 0x6b, 0xd2, 0xe0, 0xb6, 0x76, 0x2a, 0xc1, 0x16, 0x5b, 0x11, 0xdb, 0xbc, 0xe8, 0x34, 0x60, 0xce, - 0x85, 0xa7, 0x71, 0x0e, 0x6f, 0x26, 0xae, 0x2b, 0xea, 0x05, 0xc8, 0xb3, 0x06, 0x43, 0xf7, 0xb3, - 0xef, 0xa7, 0x20, 0xa7, 0x19, 0x9b, 0x3e, 0xe3, 0xc0, 0xcc, 0x1b, 0xb8, 0xb4, 0xd5, 0xb4, 0xea, - 0x46, 0xd0, 0x32, 0xb5, 0x38, 0xb1, 0xbf, 0x57, 0xce, 0x69, 0xbc, 0x74, 0x75, 0x59, 0xcb, 0x89, - 0x06, 0xab, 0x26, 0xf9, 0x38, 0xc0, 0x23, 0xc3, 
0x35, 0xd1, 0x7b, 0xa0, 0x62, 0xb1, 0x4c, 0x57, - 0xf8, 0xc6, 0x5d, 0xb9, 0x65, 0xb8, 0x26, 0x32, 0x0d, 0xb4, 0xfc, 0x51, 0x50, 0xc0, 0x36, 0xad, - 0x26, 0x35, 0x4c, 0x5c, 0x1b, 0x29, 0x0d, 0x7f, 0x93, 0x19, 0x48, 0x73, 0x36, 0x29, 0x84, 0xc7, - 0x3f, 0xd8, 0x9e, 0x62, 0xb4, 0x5a, 0x4d, 0x8b, 0x9a, 0xa8, 0xbd, 0x29, 0x2d, 0xf8, 0x24, 0x1b, - 0x90, 0x6d, 0xb9, 0x4e, 0x03, 0x15, 0x3b, 0x83, 0xea, 0x70, 0x35, 0x46, 0x1d, 0xc2, 0x11, 0x56, - 0xee, 0x0b, 0x22, 0xae, 0x02, 0x1c, 0x5a, 0xc8, 0x89, 0x5c, 0x84, 0x29, 0x86, 0x46, 0xf7, 0x5d, - 0xc3, 0xf6, 0x36, 0xa9, 0x4b, 0x29, 0xee, 0x0f, 0x29, 0x6d, 0x92, 0x15, 0x6f, 0x84, 0xa5, 0xa5, - 0x5f, 0x55, 0x20, 0x1b, 0xb0, 0x62, 0xd8, 0xb7, 0x0d, 0xbf, 0xfe, 0x88, 0x0b, 0x4c, 0xe3, 0x1f, - 0x6c, 0x94, 0x36, 0xdd, 0xe5, 0x9b, 0x61, 0x4a, 0xc3, 0xdf, 0xdd, 0x51, 0x26, 0xe5, 0x51, 0xce, - 0x41, 0xa6, 0x65, 0xb4, 0x3d, 0x6a, 0xe2, 0xe0, 0xb3, 0x9a, 0xf8, 0x22, 0x97, 0xa1, 0xd0, 0xa2, - 0xb6, 0x69, 0xd9, 0x0d, 0xdd, 0xb3, 0x8d, 0x96, 0xf7, 0xc8, 0xf1, 0x85, 0x18, 0xa6, 0x44, 0xf9, - 0xba, 0x28, 0x2e, 0x3d, 0x86, 0x89, 0xc8, 0xc8, 0x64, 0x05, 0x4b, 0x71, 0x05, 0x5b, 0x92, 0x15, - 0x2c, 0x7e, 0x6b, 0xe8, 0x17, 0x97, 0xac, 0x5a, 0xfb, 0x09, 0x98, 0xd0, 0x0c, 0xbb, 0x41, 0xef, - 0xbb, 0x4e, 0xad, 0x49, 0xb7, 0x3d, 0x72, 0x0e, 0xf2, 0x6d, 0xdb, 0xd8, 0x31, 0xac, 0xa6, 0x51, - 0x6b, 0x72, 0x27, 0x28, 0xab, 0xc9, 0x45, 0xe4, 0x1a, 0x9c, 0x62, 0x12, 0xa4, 0xae, 0x6e, 0x3b, - 0xbe, 0xce, 0x9d, 0xce, 0x47, 0x4e, 0xd3, 0xa4, 0x2e, 0xc2, 0xc9, 0x6a, 0x33, 0xbc, 0xfa, 0xae, - 0xe3, 0xaf, 0xb1, 0xca, 0x5b, 0x58, 0x47, 0x9e, 0x87, 0x49, 0xdb, 0xd1, 0x99, 0x46, 0xe9, 0xbc, - 0x1e, 0x05, 0x97, 0xd5, 0xc6, 0x6d, 0x87, 0x61, 0x5c, 0xc3, 0x32, 0x72, 0x09, 0xa6, 0xda, 0xb6, - 0x49, 0x5d, 0xa1, 0x99, 0x7e, 0x28, 0xc8, 0xde, 0x62, 0x72, 0x1a, 0xb2, 0xb6, 0xc3, 0xbb, 0x47, - 0x49, 0x66, 0xb5, 0x31, 0xdb, 0xc1, 0x0e, 0xc9, 0x75, 0x28, 0x3e, 0x69, 0x5b, 0xd4, 0xab, 0x53, - 0xdb, 0xd7, 0xe9, 0x93, 0xb6, 0xd1, 0xf4, 0x74, 0xdf, 0xaa, 0x6f, 0x59, 0x76, 0x03, 
0x7d, 0x89, - 0xac, 0x36, 0x17, 0xd6, 0xaf, 0x60, 0xf5, 0x06, 0xaf, 0x25, 0x2f, 0x02, 0xe1, 0x08, 0x9d, 0x86, - 0xee, 0x3b, 0x8e, 0xde, 0x34, 0xdc, 0x06, 0xd7, 0x9b, 0xac, 0x36, 0xc5, 0x6a, 0xd6, 0x9c, 0xc6, - 0x86, 0xe3, 0xac, 0xb1, 0x62, 0x72, 0x01, 0x26, 0x9d, 0x9d, 0x08, 0xd4, 0x2c, 0x36, 0xec, 0x29, - 0x55, 0xb7, 0x60, 0x0a, 0x65, 0xcc, 0xa6, 0xc1, 0xc2, 0x93, 0x04, 0xf3, 0x3d, 0x9e, 0xb4, 0xa9, - 0x6b, 0x51, 0x4f, 0x6f, 0x51, 0x57, 0xf7, 0x68, 0xdd, 0xb1, 0xf9, 0x22, 0x55, 0xb4, 0x82, 0xa8, - 0xb9, 0x4f, 0xdd, 0x75, 0x2c, 0x27, 0x57, 0x60, 0xfa, 0xff, 0xbb, 0x96, 0x1f, 0x6d, 0x9c, 0xc0, - 0xc6, 0x53, 0xbc, 0x22, 0x6c, 0xab, 0xde, 0x02, 0xb8, 0xef, 0x52, 0xdf, 0xef, 0xac, 0xb7, 0x0c, - 0x9b, 0x39, 0x40, 0x9e, 0x6f, 0xb8, 0xbe, 0x1e, 0x28, 0x50, 0x4e, 0xcb, 0x62, 0x01, 0xf3, 0x8e, - 0x4e, 0xc1, 0x18, 0xb5, 0xd1, 0xf7, 0x11, 0x5b, 0x75, 0x86, 0xda, 0xcc, 0xe1, 0x79, 0x33, 0xf5, - 0xef, 0xbf, 0x5d, 0x56, 0xd4, 0xaf, 0x66, 0x99, 0x39, 0xb1, 0x1b, 0x14, 0x37, 0xa0, 0x4f, 0x41, - 0xca, 0x6b, 0x19, 0x36, 0x32, 0x89, 0xdf, 0xc7, 0xba, 0xdd, 0x8b, 0x35, 0x89, 0x84, 0x64, 0x15, - 0x00, 0x45, 0x2b, 0x5b, 0x98, 0xe7, 0x47, 0x51, 0xdc, 0xc0, 0xe8, 0xb8, 0xa1, 0x69, 0xbb, 0x21, - 0x1b, 0x98, 0xfc, 0xd5, 0x2b, 0x43, 0xed, 0x6b, 0x38, 0x8c, 0x60, 0x4f, 0xe2, 0x8b, 0x75, 0x1b, - 0x26, 0x3d, 0xa7, 0xed, 0xd6, 0x69, 0xb8, 0x1b, 0xa5, 0xd1, 0x03, 0xb9, 0xb9, 0xbf, 0x57, 0x1e, - 0x5f, 0xc7, 0x9a, 0xe3, 0xf9, 0x21, 0xe3, 0x5e, 0x97, 0x89, 0x49, 0x9e, 0xc0, 0x94, 0xe8, 0x8e, - 0x21, 0xc3, 0xfe, 0x32, 0xd8, 0xdf, 0xea, 0xfe, 0x5e, 0x79, 0x82, 0xf7, 0xb7, 0xce, 0x6a, 0xb0, - 0xc3, 0xd7, 0x0f, 0xd5, 0xa1, 0xa0, 0xd3, 0x26, 0x3c, 0x89, 0x8d, 0xd9, 0x7f, 0xf0, 0x18, 0x1b, - 0x70, 0xf0, 0x58, 0x82, 0x09, 0xb1, 0x8a, 0x2d, 0x06, 0xac, 0x83, 0x9e, 0x72, 0xfe, 0x6a, 0x51, - 0x12, 0x6b, 0xd0, 0x0d, 0xae, 0xaf, 0xc0, 0xb7, 0x44, 0xa2, 0x5b, 0x9c, 0x86, 0xdc, 0x46, 0x23, - 0x8e, 0x36, 0xa4, 0x98, 0xc3, 0x69, 0xb9, 0x14, 0x3b, 0xb9, 0x92, 0xcd, 0x91, 0x4c, 0x37, 0xb7, - 0x41, 0x62, 0x7e, 0xbd, 
0x22, 0xf4, 0xcd, 0xef, 0x40, 0x46, 0xdd, 0x85, 0x25, 0xcf, 0xaf, 0x47, - 0x3e, 0x07, 0x13, 0x4d, 0x66, 0xbf, 0xa9, 0xa7, 0x37, 0x9d, 0xba, 0xd1, 0x2c, 0xe6, 0x91, 0xdf, - 0xab, 0x43, 0xf5, 0x65, 0x8d, 0x51, 0xbd, 0x63, 0xd8, 0x46, 0x83, 0xba, 0x92, 0xda, 0x8c, 0x0b, - 0x6e, 0x6b, 0x8c, 0x19, 0x79, 0x0f, 0x26, 0x03, 0xee, 0x8d, 0xa6, 0x53, 0x33, 0x9a, 0xc5, 0xf1, - 0xe3, 0xb1, 0x0f, 0xc0, 0xde, 0x44, 0x6e, 0xe4, 0x21, 0x8c, 0xcb, 0x27, 0xfa, 0xe2, 0x04, 0x72, - 0x7f, 0x69, 0x38, 0x77, 0x46, 0x14, 0x71, 0xc1, 0xf2, 0xcd, 0x6e, 0x11, 0x3b, 0x81, 0x85, 0xc6, - 0xaf, 0x38, 0x89, 0x06, 0xab, 0x5b, 0xc0, 0x76, 0xe9, 0xc0, 0x52, 0x4e, 0x71, 0xa3, 0x2a, 0x3e, - 0xd5, 0x5f, 0x51, 0xc4, 0x56, 0x31, 0xf4, 0xf0, 0x40, 0x0c, 0xc8, 0xb9, 0xac, 0xa5, 0x6e, 0x99, - 0x1e, 0x3a, 0x78, 0xc9, 0xc5, 0xe5, 0xfd, 0xbd, 0x72, 0x96, 0x2f, 0xc3, 0x65, 0xef, 0xd0, 0xda, - 0x2d, 0x08, 0xb5, 0x2c, 0xb2, 0x5d, 0x35, 0x3d, 0x75, 0x03, 0x26, 0x03, 0x30, 0xc2, 0x55, 0x5d, - 0x84, 0x0c, 0xd6, 0x06, 0xbe, 0xea, 0xf3, 0xc3, 0xb4, 0x46, 0x92, 0xbc, 0xa0, 0x54, 0x2f, 0xc1, - 0xc4, 0x4d, 0x8c, 0x37, 0x0d, 0xf5, 0xb5, 0xbe, 0x9b, 0x80, 0xa9, 0x15, 0x0c, 0xcf, 0x30, 0xb1, - 0x7a, 0x68, 0x22, 0xdf, 0x83, 0x6c, 0xb8, 0xb0, 0xf9, 0x51, 0x66, 0x69, 0x7f, 0xaf, 0x3c, 0x76, - 0xdc, 0x25, 0x3d, 0xe6, 0x89, 0xc5, 0xbc, 0x09, 0x73, 0x6c, 0x32, 0xa8, 0xeb, 0xe9, 0x86, 0x6d, - 0xf2, 0xd5, 0xda, 0x70, 0x8d, 0xed, 0xe0, 0x70, 0xf3, 0x8a, 0x3c, 0x62, 0xae, 0x0e, 0x95, 0x20, - 0x84, 0x54, 0xd9, 0xe0, 0x94, 0x0b, 0xb6, 0x79, 0x2b, 0xa4, 0xd3, 0x66, 0xfc, 0x01, 0xa5, 0xe4, - 0x26, 0xe4, 0x39, 0x99, 0x8e, 0x71, 0x90, 0x24, 0x3a, 0xb1, 0x17, 0xe2, 0x98, 0x73, 0x49, 0x60, - 0xc0, 0x03, 0x68, 0xf8, 0x5b, 0x7d, 0x19, 0x88, 0x24, 0xa3, 0xa1, 0x32, 0xfd, 0xbf, 0x70, 0x32, - 0xd2, 0x5c, 0x4c, 0x6c, 0x68, 0x0d, 0xf8, 0xbc, 0xc6, 0x59, 0x83, 0x9e, 0x19, 0x89, 0x58, 0x03, - 0xf5, 0xff, 0x01, 0x6c, 0xb8, 0x46, 0x9d, 0xae, 0xec, 0x30, 0x45, 0xbf, 0x0e, 0x29, 0xdf, 0xda, - 0xa6, 0x62, 0x3f, 0x2b, 0x55, 0x78, 0x70, 0xa9, 0x12, 0x04, 
0x97, 0x2a, 0x1b, 0x41, 0x70, 0x69, - 0x31, 0xcb, 0x98, 0x7c, 0xe3, 0xc7, 0x65, 0x45, 0x43, 0x0a, 0xb6, 0x44, 0xa2, 0x61, 0x9c, 0xe0, - 0x53, 0xfd, 0xbe, 0x02, 0x53, 0x0b, 0x4d, 0x66, 0x6a, 0x7c, 0xc7, 0x5d, 0x76, 0x3b, 0x5a, 0xdb, - 0x66, 0x4a, 0x11, 0xac, 0x05, 0xec, 0x2b, 0xc9, 0x95, 0x42, 0x68, 0xf4, 0x91, 0x57, 0xc2, 0x98, - 0x58, 0x09, 0xe4, 0x2d, 0xc8, 0x50, 0x36, 0x20, 0x4f, 0x9c, 0xa4, 0xe2, 0x76, 0xe6, 0xee, 0xf0, - 0x35, 0x41, 0xa4, 0x5e, 0x85, 0xd9, 0x10, 0x31, 0xf2, 0x0e, 0x66, 0xe9, 0x74, 0x2f, 0xee, 0xb0, - 0x4b, 0xf5, 0x8f, 0x15, 0x98, 0xeb, 0x25, 0x1a, 0x7c, 0x98, 0x4f, 0x7e, 0x98, 0x87, 0xf9, 0x25, - 0x18, 0x33, 0xdd, 0x8e, 0xee, 0xb6, 0x6d, 0xa1, 0xef, 0x71, 0x9a, 0xd0, 0x33, 0x0d, 0x5a, 0xc6, - 0xc4, 0x7f, 0xd5, 0xaf, 0x29, 0x50, 0xe8, 0x62, 0xff, 0x1f, 0x60, 0xc8, 0xde, 0x85, 0x69, 0x09, - 0x8f, 0x10, 0xe3, 0x0a, 0x64, 0xc5, 0x50, 0x47, 0xd1, 0xfa, 0xde, 0xb1, 0x8e, 0xf1, 0xb1, 0x7a, - 0xaa, 0x0a, 0xe3, 0xb7, 0xd7, 0xef, 0xdd, 0x0d, 0xd9, 0x06, 0x11, 0x46, 0xa5, 0x1b, 0x61, 0x54, - 0xbf, 0xab, 0x40, 0x7e, 0xcd, 0x69, 0x0c, 0x37, 0xea, 0x33, 0x90, 0x6e, 0xd2, 0x1d, 0xda, 0x14, - 0x4a, 0xcf, 0x3f, 0xc8, 0x73, 0x00, 0xdc, 0xc1, 0xc4, 0xc5, 0xc4, 0x8f, 0x42, 0xdc, 0xe5, 0x64, - 0x0b, 0x88, 0x69, 0x11, 0x73, 0x31, 0xb1, 0x92, 0x9f, 0x06, 0x99, 0xcb, 0x89, 0x55, 0x05, 0x48, - 0x6e, 0x1b, 0xbb, 0xe8, 0x71, 0xe5, 0x34, 0xf6, 0x93, 0x2d, 0xac, 0x96, 0xe1, 0xfb, 0xd4, 0xb5, - 0x45, 0xc4, 0x2f, 0xf8, 0x54, 0xef, 0x01, 0x59, 0x73, 0x1a, 0xec, 0x34, 0x64, 0x49, 0x16, 0xff, - 0x13, 0xcc, 0x7f, 0xc5, 0x22, 0x21, 0xa4, 0xd3, 0xbd, 0xf1, 0x90, 0xa6, 0xd3, 0xa8, 0xc8, 0xa7, - 0xc3, 0xa0, 0xbd, 0x5a, 0x81, 0x93, 0x6b, 0x4e, 0xe3, 0x86, 0xd5, 0xa4, 0xde, 0x9a, 0xe5, 0xf9, - 0x43, 0x4d, 0xd3, 0x7d, 0x98, 0x89, 0xb6, 0x17, 0x10, 0xae, 0x43, 0x7a, 0x93, 0x15, 0x0a, 0x00, - 0x67, 0x07, 0x01, 0x60, 0x54, 0xb2, 0x35, 0x42, 0x02, 0xf5, 0x2d, 0x98, 0x14, 0x1c, 0x87, 0x4a, - 0x9e, 0x40, 0x8a, 0xd1, 0x08, 0xc1, 0xe3, 0x6f, 0xb5, 0x0e, 0x13, 0xeb, 0xbe, 0x51, 0xdf, 0x1a, - 
0x3e, 0x6f, 0x9f, 0x10, 0xe1, 0x6c, 0x1e, 0x8b, 0x88, 0x0d, 0x40, 0x21, 0xc3, 0x6e, 0xd8, 0x5a, - 0x5d, 0x87, 0x14, 0x03, 0x88, 0xc7, 0x5f, 0x43, 0xd8, 0xca, 0x9c, 0x86, 0xbf, 0xd9, 0xc9, 0x82, - 0x01, 0xd1, 0x3d, 0xeb, 0xf3, 0x9c, 0x77, 0x52, 0xcb, 0xb2, 0x82, 0x75, 0xeb, 0xf3, 0x94, 0x94, - 0x20, 0x5b, 0x77, 0x6c, 0x1f, 0xcd, 0x12, 0x0f, 0x67, 0x87, 0xdf, 0xea, 0x6f, 0x2a, 0x30, 0x75, - 0x93, 0xfa, 0x28, 0xcb, 0xa1, 0xe0, 0xcf, 0x40, 0xae, 0x69, 0x79, 0xbe, 0xee, 0xd8, 0xcd, 0x8e, - 0x38, 0x5d, 0x66, 0x59, 0xc1, 0x3d, 0xbb, 0xd9, 0x21, 0x6f, 0x88, 0x91, 0xa5, 0x71, 0x64, 0xe7, - 0x63, 0x46, 0xc6, 0x3a, 0x93, 0xc2, 0xf1, 0x25, 0xc8, 0x0a, 0xcd, 0xe2, 0x01, 0x87, 0x9c, 0x16, - 0x7e, 0xab, 0xab, 0x50, 0xe8, 0xa2, 0x13, 0xb3, 0x7c, 0x2d, 0x3a, 0xcb, 0xe5, 0x21, 0x3d, 0x05, - 0x53, 0xfc, 0x45, 0x98, 0xbc, 0xef, 0x3a, 0x9b, 0xa3, 0x4c, 0xf1, 0x62, 0x64, 0x28, 0x95, 0xd8, - 0xd3, 0x95, 0xcc, 0xb1, 0x22, 0xcd, 0x56, 0x01, 0x52, 0x18, 0x96, 0xce, 0x42, 0xea, 0xd6, 0xca, - 0xc2, 0xfd, 0xc2, 0x09, 0xf5, 0x32, 0x4c, 0xbe, 0x43, 0x7d, 0xd7, 0xaa, 0x0f, 0xdf, 0x7b, 0x7f, - 0x0f, 0xbd, 0xbb, 0x4d, 0x1f, 0x0d, 0x14, 0x33, 0xbc, 0xcf, 0x34, 0x2e, 0xfb, 0x69, 0x48, 0xa3, - 0x01, 0x1c, 0xe9, 0x18, 0xd8, 0x73, 0x74, 0x43, 0x42, 0xf5, 0x0a, 0xf3, 0xff, 0x04, 0xdc, 0x15, - 0x76, 0x98, 0x91, 0xb7, 0x65, 0x25, 0xba, 0x2d, 0x7f, 0x29, 0xc1, 0x0e, 0xe0, 0xa2, 0xb1, 0xf0, - 0x82, 0x9f, 0xf5, 0xb6, 0x7c, 0x13, 0x32, 0x78, 0xc6, 0x0a, 0xb6, 0xe5, 0xcb, 0x43, 0x4e, 0xba, - 0xdd, 0x81, 0x04, 0x2e, 0x29, 0x27, 0x27, 0xcb, 0x41, 0x04, 0x36, 0x89, 0x7c, 0x2e, 0x8d, 0xc2, - 0x87, 0x49, 0x3b, 0x12, 0x7d, 0x55, 0xdb, 0x50, 0x60, 0xb5, 0xcb, 0xb4, 0xd6, 0x6e, 0x04, 0xba, - 0x10, 0xd9, 0xdc, 0x94, 0x67, 0xb2, 0xb9, 0xfd, 0x63, 0x02, 0xa6, 0xa5, 0x7e, 0xc5, 0x72, 0xfa, - 0x9a, 0xd2, 0xe3, 0xaa, 0x5f, 0x1f, 0x32, 0xa8, 0x08, 0x39, 0xef, 0x46, 0x04, 0xfd, 0x3e, 0xc9, - 0x06, 0xf9, 0xc1, 0x8f, 0x8f, 0x08, 0x54, 0xa0, 0xf8, 0xd0, 0x26, 0xab, 0x44, 0x21, 0x2f, 0xa1, - 0x93, 0x03, 0x77, 0x49, 0x1e, 0xb8, 
0xfb, 0x74, 0x34, 0x70, 0x77, 0x65, 0x94, 0x8e, 0xfa, 0x03, - 0xc2, 0x7f, 0xa1, 0xc0, 0xd8, 0xc6, 0xae, 0x8d, 0x87, 0x8e, 0x07, 0x90, 0x10, 0x2a, 0x3c, 0xbe, - 0xb8, 0xc0, 0xc0, 0xfc, 0xd3, 0xa8, 0x6b, 0x93, 0xdf, 0xdd, 0xb6, 0x2d, 0xb3, 0xf2, 0xf0, 0xe1, - 0x2a, 0x9b, 0xf9, 0xc4, 0xea, 0xb2, 0x96, 0xb0, 0x4c, 0xf2, 0x26, 0x3a, 0xdc, 0xae, 0x2f, 0x40, - 0x8e, 0xe6, 0x1b, 0x73, 0x12, 0x72, 0x11, 0xa6, 0xfc, 0x5d, 0x5b, 0x37, 0xa9, 0x57, 0x77, 0xad, - 0x96, 0x6f, 0x39, 0xb6, 0x70, 0x0a, 0x26, 0xfd, 0x5d, 0x7b, 0xb9, 0x5b, 0xaa, 0xfe, 0x57, 0x02, - 0xf2, 0x0b, 0x75, 0xdf, 0xda, 0xa1, 0x0f, 0xda, 0xd4, 0xed, 0x90, 0xb9, 0x70, 0x1c, 0xb9, 0xc5, - 0x8c, 0x04, 0xa6, 0x00, 0x49, 0xef, 0x49, 0xe0, 0x74, 0xb0, 0x9f, 0x5d, 0x78, 0xc9, 0xc3, 0xc3, - 0x7b, 0x01, 0x26, 0x2d, 0x4f, 0x37, 0x2d, 0xcf, 0x77, 0xad, 0x5a, 0xbb, 0x1b, 0x5d, 0x9c, 0xb0, - 0xbc, 0xe5, 0x6e, 0x21, 0x59, 0x84, 0x74, 0xeb, 0x51, 0x10, 0x58, 0x9c, 0x1c, 0x78, 0xed, 0x11, - 0x3a, 0x5f, 0xdd, 0x31, 0x54, 0xee, 0x33, 0x1a, 0x8d, 0x93, 0xe2, 0x26, 0xd3, 0x8d, 0x6a, 0x2b, - 0x97, 0x12, 0x52, 0x6c, 0xfa, 0xb3, 0x90, 0x61, 0x52, 0xb2, 0x4c, 0x8c, 0xc7, 0x8c, 0x2f, 0x2e, - 0x1f, 0x6f, 0xe2, 0xd2, 0x4c, 0x19, 0x96, 0xb5, 0xb4, 0xbf, 0x6b, 0xaf, 0x9a, 0xea, 0x0b, 0x90, - 0x46, 0x20, 0x64, 0x02, 0x72, 0xf7, 0xb5, 0x95, 0xfb, 0x0b, 0xda, 0xea, 0xdd, 0x9b, 0x85, 0x13, - 0xec, 0x73, 0xe5, 0xff, 0xac, 0x2c, 0x3d, 0xdc, 0x60, 0x9f, 0x8a, 0xfa, 0x2a, 0x9c, 0x64, 0xae, - 0xcc, 0x3a, 0xf5, 0x3c, 0xcb, 0xb1, 0xc3, 0x1d, 0xa2, 0x04, 0xd9, 0xb6, 0x47, 0x5d, 0x69, 0xbf, - 0x0f, 0xbf, 0xd5, 0x6f, 0xa7, 0x61, 0x4c, 0xb4, 0x7f, 0xa6, 0xdb, 0x83, 0x8c, 0x21, 0x11, 0xc5, - 0xc0, 0x66, 0xb0, 0xde, 0xb4, 0xa8, 0xed, 0x87, 0x77, 0x61, 0x5c, 0xbf, 0x26, 0x78, 0xa9, 0xb8, - 0xda, 0x22, 0x97, 0xa1, 0x80, 0xd7, 0x0b, 0x75, 0x4c, 0x21, 0xd0, 0x91, 0x15, 0x77, 0x40, 0xa7, - 0xa4, 0xf2, 0xbb, 0x8c, 0xe3, 0x3a, 0x4c, 0x1a, 0x38, 0x89, 0xba, 0x08, 0xbc, 0xe2, 0xbd, 0x74, - 0x3e, 0x7a, 0xe2, 0x3d, 0x78, 0xd6, 0x83, 0xe0, 0x8d, 0x11, 0x16, 0x59, 
0xd4, 0xeb, 0x2a, 0x69, - 0xe6, 0xf0, 0x4a, 0xfa, 0x1e, 0xe4, 0xb6, 0x76, 0xf4, 0x88, 0x82, 0x2c, 0x1e, 0x5d, 0x39, 0xc6, - 0xee, 0xec, 0x70, 0xf5, 0x18, 0xdb, 0xc2, 0x1f, 0x18, 0x4e, 0x6e, 0x1a, 0x9e, 0xaf, 0x4b, 0xa3, - 0xee, 0x60, 0xe8, 0x3a, 0xa7, 0x4d, 0xb1, 0x8a, 0xfe, 0x65, 0x99, 0x43, 0x10, 0xf2, 0xb2, 0x2c, - 0x43, 0xde, 0x60, 0xc7, 0x0e, 0xbd, 0xd6, 0xf1, 0x29, 0x0f, 0xd4, 0x25, 0x35, 0xc0, 0xa2, 0x45, - 0x56, 0x42, 0x2e, 0xc0, 0xd4, 0xb6, 0xb1, 0xab, 0xcb, 0x8d, 0xf2, 0xd8, 0x68, 0x62, 0xdb, 0xd8, - 0x5d, 0xe8, 0xb6, 0x5b, 0x00, 0x10, 0x38, 0xfc, 0x5d, 0x5b, 0x44, 0xd0, 0xd4, 0xb8, 0x33, 0x2c, - 0xb7, 0x7b, 0x5a, 0x8e, 0x53, 0x6d, 0xec, 0xda, 0xea, 0x2f, 0x2b, 0x30, 0x2d, 0xab, 0x32, 0xf7, - 0x07, 0x9e, 0xa5, 0x82, 0x1e, 0x1c, 0x02, 0xf8, 0x5d, 0x05, 0x66, 0xa2, 0xcb, 0x4a, 0x6c, 0x7a, - 0xcb, 0x90, 0xf5, 0x44, 0x99, 0xd8, 0xf5, 0xe2, 0x46, 0x29, 0xc8, 0x83, 0xc8, 0x68, 0x40, 0x49, - 0x6e, 0xf7, 0xec, 0x54, 0x71, 0x96, 0xa9, 0x4f, 0x24, 0xd1, 0xcd, 0x4a, 0x7d, 0x02, 0x64, 0xc9, - 0xb0, 0xeb, 0xb4, 0x89, 0x33, 0x3d, 0xd4, 0x45, 0xbd, 0x00, 0x59, 0xd4, 0x14, 0x7e, 0x69, 0xcb, - 0xcc, 0x74, 0x9e, 0x69, 0x17, 0x12, 0x33, 0xed, 0xc2, 0xca, 0x9e, 0xc5, 0x9b, 0xec, 0x31, 0x20, - 0x37, 0xe1, 0x64, 0xa4, 0x4b, 0x21, 0x1b, 0x76, 0x5c, 0xc0, 0x62, 0x6a, 0x8a, 0x0b, 0xa7, 0xf0, - 0x9b, 0x1d, 0x3b, 0x11, 0x6f, 0x70, 0xec, 0xc4, 0x0f, 0xb5, 0x03, 0x33, 0x9c, 0x91, 0x18, 0xe0, - 0x50, 0xf4, 0x2f, 0x01, 0x08, 0x21, 0x06, 0xf8, 0xc7, 0xf9, 0x6d, 0xa8, 0x60, 0xb0, 0xba, 0xac, - 0xe5, 0x44, 0x83, 0x21, 0x63, 0x58, 0x85, 0xd9, 0x9e, 0xae, 0x8f, 0x3c, 0x8a, 0x7f, 0x56, 0xa0, - 0xb0, 0xde, 0x32, 0xec, 0x48, 0x78, 0xec, 0x7c, 0xcf, 0x10, 0x16, 0xa1, 0xab, 0xb7, 0xe1, 0x70, - 0x34, 0xf9, 0x5e, 0x87, 0x8f, 0xe6, 0xda, 0x4f, 0xf7, 0xca, 0xaf, 0x1e, 0xce, 0x0d, 0xba, 0x43, - 0x3b, 0xd2, 0x75, 0xd0, 0xdd, 0xee, 0x75, 0x50, 0xf2, 0x38, 0x1c, 0xc5, 0x2d, 0x92, 0xfa, 0x47, - 0x0a, 0x4c, 0x4b, 0xa3, 0x13, 0x52, 0x5a, 0x83, 0xbc, 0xef, 0xf8, 0x46, 0x53, 0x0f, 0x62, 0x7a, - 0x7d, 0xd7, 
0x49, 0xbd, 0xc1, 0xc5, 0x77, 0xfe, 0xf7, 0xd2, 0x12, 0xf2, 0x08, 0xd2, 0x22, 0x90, - 0x1e, 0x4b, 0x98, 0x19, 0xe2, 0x3e, 0x6c, 0xdd, 0x69, 0xdb, 0xdc, 0x61, 0x49, 0x6b, 0x80, 0x45, - 0x4b, 0xac, 0x84, 0xbc, 0x0e, 0x73, 0x46, 0xab, 0xe5, 0x3a, 0xbb, 0xd6, 0xb6, 0xe1, 0x53, 0xb6, - 0xf3, 0x6f, 0x09, 0x6b, 0xc4, 0x6f, 0xac, 0x67, 0xa4, 0xda, 0x65, 0xcb, 0xdb, 0x42, 0xa3, 0xa4, - 0xfe, 0x2f, 0x98, 0x11, 0x97, 0x13, 0xd1, 0x88, 0xf7, 0x28, 0x73, 0xa3, 0x7e, 0x67, 0x02, 0x66, - 0x7b, 0xa8, 0xfb, 0xa3, 0x63, 0xd9, 0x0f, 0xdb, 0x24, 0xfd, 0x8d, 0x02, 0x27, 0x83, 0x0b, 0x14, - 0x39, 0xbf, 0x22, 0x87, 0x76, 0xe2, 0x46, 0xfc, 0x89, 0xb2, 0x1f, 0x6b, 0x25, 0xbc, 0x9c, 0x19, - 0x9c, 0x67, 0xd1, 0x53, 0x7d, 0xf4, 0x3c, 0x8b, 0x56, 0x4f, 0x3f, 0xa5, 0xbf, 0xcf, 0xf1, 0xac, - 0x94, 0xf0, 0x46, 0xba, 0xef, 0x0e, 0x4b, 0x19, 0x70, 0x87, 0xf5, 0x73, 0x0a, 0xcc, 0x4a, 0x97, - 0xd4, 0x7a, 0x6f, 0xd8, 0xee, 0xde, 0xfe, 0x5e, 0xf9, 0xe4, 0xc3, 0x6e, 0x83, 0x63, 0x1f, 0x72, - 0x4e, 0xb6, 0x7b, 0x99, 0x99, 0x1e, 0xf9, 0x7d, 0x05, 0x2e, 0x48, 0x37, 0xdc, 0x7d, 0x17, 0xe4, - 0x12, 0xac, 0x24, 0xc2, 0xfa, 0xdc, 0xfe, 0x5e, 0xf9, 0x5c, 0xf7, 0xfa, 0x3b, 0x7a, 0x65, 0x7e, - 0x6c, 0x8c, 0xe7, 0xdc, 0x58, 0xce, 0xa6, 0x47, 0xbe, 0xac, 0x40, 0x31, 0x7a, 0x2b, 0x2f, 0x41, - 0x4c, 0x21, 0xc4, 0xfb, 0xfb, 0x7b, 0xe5, 0x99, 0xbb, 0xd2, 0x1d, 0xfd, 0xb1, 0x61, 0xcd, 0xd8, - 0x7d, 0xdc, 0x4c, 0x8f, 0xec, 0x02, 0x09, 0xee, 0xf3, 0x25, 0x0c, 0x69, 0xc4, 0x70, 0x67, 0x7f, - 0xaf, 0x3c, 0x75, 0x97, 0xdf, 0xee, 0x1f, 0xbb, 0xfb, 0x29, 0x5b, 0x66, 0x64, 0x7a, 0xe4, 0xeb, - 0x0a, 0x9c, 0xee, 0xc9, 0x2e, 0x90, 0x10, 0x64, 0x10, 0xc1, 0xfa, 0xfe, 0x5e, 0xf9, 0xd4, 0xc3, - 0x68, 0xa3, 0x63, 0x23, 0x39, 0xd5, 0x1e, 0xc4, 0xd0, 0xf4, 0xc8, 0xef, 0x28, 0xa0, 0x1e, 0x94, - 0xc1, 0x20, 0x41, 0x1b, 0x43, 0x68, 0xef, 0xee, 0xef, 0x95, 0xe7, 0x1f, 0x0c, 0xcc, 0x67, 0x38, - 0x36, 0xc2, 0xf9, 0x27, 0x31, 0x7c, 0x4d, 0x8f, 0x7c, 0x4b, 0x81, 0xb3, 0xfd, 0x09, 0x13, 0x12, - 0xc4, 0x6c, 0x57, 0x7a, 0x5a, 0x34, 0x7d, 0xe2, 
0xf8, 0xd2, 0x73, 0x07, 0x31, 0x34, 0x3d, 0xf2, - 0x55, 0x05, 0x8a, 0xd1, 0x14, 0x0c, 0x09, 0x50, 0x0e, 0x01, 0x69, 0xfb, 0x7b, 0xe5, 0xb9, 0x7b, - 0x3b, 0x1f, 0xea, 0x6c, 0xce, 0x39, 0x3b, 0x83, 0x26, 0xb3, 0xf4, 0x81, 0x12, 0xee, 0x07, 0x43, - 0x33, 0xc7, 0xd6, 0xa3, 0xf1, 0x81, 0xb7, 0x0e, 0x6d, 0xb6, 0x65, 0xcb, 0x29, 0x85, 0x0c, 0x6e, - 0xa7, 0xb2, 0x4a, 0x21, 0xab, 0xbe, 0x01, 0x85, 0x5b, 0x8e, 0x7f, 0x84, 0x3d, 0xed, 0x2b, 0x63, - 0x30, 0x2d, 0x51, 0x7e, 0x04, 0xa9, 0x9b, 0x7f, 0xab, 0xc0, 0xec, 0x23, 0xc7, 0xe7, 0x33, 0x37, - 0x20, 0x63, 0x70, 0x29, 0x46, 0x34, 0x7d, 0x48, 0xbb, 0x25, 0xd1, 0xed, 0xec, 0xbe, 0xd8, 0xce, - 0xa6, 0x7b, 0xeb, 0x8f, 0xbc, 0x9f, 0x4d, 0x3f, 0xea, 0xed, 0xa9, 0xb4, 0x03, 0xd9, 0x80, 0x3d, - 0xf9, 0x24, 0xa4, 0x4c, 0xea, 0xd5, 0x85, 0xdb, 0xa3, 0x0e, 0xc8, 0xb0, 0xc0, 0x76, 0x41, 0xa0, - 0x24, 0xf4, 0xd9, 0x91, 0xea, 0x80, 0xac, 0xa1, 0xc4, 0xe0, 0xac, 0xa1, 0xd2, 0xdf, 0x29, 0x30, - 0x81, 0x77, 0xc8, 0xe1, 0x7c, 0x3d, 0xeb, 0x0b, 0xea, 0x77, 0x01, 0xba, 0x53, 0x26, 0xe6, 0xe9, - 0xda, 0x91, 0xe6, 0x29, 0x4c, 0x34, 0x0c, 0x5a, 0x94, 0x7e, 0x49, 0xe1, 0x6e, 0x41, 0x38, 0x98, - 0x91, 0xdc, 0x02, 0x0d, 0x32, 0x08, 0x2e, 0x40, 0xf3, 0xe6, 0xa1, 0xd0, 0x44, 0xa4, 0xa7, 0x09, - 0x4e, 0xa5, 0x2f, 0xc2, 0xdc, 0x60, 0x75, 0x1a, 0xb0, 0x9e, 0xef, 0x45, 0xd7, 0xf3, 0x27, 0x0e, - 0xd5, 0xbd, 0x3c, 0x5c, 0x39, 0xfc, 0x77, 0x19, 0xc6, 0x47, 0xbd, 0xaa, 0xfd, 0x5e, 0x5a, 0x24, - 0x6d, 0x7c, 0x24, 0x6b, 0x56, 0x0e, 0xaa, 0x27, 0x9e, 0x41, 0x50, 0xfd, 0x2f, 0x15, 0x98, 0x71, - 0xc5, 0x40, 0x22, 0x26, 0x81, 0xc7, 0xc6, 0x3f, 0x35, 0xec, 0x1a, 0xa1, 0x1b, 0x42, 0x0e, 0x98, - 0x1c, 0x60, 0x0e, 0x7a, 0xeb, 0x8f, 0x6e, 0x0e, 0xdc, 0xde, 0x9e, 0x4a, 0xdf, 0xec, 0x55, 0xe4, - 0x12, 0x64, 0x83, 0x56, 0xc1, 0xb9, 0xd1, 0x3d, 0x50, 0xc9, 0x07, 0x3d, 0x1c, 0xf9, 0x34, 0xa4, - 0x2d, 0x7b, 0xd3, 0x09, 0xae, 0x08, 0x0e, 0x75, 0x9b, 0x82, 0x84, 0xa5, 0xf7, 0x61, 0x6e, 0xb0, - 0x48, 0x06, 0xa8, 0xf4, 0x9d, 0xa8, 0x4a, 0x5f, 0x1b, 0x59, 0xe8, 0x07, 0xa8, 0xf3, 
0xed, 0x54, - 0x36, 0x55, 0x48, 0xab, 0x2f, 0x03, 0x59, 0xee, 0x3e, 0xb5, 0x1a, 0x7a, 0x5f, 0x75, 0x49, 0xd8, - 0xb6, 0xe1, 0x2d, 0xff, 0x20, 0x01, 0xe3, 0xd8, 0x34, 0x78, 0xdf, 0xf3, 0xac, 0xad, 0xe0, 0x8b, - 0x30, 0x4d, 0xed, 0xba, 0xdb, 0xc1, 0x38, 0x77, 0x90, 0xbc, 0x85, 0x67, 0x74, 0xad, 0xd0, 0xad, - 0x10, 0xf7, 0x50, 0xe5, 0xe0, 0x38, 0xcc, 0x2f, 0x18, 0xf9, 0xa1, 0x94, 0x9f, 0x70, 0xf1, 0x0e, - 0xb2, 0xdb, 0x80, 0x9f, 0x5a, 0x53, 0x52, 0x03, 0x1e, 0x40, 0xbb, 0x04, 0x05, 0x11, 0x40, 0xdb, - 0xa2, 0x1d, 0xc1, 0x86, 0x67, 0x16, 0x8b, 0xb0, 0xe6, 0x1d, 0xda, 0xe1, 0xac, 0xa2, 0x2d, 0x39, - 0xbf, 0x4c, 0x4f, 0x4b, 0x7e, 0xfe, 0xfd, 0x0c, 0x4c, 0x06, 0xd2, 0x0d, 0x33, 0x12, 0x02, 0x43, - 0xca, 0x83, 0x57, 0x17, 0x63, 0xef, 0x91, 0xbb, 0xd2, 0x0e, 0x62, 0x4e, 0x9c, 0x58, 0xbd, 0x0e, - 0xd3, 0x98, 0xc2, 0xb9, 0x4d, 0xed, 0xc3, 0x45, 0x3c, 0xd4, 0x6f, 0xa5, 0x81, 0xc8, 0xa4, 0x02, - 0x57, 0x0b, 0xf3, 0x0f, 0x44, 0xa9, 0xc0, 0x76, 0x3b, 0xfe, 0x8e, 0xbb, 0x87, 0x45, 0x65, 0xc9, - 0x69, 0x36, 0x69, 0xdd, 0xa7, 0x66, 0x58, 0xd7, 0x97, 0x4f, 0x28, 0xf5, 0x41, 0x96, 0x00, 0x30, - 0x7a, 0xea, 0x52, 0x8f, 0x1e, 0xee, 0x0e, 0x22, 0xc7, 0xe8, 0x34, 0x46, 0x46, 0xde, 0x80, 0xa2, - 0x65, 0xfb, 0xd4, 0xb5, 0x8d, 0xa6, 0x6e, 0xb4, 0x5a, 0x18, 0x9f, 0xd6, 0x5b, 0x2e, 0xdd, 0xb4, - 0x76, 0x45, 0x98, 0x7a, 0x36, 0xa8, 0x5f, 0x68, 0xb5, 0xee, 0x1a, 0xdb, 0xf4, 0x3e, 0x56, 0x96, - 0xfe, 0x5a, 0x81, 0xf9, 0x15, 0xf1, 0x36, 0x69, 0x00, 0xde, 0x3b, 0xb4, 0x43, 0x6e, 0x40, 0x96, - 0xcd, 0x6f, 0x98, 0xe9, 0xd1, 0x13, 0x5e, 0x79, 0xd2, 0xac, 0x0c, 0x26, 0x0c, 0x72, 0x24, 0xb6, - 0x68, 0x67, 0xd9, 0xf0, 0x0d, 0x79, 0xa7, 0x48, 0x7c, 0xc8, 0x3b, 0x05, 0x1b, 0xc6, 0xd9, 0x38, - 0xb9, 0x13, 0xb3, 0x6b, 0x77, 0xf2, 0x57, 0xd7, 0x0e, 0x37, 0xa1, 0xf1, 0xf2, 0x11, 0xc3, 0x44, - 0x5b, 0xf6, 0x76, 0x90, 0x5a, 0x96, 0xe8, 0x8f, 0x3b, 0x0f, 0x96, 0x53, 0x34, 0xa5, 0xec, 0xcb, - 0x09, 0x28, 0x85, 0x8d, 0x22, 0xe6, 0xab, 0xe5, 0xb8, 0x3e, 0x99, 0x0c, 0xef, 0xb4, 0x92, 0x18, - 0x34, 0x3f, 0x0b, 0xb9, 
0xba, 0xb3, 0xdd, 0x6a, 0x52, 0x9f, 0x9a, 0x22, 0x9b, 0xa1, 0x5b, 0x40, - 0x5e, 0x83, 0xd9, 0x50, 0xcd, 0xf4, 0x4d, 0xcb, 0x6e, 0x50, 0xb7, 0xe5, 0x5a, 0xb6, 0x2f, 0x22, - 0x90, 0x33, 0x61, 0xe5, 0x8d, 0x6e, 0x1d, 0x79, 0x1b, 0x8a, 0x5d, 0x22, 0xe9, 0xad, 0x2a, 0x9b, - 0x35, 0x7c, 0x7d, 0x87, 0x80, 0x15, 0x6d, 0xce, 0x1b, 0x00, 0x13, 0xef, 0xa9, 0xc7, 0x5d, 0xbe, - 0x0c, 0xa9, 0xa9, 0x1b, 0xbe, 0x78, 0x58, 0x34, 0x9a, 0x3e, 0xe7, 0x43, 0xca, 0x05, 0x5f, 0x7d, - 0x0f, 0x2e, 0x2e, 0xb9, 0xd4, 0xf0, 0xe9, 0xc1, 0xf2, 0x08, 0xd6, 0xfb, 0x81, 0x03, 0x55, 0x0e, - 0x1e, 0xa8, 0xda, 0x81, 0x4b, 0xc3, 0xf9, 0x0b, 0xa3, 0xf0, 0x0e, 0x64, 0x5c, 0x2c, 0x11, 0xfa, - 0x73, 0x6d, 0x14, 0xfd, 0xe9, 0x67, 0x27, 0x98, 0xa8, 0xcf, 0x83, 0x7a, 0x70, 0xab, 0xf0, 0xf1, - 0xd4, 0xcf, 0xc0, 0xf9, 0xd8, 0x56, 0x02, 0xdb, 0x43, 0x18, 0xe3, 0x6c, 0x03, 0x6b, 0x75, 0x34, - 0x70, 0xc1, 0x62, 0x15, 0xbc, 0xd4, 0x3f, 0x51, 0x60, 0x66, 0x50, 0xeb, 0x3e, 0x1d, 0x3c, 0x50, - 0xf8, 0x89, 0x18, 0x2d, 0xbb, 0x09, 0xe3, 0xf5, 0x60, 0xb5, 0x32, 0x2d, 0x39, 0x8c, 0xd5, 0xcb, - 0x87, 0x94, 0x0b, 0xf8, 0x68, 0xc6, 0x77, 0x8d, 0x7a, 0xf8, 0x34, 0x08, 0x3f, 0xd4, 0xcf, 0xc0, - 0x99, 0xc1, 0x23, 0xe5, 0xfa, 0x72, 0x3d, 0x46, 0xc7, 0xf9, 0xc0, 0x0e, 0xd0, 0x6e, 0xf5, 0x09, - 0x9c, 0x1d, 0xcc, 0x58, 0x4c, 0xc6, 0x03, 0xc8, 0x4b, 0xfc, 0x84, 0x15, 0xa8, 0x1e, 0x76, 0x42, - 0x64, 0x1e, 0xea, 0x6b, 0x50, 0xbc, 0xed, 0xd4, 0x34, 0xda, 0xb0, 0x3c, 0xdf, 0xed, 0x88, 0xbb, - 0xfb, 0x61, 0x3e, 0xca, 0x7f, 0x28, 0x70, 0x7a, 0x00, 0xd5, 0x47, 0xe0, 0xb2, 0x7f, 0x16, 0xc6, - 0xdd, 0xb6, 0x6d, 0x5b, 0x76, 0x43, 0x7f, 0xec, 0xd4, 0x82, 0x63, 0x52, 0x5c, 0x42, 0xc6, 0x81, - 0x38, 0xb1, 0x26, 0x2f, 0xb8, 0xdd, 0x76, 0x6a, 0x5e, 0x69, 0x16, 0x92, 0xb7, 0x9d, 0x5a, 0xaf, - 0x0a, 0xaa, 0x97, 0xa1, 0x70, 0xdb, 0xa9, 0x45, 0x45, 0x33, 0x0b, 0x99, 0xc7, 0x4e, 0xad, 0x3b, - 0xa3, 0xe9, 0xc7, 0x4e, 0x6d, 0xd5, 0x54, 0x57, 0x60, 0x5a, 0x6a, 0x2a, 0xe4, 0xf1, 0x0a, 0x24, - 0x1f, 0x3b, 0x35, 0xb1, 0xb6, 0xe7, 0x7b, 0x6c, 0x36, 0xfe, 
0x55, 0x00, 0xfe, 0x17, 0x02, 0x10, - 0x10, 0x6b, 0x7a, 0xe5, 0x1a, 0x40, 0x37, 0xb9, 0x8d, 0xcc, 0x40, 0xe1, 0xe6, 0x3d, 0xed, 0xde, - 0xc3, 0x8d, 0xd5, 0xbb, 0x2b, 0xfa, 0xfa, 0xc6, 0xc2, 0xd2, 0x9d, 0xf5, 0xc2, 0x09, 0x32, 0x0d, - 0x13, 0x1b, 0xb7, 0xb4, 0x95, 0x85, 0xe5, 0xa0, 0x48, 0xb9, 0xf2, 0x3c, 0x64, 0x83, 0xcc, 0xb1, - 0x6e, 0x56, 0x15, 0x99, 0x04, 0x08, 0xc9, 0xd7, 0x0b, 0xca, 0xd5, 0xbf, 0x7a, 0x01, 0x32, 0xc2, - 0x9b, 0xfb, 0x8e, 0x02, 0xe3, 0xf2, 0x93, 0x5e, 0x52, 0x19, 0xed, 0xd1, 0x6e, 0x20, 0x86, 0x52, - 0x75, 0xe4, 0xf6, 0x5c, 0x16, 0xea, 0xc5, 0x0f, 0xfe, 0xe1, 0xdf, 0x7e, 0x2d, 0xf1, 0x31, 0x52, - 0xae, 0x0a, 0x4f, 0xb3, 0x2a, 0xbf, 0xf8, 0xad, 0xbe, 0x2f, 0x14, 0xe7, 0x29, 0xf9, 0x79, 0x05, - 0xc6, 0x02, 0x0f, 0x38, 0x2e, 0xbf, 0x25, 0xfa, 0x40, 0xb8, 0x74, 0x65, 0x94, 0xa6, 0x02, 0x8b, - 0x8a, 0x58, 0xce, 0x92, 0x52, 0x88, 0xc5, 0xe4, 0x2d, 0x24, 0x18, 0x2e, 0xa4, 0xf1, 0x3d, 0x27, - 0xb9, 0x38, 0xfc, 0xc5, 0x27, 0x47, 0x70, 0x69, 0xd4, 0xa7, 0xa1, 0xea, 0x1c, 0xf6, 0x5f, 0x20, - 0x93, 0x61, 0xff, 0xfc, 0xe9, 0xe9, 0x17, 0x20, 0x85, 0x19, 0x6d, 0x17, 0x86, 0x70, 0x0a, 0x7a, - 0x3c, 0xd4, 0x2b, 0x57, 0xf5, 0x1c, 0xf6, 0x5a, 0x22, 0xc5, 0x68, 0xaf, 0xd2, 0x98, 0x9f, 0xf2, - 0x67, 0x99, 0x98, 0xc5, 0x44, 0x5e, 0x1c, 0x2d, 0xd7, 0xe9, 0x60, 0x24, 0x07, 0x26, 0x46, 0xa9, - 0xb3, 0x88, 0x64, 0x8a, 0x4c, 0x84, 0x48, 0x5c, 0x63, 0xd3, 0x27, 0x5f, 0x52, 0x20, 0xc3, 0x23, - 0x0b, 0x64, 0xe8, 0x93, 0x9c, 0x50, 0xea, 0x97, 0x47, 0x68, 0x29, 0xba, 0xfd, 0x18, 0x76, 0x7b, - 0x86, 0x9c, 0x96, 0xba, 0x65, 0x0d, 0x24, 0x09, 0x78, 0x90, 0xe1, 0xef, 0x2a, 0x62, 0x11, 0x44, - 0x9e, 0x5e, 0x94, 0xe4, 0x5c, 0x5a, 0xf1, 0x47, 0x40, 0xd8, 0xc1, 0x55, 0x48, 0xbd, 0xbf, 0x53, - 0xf1, 0xf7, 0x42, 0xba, 0x9d, 0x7e, 0x5b, 0x81, 0xbc, 0xf4, 0x20, 0x80, 0xbc, 0x3c, 0xda, 0xc3, - 0x81, 0xa0, 0xff, 0xca, 0xa8, 0xcd, 0x85, 0x18, 0x2e, 0x20, 0xa2, 0x73, 0x64, 0x3e, 0x44, 0xc4, - 0x6f, 0x30, 0xd1, 0x53, 0x94, 0x60, 0x7d, 0x53, 0x81, 0x5c, 0x98, 0xb1, 0x1d, 0xab, 0x0e, 0xbd, - 
0x79, 0xea, 0xb1, 0xea, 0xd0, 0x97, 0x44, 0xae, 0x5e, 0x46, 0x40, 0xe7, 0xc9, 0xc7, 0x42, 0x40, - 0x46, 0xd0, 0x06, 0x55, 0x54, 0xc2, 0xf4, 0x5d, 0x05, 0x26, 0xa3, 0x19, 0xfd, 0xe4, 0x95, 0x91, - 0xfa, 0x92, 0xc2, 0x50, 0xa5, 0x57, 0x0f, 0x41, 0x21, 0x20, 0xbe, 0x88, 0x10, 0x5f, 0x20, 0xe7, - 0x07, 0x40, 0x44, 0x25, 0xaa, 0xbe, 0x1f, 0x04, 0x94, 0x9e, 0x92, 0xaf, 0x28, 0x30, 0x2e, 0xe7, - 0x34, 0xc4, 0x1a, 0xd7, 0x01, 0xa9, 0x4d, 0xb1, 0xc6, 0x75, 0x50, 0xce, 0x86, 0x7a, 0x1a, 0xe1, - 0x9d, 0x24, 0xd3, 0x21, 0xbc, 0x30, 0x11, 0xe3, 0xd7, 0x45, 0xce, 0x09, 0x3e, 0x05, 0xfb, 0xe8, - 0x10, 0x95, 0x11, 0xd1, 0x69, 0x72, 0x2a, 0x44, 0x84, 0x0f, 0xdb, 0x74, 0x19, 0x57, 0x5e, 0x4a, - 0xb1, 0x88, 0x55, 0xfa, 0xfe, 0xec, 0x8f, 0x58, 0xa5, 0x1f, 0x90, 0xb9, 0x31, 0x68, 0xfb, 0xc1, - 0x56, 0x3c, 0xa9, 0x48, 0xd2, 0xb0, 0xdf, 0x52, 0x60, 0x22, 0x92, 0x36, 0x41, 0xaa, 0x43, 0xbb, - 0x8a, 0xe6, 0x76, 0x94, 0x5e, 0x19, 0x9d, 0xe0, 0xc0, 0x15, 0x20, 0xd0, 0x09, 0x71, 0x49, 0xf8, - 0xbe, 0xa4, 0x40, 0x2e, 0x4c, 0x56, 0x88, 0x5d, 0x95, 0xbd, 0x09, 0x1b, 0xb1, 0xab, 0xb2, 0x2f, - 0xff, 0x41, 0x2d, 0x22, 0x26, 0xa2, 0x76, 0x8d, 0xb4, 0xd7, 0x32, 0xec, 0x37, 0x95, 0x2b, 0xe4, - 0x0b, 0xe8, 0x46, 0xd4, 0xb7, 0xe2, 0xcd, 0x74, 0x24, 0xeb, 0xbf, 0x14, 0xb7, 0x8b, 0xca, 0x4f, - 0x3f, 0x06, 0xd8, 0x4b, 0x0f, 0x19, 0x49, 0x22, 0xf8, 0x59, 0x05, 0xc6, 0x44, 0x76, 0x79, 0xac, - 0x87, 0x10, 0xcd, 0x40, 0x1f, 0x1d, 0x42, 0xbf, 0x7b, 0xd0, 0xe2, 0x9c, 0x7a, 0x30, 0x88, 0x94, - 0xf5, 0x58, 0x0c, 0xd1, 0xb4, 0xf6, 0xe3, 0x60, 0xd8, 0xe6, 0x9c, 0x24, 0x0c, 0xbf, 0xa0, 0x40, - 0x36, 0x78, 0x02, 0x40, 0xe2, 0xfc, 0x9f, 0x9e, 0x57, 0x0c, 0xa5, 0x17, 0x47, 0x6a, 0x2b, 0x90, - 0xf4, 0xbb, 0x0d, 0x18, 0xba, 0x8b, 0xee, 0x5f, 0xe3, 0xf2, 0xa3, 0x93, 0x78, 0xeb, 0xd2, 0xff, - 0x9a, 0x25, 0xde, 0xba, 0x0c, 0x78, 0xcd, 0xa2, 0x9e, 0x47, 0x4c, 0xcf, 0x91, 0x33, 0x92, 0x75, - 0x69, 0xf4, 0xc2, 0xfa, 0xba, 0x02, 0x63, 0x82, 0x3a, 0x76, 0x8a, 0xa2, 0xaf, 0x5b, 0x4a, 0x2f, - 0xc7, 0x37, 0xed, 0x79, 0xdb, 0xa3, 
0x5e, 0x41, 0x28, 0xcf, 0x13, 0x35, 0x06, 0x4a, 0xf5, 0x7d, - 0x56, 0xf0, 0x94, 0xf9, 0x77, 0x6b, 0x4e, 0xc3, 0x8b, 0xf5, 0xef, 0xa4, 0x27, 0x4e, 0x87, 0x85, - 0x32, 0xc8, 0xe6, 0x36, 0x64, 0x89, 0x7c, 0x4b, 0xc1, 0xbf, 0xd8, 0xd0, 0xbd, 0x8d, 0x8d, 0xb5, - 0x6d, 0x83, 0x12, 0x8b, 0x62, 0x6d, 0xdb, 0xc0, 0x8b, 0x5e, 0x75, 0x1e, 0x51, 0x15, 0xc9, 0x9c, - 0xbc, 0x9a, 0x58, 0x3b, 0x91, 0xd3, 0xfe, 0x81, 0x02, 0xb9, 0xf0, 0x4a, 0x29, 0xd6, 0xa0, 0xf5, - 0xde, 0x08, 0xc7, 0x1a, 0xb4, 0xbe, 0x5b, 0x2a, 0xb5, 0x84, 0x40, 0x66, 0x08, 0x09, 0x81, 0x3c, - 0x72, 0x7c, 0x01, 0xe2, 0x29, 0xa4, 0xb9, 0x37, 0x71, 0x71, 0xf8, 0x2d, 0xc1, 0x70, 0x6f, 0x3f, - 0xea, 0x3b, 0x1c, 0xe0, 0x76, 0xca, 0x1e, 0xc3, 0x6f, 0x28, 0x90, 0x97, 0x63, 0x21, 0x71, 0x13, - 0xdf, 0x1f, 0x77, 0x18, 0x34, 0x29, 0x91, 0xbf, 0x0d, 0x27, 0xd1, 0xf0, 0xa0, 0xcc, 0x00, 0x1f, - 0x50, 0x22, 0x88, 0xee, 0x36, 0x19, 0x1e, 0x60, 0x1f, 0x62, 0xeb, 0xa5, 0x1b, 0x8e, 0x58, 0x97, - 0x3c, 0x1a, 0xad, 0x1f, 0x68, 0xed, 0x59, 0x03, 0x09, 0xc2, 0x2f, 0x2a, 0x78, 0x26, 0x0e, 0xa2, - 0xda, 0x2f, 0x8d, 0x18, 0x62, 0x1d, 0xbe, 0x82, 0xfa, 0x03, 0xb2, 0xea, 0x19, 0x84, 0x33, 0x4b, - 0x4e, 0xca, 0x9b, 0x4f, 0xd0, 0xf3, 0x0f, 0x15, 0x38, 0x37, 0x2c, 0xb2, 0x47, 0x16, 0xe3, 0xf6, - 0xfe, 0xd1, 0xc2, 0x8e, 0xa5, 0xa5, 0x63, 0xf1, 0x88, 0x9a, 0x48, 0xb5, 0x28, 0x0d, 0x65, 0xdb, - 0x67, 0xb3, 0x2c, 0x22, 0x71, 0x6c, 0x27, 0xff, 0x73, 0xe5, 0xa0, 0xb8, 0x13, 0x22, 0xf1, 0xc8, - 0x5b, 0x47, 0x8a, 0xf9, 0x85, 0xe2, 0x7f, 0xfb, 0xa8, 0xe4, 0x07, 0xee, 0x3d, 0x3d, 0x83, 0x20, - 0x7f, 0x7a, 0x50, 0x38, 0xf1, 0xe3, 0x87, 0xee, 0x9a, 0x43, 0x7e, 0xe3, 0xd0, 0x74, 0x02, 0xeb, - 0xeb, 0x88, 0xb5, 0x42, 0x5e, 0xea, 0xc3, 0x5a, 0x7d, 0xff, 0xa0, 0xa8, 0xe0, 0x53, 0xf2, 0x3d, - 0x05, 0x03, 0x47, 0xd1, 0x40, 0x15, 0x79, 0xed, 0x70, 0x61, 0x2d, 0x8e, 0xfc, 0xf5, 0xa3, 0xc4, - 0xc2, 0x06, 0x38, 0xc6, 0x8f, 0x9d, 0x9a, 0xee, 0x8a, 0xc6, 0x51, 0x6f, 0x23, 0x17, 0x86, 0xb8, - 0x62, 0xed, 0x74, 0x6f, 0xcc, 0x2c, 0xd6, 0x4e, 0xf7, 0x45, 0xcd, 0xd4, 
0xe7, 0x10, 0xd1, 0x29, - 0x32, 0x2b, 0x23, 0xaa, 0xbe, 0xcf, 0xa3, 0x6e, 0x4f, 0x17, 0xaf, 0xfc, 0xe0, 0x5f, 0xe7, 0x4f, - 0xfc, 0x60, 0x7f, 0x5e, 0xf9, 0xe1, 0xfe, 0xbc, 0xf2, 0xa3, 0xfd, 0x79, 0xe5, 0x5f, 0xf6, 0xe7, - 0x95, 0x6f, 0xfc, 0x64, 0xfe, 0xc4, 0x0f, 0x7f, 0x32, 0x7f, 0xe2, 0x47, 0x3f, 0x99, 0x3f, 0xf1, - 0x6e, 0x36, 0x60, 0x5e, 0xcb, 0x60, 0xc0, 0xf7, 0xb5, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x50, - 0xa1, 0xe8, 0x97, 0xef, 0x53, 0x00, 0x00, + proto.RegisterFile("server/serverpb/status.proto", fileDescriptor_status_731fb2d638c68f09) +} + +var fileDescriptor_status_731fb2d638c68f09 = []byte{ + // 5866 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x7c, 0xdb, 0x8f, 0x1c, 0xc7, + 0x75, 0x37, 0x7b, 0x6e, 0x3b, 0x73, 0x66, 0x2f, 0xb3, 0xc5, 0xdd, 0xe5, 0x70, 0x48, 0xed, 0xd0, + 0x4d, 0x89, 0x22, 0x29, 0x69, 0x46, 0xa2, 0x44, 0x8b, 0xd6, 0x67, 0xc9, 0xde, 0x1b, 0xc9, 0x25, + 0x57, 0xbc, 0xf4, 0x2e, 0x3f, 0x07, 0xb2, 0xa3, 0x4e, 0xcf, 0x74, 0xed, 0xb0, 0xb9, 0xb3, 0xdd, + 0xc3, 0xee, 0x9e, 0xcd, 0x8e, 0x15, 0xda, 0x8e, 0x72, 0x73, 0x1c, 0xc7, 0xb7, 0x38, 0x81, 0x1f, + 0x12, 0x20, 0xf0, 0x43, 0x9c, 0x97, 0x04, 0x0e, 0xf2, 0x92, 0x04, 0x48, 0x82, 0x5c, 0x90, 0x18, + 0x08, 0x10, 0x18, 0x48, 0x1e, 0x8c, 0x04, 0x58, 0x27, 0xab, 0x3c, 0x04, 0xc8, 0x7f, 0x60, 0x20, + 0x41, 0x50, 0xa7, 0xaa, 0x7b, 0xaa, 0x67, 0x66, 0x7b, 0x66, 0x77, 0x45, 0x21, 0x0f, 0x12, 0xa7, + 0xab, 0xea, 0x9c, 0xfa, 0xd5, 0xa9, 0x53, 0xa7, 0x4e, 0x9d, 0x3a, 0xb5, 0x70, 0xd6, 0xa3, 0xee, + 0x0e, 0x75, 0xab, 0xfc, 0x9f, 0x56, 0xad, 0xea, 0xf9, 0x86, 0xdf, 0xf6, 0x2a, 0x2d, 0xd7, 0xf1, + 0x1d, 0x72, 0xba, 0xee, 0xd4, 0xb7, 0x5c, 0xc7, 0xa8, 0x3f, 0xac, 0xf0, 0x06, 0x95, 0xa0, 0x5d, + 0xa9, 0x50, 0x6b, 0x5b, 0x4d, 0xb3, 0x6a, 0xd9, 0x9b, 0x0e, 0x6f, 0x5c, 0x3a, 0xd9, 0x70, 0x3c, + 0xcf, 0x6a, 0x55, 0xf9, 0x3f, 0xa2, 0x70, 0xee, 0x91, 0x53, 0xf3, 0xaa, 0xec, 0x7f, 0xad, 0x1a, + 0xfe, 0x23, 0xca, 0x4f, 0x21, 0xd7, 0x56, 0xad, 0x6a, 0xb4, 
0x5a, 0x3a, 0xeb, 0x33, 0xa8, 0x20, + 0x41, 0x85, 0x69, 0xf8, 0x46, 0xc0, 0x24, 0x28, 0xdb, 0xa6, 0xbe, 0x21, 0x95, 0x5f, 0x10, 0xe0, + 0x4d, 0xcb, 0x68, 0xd8, 0x8e, 0xe7, 0x5b, 0x75, 0xd6, 0x8b, 0xf4, 0x25, 0xda, 0x9d, 0x0f, 0x06, + 0x89, 0x63, 0x13, 0xff, 0xf4, 0x8c, 0xb5, 0xf4, 0x8c, 0xe7, 0x3b, 0xae, 0xd1, 0xa0, 0x55, 0x6a, + 0x37, 0x2c, 0x9b, 0xb6, 0x6a, 0xe2, 0x87, 0xa8, 0x3e, 0xd3, 0x57, 0xbd, 0xbd, 0x53, 0xaf, 0x8b, + 0xca, 0xf9, 0xbe, 0x4a, 0xd7, 0xa9, 0x6f, 0x79, 0x66, 0x4d, 0xd4, 0x5f, 0xda, 0xda, 0xa9, 0x6e, + 0xed, 0x08, 0x14, 0xc1, 0x8f, 0x56, 0xad, 0xda, 0xa4, 0x86, 0x47, 0xf5, 0x08, 0x0c, 0xf5, 0x80, + 0xa6, 0xac, 0x51, 0x80, 0xe5, 0xb9, 0x83, 0xd8, 0x59, 0x3b, 0xd4, 0xa6, 0x5e, 0x28, 0xca, 0xb6, + 0x6f, 0x35, 0xab, 0x4d, 0xa7, 0xc1, 0xfe, 0x13, 0x65, 0x25, 0x2c, 0x6b, 0xdb, 0x2e, 0xf5, 0x9c, + 0xe6, 0x0e, 0x35, 0x75, 0xc3, 0x34, 0xdd, 0x60, 0x88, 0xd4, 0xaf, 0x9b, 0x55, 0xd7, 0xd8, 0xf4, + 0xf1, 0x7f, 0x6c, 0x14, 0xc6, 0xa6, 0x2f, 0x2a, 0x67, 0x1a, 0x4e, 0xc3, 0xc1, 0x9f, 0x55, 0xf6, + 0x4b, 0x94, 0x9e, 0x6d, 0x38, 0x4e, 0xa3, 0x49, 0xab, 0x46, 0xcb, 0xaa, 0x1a, 0xb6, 0xed, 0xf8, + 0x86, 0x6f, 0x39, 0x76, 0x00, 0xa0, 0x2c, 0x6a, 0xf1, 0xab, 0xd6, 0xde, 0xac, 0xfa, 0xd6, 0x36, + 0xf5, 0x7c, 0x63, 0x5b, 0x68, 0x87, 0x5a, 0x81, 0x93, 0x4b, 0xd4, 0xf5, 0xad, 0x4d, 0xab, 0x6e, + 0xf8, 0xd4, 0xd3, 0xe8, 0xe3, 0x36, 0xf5, 0x7c, 0x72, 0x0a, 0xc6, 0x6c, 0xc7, 0xa4, 0xba, 0x65, + 0x16, 0x95, 0x73, 0xca, 0xc5, 0x9c, 0x96, 0x61, 0x9f, 0xab, 0xa6, 0xfa, 0xdf, 0x29, 0x20, 0x12, + 0xc1, 0x32, 0xf5, 0x0d, 0xab, 0xe9, 0x91, 0xfb, 0x90, 0xf2, 0x3b, 0x2d, 0x8a, 0x8d, 0x27, 0xaf, + 0xbc, 0x59, 0x39, 0x50, 0x6b, 0x2b, 0xfd, 0xc4, 0x72, 0xd1, 0x46, 0xa7, 0x45, 0x35, 0x64, 0x45, + 0xce, 0xc3, 0x04, 0x75, 0x5d, 0xc7, 0xd5, 0xb7, 0xa9, 0xe7, 0x19, 0x0d, 0x5a, 0x4c, 0x20, 0x90, + 0x71, 0x2c, 0x7c, 0x9b, 0x97, 0x11, 0x02, 0x29, 0xa6, 0x8d, 0xc5, 0xe4, 0x39, 0xe5, 0xe2, 0xb8, + 0x86, 0xbf, 0x89, 0x06, 0x99, 0x4d, 0x8b, 0x36, 0x4d, 0xaf, 0x98, 0x3a, 0x97, 0xbc, 0x98, 0xbf, + 
0xf2, 0xda, 0xe1, 0xd0, 0x5c, 0x47, 0xda, 0xc5, 0xd4, 0x0f, 0xf6, 0xca, 0x27, 0x34, 0xc1, 0xa9, + 0xf4, 0xc7, 0x09, 0xc8, 0xf0, 0x0a, 0x32, 0x07, 0x19, 0xcb, 0xf3, 0xda, 0xd4, 0x0d, 0x24, 0xc3, + 0xbf, 0x48, 0x11, 0xc6, 0xbc, 0x76, 0xed, 0x11, 0xad, 0xfb, 0x02, 0x69, 0xf0, 0x49, 0x9e, 0x01, + 0xd8, 0x31, 0x9a, 0x96, 0xa9, 0x6f, 0xba, 0xce, 0x36, 0x42, 0x4d, 0x6a, 0x39, 0x2c, 0xb9, 0xee, + 0x3a, 0xdb, 0xa4, 0x0c, 0x79, 0x5e, 0xdd, 0xb6, 0x7d, 0xab, 0x59, 0x4c, 0x61, 0x3d, 0xa7, 0x78, + 0xc0, 0x4a, 0xc8, 0x59, 0xc8, 0x31, 0x1d, 0xa1, 0x9e, 0x47, 0xbd, 0x62, 0xfa, 0x5c, 0xf2, 0x62, + 0x4e, 0xeb, 0x16, 0x90, 0x2a, 0x9c, 0xf4, 0xac, 0x86, 0x6d, 0xf8, 0x6d, 0x97, 0xea, 0x46, 0xb3, + 0xe1, 0xb8, 0x96, 0xff, 0x70, 0xbb, 0x98, 0x41, 0x0c, 0x24, 0xac, 0x5a, 0x08, 0x6a, 0x18, 0x9c, + 0x56, 0xbb, 0xd6, 0xb4, 0xea, 0xfa, 0x16, 0xed, 0x14, 0xc7, 0xb0, 0x5d, 0x8e, 0x97, 0xdc, 0xa6, + 0x1d, 0x72, 0x06, 0x72, 0x5b, 0xb4, 0xa3, 0xb7, 0x51, 0xe6, 0x59, 0xec, 0x2d, 0xbb, 0x45, 0x3b, + 0x0f, 0x50, 0xde, 0x2f, 0x02, 0xa1, 0xbb, 0x3e, 0xb5, 0x4d, 0x6a, 0xea, 0xdd, 0x56, 0x39, 0x6c, + 0x55, 0x08, 0x6a, 0x6e, 0x8b, 0xd6, 0xea, 0x7d, 0x98, 0xea, 0x99, 0x5b, 0x92, 0x81, 0xc4, 0xd2, + 0x42, 0xe1, 0x04, 0xc9, 0x42, 0xea, 0xce, 0xdd, 0xe5, 0x95, 0x82, 0x42, 0x26, 0x20, 0xb7, 0xb4, + 0xb6, 0xba, 0x72, 0x67, 0x43, 0x5f, 0x5a, 0x28, 0x24, 0x08, 0x40, 0x86, 0x7f, 0x16, 0x92, 0x24, + 0x07, 0xe9, 0x07, 0xab, 0xac, 0x38, 0xc5, 0xe8, 0x1e, 0xac, 0x16, 0xd2, 0xaa, 0x03, 0x33, 0x51, + 0x7d, 0xf5, 0x5a, 0x8e, 0xed, 0x51, 0xf2, 0x19, 0x18, 0xaf, 0x4b, 0xe5, 0x45, 0x05, 0xa7, 0xfe, + 0xa5, 0x43, 0x4d, 0xbd, 0x98, 0xf3, 0x08, 0x23, 0xb5, 0x0a, 0x93, 0xa2, 0x7a, 0xd8, 0xda, 0xb8, + 0x95, 0xca, 0x26, 0x0a, 0x49, 0xf5, 0x0e, 0xc0, 0x7a, 0xc7, 0xf3, 0xe9, 0xf6, 0xaa, 0xbd, 0xe9, + 0xb0, 0xc9, 0xf5, 0xf0, 0x4b, 0x67, 0x76, 0x5a, 0x10, 0x80, 0x17, 0x69, 0xb0, 0x45, 0x5d, 0x9b, + 0x36, 0x79, 0x03, 0xae, 0x3a, 0xc0, 0x8b, 0x58, 0x03, 0xf5, 0xab, 0x49, 0x98, 0x0a, 0x11, 0x88, + 0xd1, 0xbe, 0x13, 0x85, 0x90, 0x5e, 
0x5c, 0xd8, 0xdf, 0x2b, 0x67, 0xee, 0x30, 0x18, 0xcb, 0x3f, + 0xd9, 0x2b, 0xbf, 0xda, 0xb0, 0xfc, 0x87, 0xed, 0x5a, 0xa5, 0xee, 0x6c, 0x57, 0x43, 0x01, 0x98, + 0xb5, 0xee, 0xef, 0x6a, 0x6b, 0xab, 0x51, 0x15, 0x26, 0xbd, 0xc2, 0xc9, 0x82, 0x51, 0x90, 0xb7, + 0x60, 0x4c, 0x28, 0x17, 0x82, 0xc9, 0x5f, 0x99, 0x97, 0x84, 0xc8, 0x6c, 0x57, 0xe5, 0x41, 0x68, + 0xbb, 0x16, 0x4c, 0xd3, 0x15, 0x52, 0x0b, 0x88, 0xc8, 0x1b, 0x00, 0xb8, 0x31, 0xf1, 0xf1, 0x24, + 0x91, 0xc5, 0xac, 0xc4, 0x02, 0x2b, 0x2b, 0x6c, 0x68, 0x82, 0x32, 0x87, 0x25, 0x28, 0x8c, 0xb5, + 0xa8, 0xb4, 0x52, 0x48, 0xfc, 0x5c, 0xcc, 0x24, 0x76, 0x25, 0x2d, 0x98, 0xc9, 0xa2, 0x5d, 0x87, + 0xbc, 0xf7, 0xb8, 0xa9, 0x07, 0xa3, 0x49, 0x8f, 0x34, 0x1a, 0xc2, 0xd8, 0xec, 0xef, 0x95, 0x61, + 0xfd, 0xfe, 0xda, 0x02, 0xa7, 0xd4, 0xc0, 0x7b, 0xdc, 0x14, 0xbf, 0xd5, 0x49, 0x18, 0x67, 0x02, + 0x0b, 0xb4, 0x41, 0xfd, 0x76, 0x12, 0x26, 0x44, 0x81, 0x98, 0x9c, 0x9b, 0x90, 0x66, 0xa2, 0x0c, + 0x74, 0xf0, 0xc5, 0x01, 0xf0, 0xf9, 0x76, 0x13, 0xec, 0x82, 0x38, 0x03, 0xeb, 0xf8, 0x21, 0x46, + 0xc1, 0x19, 0x90, 0xbf, 0x50, 0xe0, 0x64, 0xb0, 0xa3, 0xe8, 0xb5, 0x8e, 0x1e, 0xcc, 0x79, 0x02, + 0x19, 0xbf, 0x15, 0x23, 0x97, 0x08, 0xa2, 0xca, 0x9a, 0xe0, 0xb1, 0xd8, 0xc1, 0xb9, 0x36, 0x57, + 0x6c, 0xdf, 0xed, 0x2c, 0xde, 0x15, 0x23, 0x2d, 0xf4, 0x54, 0x2f, 0xbf, 0xff, 0xe3, 0xa3, 0x69, + 0x50, 0xa1, 0xd9, 0xd3, 0x4f, 0x69, 0x17, 0x66, 0x07, 0xf6, 0x4d, 0x0a, 0x90, 0x64, 0xc6, 0x07, + 0x95, 0x57, 0x63, 0x3f, 0xc9, 0x2a, 0xa4, 0x77, 0x8c, 0x66, 0x9b, 0x9b, 0xf9, 0xc9, 0x2b, 0xaf, + 0x4a, 0x83, 0xdb, 0xda, 0xa9, 0x04, 0x5b, 0x6c, 0x45, 0x6c, 0xf3, 0xa2, 0xd3, 0x80, 0x39, 0x17, + 0x9e, 0xc6, 0x39, 0xbc, 0x91, 0xb8, 0xa6, 0xa8, 0x17, 0x20, 0xcf, 0x1a, 0x0c, 0xdd, 0xcf, 0xbe, + 0x9f, 0x82, 0x9c, 0x66, 0x6c, 0xfa, 0x8c, 0x03, 0x33, 0x6f, 0xe0, 0xd2, 0x56, 0xd3, 0xaa, 0x1b, + 0x41, 0xcb, 0xd4, 0xe2, 0xc4, 0xfe, 0x5e, 0x39, 0xa7, 0xf1, 0xd2, 0xd5, 0x65, 0x2d, 0x27, 0x1a, + 0xac, 0x9a, 0xe4, 0xe3, 0x00, 0x0f, 0x0d, 0xd7, 0x44, 0xef, 0x81, 0x8a, 
0xc5, 0x32, 0x5d, 0xe1, + 0x1b, 0x77, 0xe5, 0xa6, 0xe1, 0x9a, 0xc8, 0x34, 0xd0, 0xf2, 0x87, 0x41, 0x01, 0xdb, 0xb4, 0x9a, + 0xd4, 0x30, 0x71, 0x6d, 0xa4, 0x34, 0xfc, 0x4d, 0x66, 0x20, 0xcd, 0xd9, 0xa4, 0x10, 0x1e, 0xff, + 0x60, 0x7b, 0x8a, 0xd1, 0x6a, 0x35, 0x2d, 0x6a, 0xa2, 0xf6, 0xa6, 0xb4, 0xe0, 0x93, 0x6c, 0x40, + 0xb6, 0xe5, 0x3a, 0x0d, 0x54, 0xec, 0x0c, 0xaa, 0xc3, 0x95, 0x18, 0x75, 0x08, 0x47, 0x58, 0xb9, + 0x27, 0x88, 0xb8, 0x0a, 0x70, 0x68, 0x21, 0x27, 0xf2, 0x3c, 0x4c, 0x31, 0x34, 0xba, 0xef, 0x1a, + 0xb6, 0xb7, 0x49, 0x5d, 0x4a, 0x71, 0x7f, 0x48, 0x69, 0x93, 0xac, 0x78, 0x23, 0x2c, 0x2d, 0xfd, + 0xba, 0x02, 0xd9, 0x80, 0x15, 0xc3, 0xbe, 0x6d, 0xf8, 0xf5, 0x87, 0x5c, 0x60, 0x1a, 0xff, 0x60, + 0xa3, 0xb4, 0xe9, 0x2e, 0xdf, 0x0c, 0x53, 0x1a, 0xfe, 0xee, 0x8e, 0x32, 0x29, 0x8f, 0x72, 0x0e, + 0x32, 0x2d, 0xa3, 0xed, 0x51, 0x13, 0x07, 0x9f, 0xd5, 0xc4, 0x17, 0xb9, 0x04, 0x85, 0x16, 0xb5, + 0x4d, 0xcb, 0x6e, 0xe8, 0x9e, 0x6d, 0xb4, 0xbc, 0x87, 0x8e, 0x2f, 0xc4, 0x30, 0x25, 0xca, 0xd7, + 0x45, 0x71, 0xe9, 0x11, 0x4c, 0x44, 0x46, 0x26, 0x2b, 0x58, 0x8a, 0x2b, 0xd8, 0x92, 0xac, 0x60, + 0xf1, 0x5b, 0x43, 0xbf, 0xb8, 0x64, 0xd5, 0xda, 0x4f, 0xc0, 0x84, 0x66, 0xd8, 0x0d, 0x7a, 0xcf, + 0x75, 0x6a, 0x4d, 0xba, 0xed, 0x91, 0x73, 0x90, 0x6f, 0xdb, 0xc6, 0x8e, 0x61, 0x35, 0x8d, 0x5a, + 0x93, 0x3b, 0x41, 0x59, 0x4d, 0x2e, 0x22, 0x57, 0xe1, 0x14, 0x93, 0x20, 0x75, 0x75, 0xdb, 0xf1, + 0x75, 0xee, 0x74, 0x3e, 0x74, 0x9a, 0x26, 0x75, 0x11, 0x4e, 0x56, 0x9b, 0xe1, 0xd5, 0x77, 0x1c, + 0x7f, 0x8d, 0x55, 0xde, 0xc4, 0x3a, 0xf2, 0x2c, 0x4c, 0xda, 0x8e, 0xce, 0x34, 0x4a, 0xe7, 0xf5, + 0x28, 0xb8, 0xac, 0x36, 0x6e, 0x3b, 0x0c, 0xe3, 0x1a, 0x96, 0x91, 0x8b, 0x30, 0xd5, 0xb6, 0x4d, + 0xea, 0x0a, 0xcd, 0xf4, 0x43, 0x41, 0xf6, 0x16, 0x93, 0xd3, 0x90, 0xb5, 0x1d, 0xde, 0x3d, 0x4a, + 0x32, 0xab, 0x8d, 0xd9, 0x0e, 0x76, 0x48, 0xae, 0x41, 0xf1, 0x71, 0xdb, 0xa2, 0x5e, 0x9d, 0xda, + 0xbe, 0x4e, 0x1f, 0xb7, 0x8d, 0xa6, 0xa7, 0xfb, 0x56, 0x7d, 0xcb, 0xb2, 0x1b, 0xe8, 0x4b, 0x64, + 0xb5, 0xb9, 
0xb0, 0x7e, 0x05, 0xab, 0x37, 0x78, 0x2d, 0x79, 0x01, 0x08, 0x47, 0xe8, 0x34, 0x74, + 0xdf, 0x71, 0xf4, 0xa6, 0xe1, 0x36, 0xb8, 0xde, 0x64, 0xb5, 0x29, 0x56, 0xb3, 0xe6, 0x34, 0x36, + 0x1c, 0x67, 0x8d, 0x15, 0x93, 0x0b, 0x30, 0xe9, 0xec, 0x44, 0xa0, 0x66, 0xb1, 0x61, 0x4f, 0xa9, + 0xba, 0x05, 0x53, 0x28, 0x63, 0x36, 0x0d, 0x16, 0x9e, 0x24, 0x98, 0xef, 0xf1, 0xb8, 0x4d, 0x5d, + 0x8b, 0x7a, 0x7a, 0x8b, 0xba, 0xba, 0x47, 0xeb, 0x8e, 0xcd, 0x17, 0xa9, 0xa2, 0x15, 0x44, 0xcd, + 0x3d, 0xea, 0xae, 0x63, 0x39, 0xb9, 0x0c, 0xd3, 0x3f, 0xeb, 0x5a, 0x7e, 0xb4, 0x71, 0x02, 0x1b, + 0x4f, 0xf1, 0x8a, 0xb0, 0xad, 0x7a, 0x13, 0xe0, 0x9e, 0x4b, 0x7d, 0xbf, 0xb3, 0xde, 0x32, 0x6c, + 0xe6, 0x00, 0x79, 0xbe, 0xe1, 0xfa, 0x7a, 0xa0, 0x40, 0x39, 0x2d, 0x8b, 0x05, 0xcc, 0x3b, 0x3a, + 0x05, 0x63, 0xd4, 0x46, 0xdf, 0x47, 0x6c, 0xd5, 0x19, 0x6a, 0x33, 0x87, 0xe7, 0x8d, 0xd4, 0x7f, + 0xfe, 0x6e, 0x59, 0x51, 0xbf, 0x9a, 0x65, 0xe6, 0xc4, 0x6e, 0x50, 0xdc, 0x80, 0x3e, 0x05, 0x29, + 0xaf, 0x65, 0xd8, 0xc8, 0x24, 0x7e, 0x1f, 0xeb, 0x76, 0x2f, 0xd6, 0x24, 0x12, 0x92, 0x55, 0x00, + 0x14, 0xad, 0x6c, 0x61, 0x9e, 0x1d, 0x45, 0x71, 0x03, 0xa3, 0xe3, 0x86, 0xa6, 0xed, 0xba, 0x6c, + 0x60, 0xf2, 0x57, 0x2e, 0x0f, 0xb5, 0xaf, 0xe1, 0x30, 0x82, 0x3d, 0x89, 0x2f, 0xd6, 0x6d, 0x98, + 0xf4, 0x9c, 0xb6, 0x5b, 0xa7, 0xe1, 0x6e, 0x94, 0x46, 0x0f, 0xe4, 0xc6, 0xfe, 0x5e, 0x79, 0x7c, + 0x1d, 0x6b, 0x8e, 0xe7, 0x87, 0x8c, 0x7b, 0x5d, 0x26, 0x26, 0x79, 0x0c, 0x53, 0xa2, 0x3b, 0x86, + 0x0c, 0xfb, 0xcb, 0x60, 0x7f, 0xab, 0xfb, 0x7b, 0xe5, 0x09, 0xde, 0xdf, 0x3a, 0xab, 0xc1, 0x0e, + 0x5f, 0x3b, 0x54, 0x87, 0x82, 0x4e, 0x9b, 0xf0, 0x24, 0x36, 0x66, 0xff, 0xc1, 0x63, 0x6c, 0xc0, + 0xc1, 0x63, 0x09, 0x26, 0xc4, 0x2a, 0xb6, 0x18, 0xb0, 0x0e, 0x7a, 0xca, 0xf9, 0x2b, 0x45, 0x49, + 0xac, 0x41, 0x37, 0xb8, 0xbe, 0x02, 0xdf, 0x12, 0x89, 0x6e, 0x72, 0x1a, 0x72, 0x0b, 0x8d, 0x38, + 0xda, 0x90, 0x62, 0x0e, 0xa7, 0xe5, 0x62, 0xec, 0xe4, 0x4a, 0x36, 0x47, 0x32, 0xdd, 0xdc, 0x06, + 0x89, 0xf9, 0xf5, 0x8a, 0xd0, 0x37, 0xbf, 0x03, 
0x19, 0x75, 0x17, 0x96, 0x3c, 0xbf, 0x1e, 0xf9, + 0x1c, 0x4c, 0x34, 0x99, 0xfd, 0xa6, 0x9e, 0xde, 0x74, 0xea, 0x46, 0xb3, 0x98, 0x47, 0x7e, 0xaf, + 0x0c, 0xd5, 0x97, 0x35, 0x46, 0xf5, 0xb6, 0x61, 0x1b, 0x0d, 0xea, 0x4a, 0x6a, 0x33, 0x2e, 0xb8, + 0xad, 0x31, 0x66, 0xe4, 0x5d, 0x98, 0x0c, 0xb8, 0x37, 0x9a, 0x4e, 0xcd, 0x68, 0x16, 0xc7, 0x8f, + 0xc7, 0x3e, 0x00, 0x7b, 0x03, 0xb9, 0x91, 0x07, 0x30, 0x2e, 0x9f, 0xe8, 0x8b, 0x13, 0xc8, 0xfd, + 0xc5, 0xe1, 0xdc, 0x19, 0x51, 0xc4, 0x05, 0xcb, 0x37, 0xbb, 0x45, 0xec, 0x04, 0x16, 0x1a, 0xbf, + 0xe2, 0x24, 0x1a, 0xac, 0x6e, 0x01, 0xdb, 0xa5, 0x03, 0x4b, 0x39, 0xc5, 0x8d, 0xaa, 0xf8, 0x54, + 0x7f, 0x4d, 0x11, 0x5b, 0xc5, 0xd0, 0xc3, 0x03, 0x31, 0x20, 0xe7, 0xb2, 0x96, 0xba, 0x65, 0x7a, + 0xe8, 0xe0, 0x25, 0x17, 0x97, 0xf7, 0xf7, 0xca, 0x59, 0xbe, 0x0c, 0x97, 0xbd, 0x43, 0x6b, 0xb7, + 0x20, 0xd4, 0xb2, 0xc8, 0x76, 0xd5, 0xf4, 0xd4, 0x0d, 0x98, 0x0c, 0xc0, 0x08, 0x57, 0x75, 0x11, + 0x32, 0x58, 0x1b, 0xf8, 0xaa, 0xcf, 0x0e, 0xd3, 0x1a, 0x49, 0xf2, 0x82, 0x52, 0xbd, 0x08, 0x13, + 0x37, 0x30, 0xde, 0x34, 0xd4, 0xd7, 0xfa, 0x6e, 0x02, 0xa6, 0x56, 0x30, 0x3c, 0xc3, 0xc4, 0xea, + 0xa1, 0x89, 0x7c, 0x17, 0xb2, 0xe1, 0xc2, 0xe6, 0x47, 0x99, 0xa5, 0xfd, 0xbd, 0xf2, 0xd8, 0x71, + 0x97, 0xf4, 0x98, 0x27, 0x16, 0xf3, 0x26, 0xcc, 0xb1, 0xc9, 0xa0, 0xae, 0xa7, 0x1b, 0xb6, 0xc9, + 0x57, 0x6b, 0xc3, 0x35, 0xb6, 0x83, 0xc3, 0xcd, 0xcb, 0xf2, 0x88, 0xb9, 0x3a, 0x54, 0x82, 0x10, + 0x52, 0x65, 0x83, 0x53, 0x2e, 0xd8, 0xe6, 0xcd, 0x90, 0x4e, 0x9b, 0xf1, 0x07, 0x94, 0x92, 0x1b, + 0x90, 0xe7, 0x64, 0x3a, 0xc6, 0x41, 0x92, 0xe8, 0xc4, 0x5e, 0x88, 0x63, 0xce, 0x25, 0x81, 0x01, + 0x0f, 0xa0, 0xe1, 0x6f, 0xf5, 0x25, 0x20, 0x92, 0x8c, 0x86, 0xca, 0xf4, 0xa7, 0xe1, 0x64, 0xa4, + 0xb9, 0x98, 0xd8, 0xd0, 0x1a, 0xf0, 0x79, 0x8d, 0xb3, 0x06, 0x3d, 0x33, 0x12, 0xb1, 0x06, 0xea, + 0xcf, 0x00, 0x6c, 0xb8, 0x46, 0x9d, 0xae, 0xec, 0x30, 0x45, 0xbf, 0x06, 0x29, 0xdf, 0xda, 0xa6, + 0x62, 0x3f, 0x2b, 0x55, 0x78, 0x70, 0xa9, 0x12, 0x04, 0x97, 0x2a, 0x1b, 0x41, 0x70, 
0x69, 0x31, + 0xcb, 0x98, 0x7c, 0xe3, 0xc7, 0x65, 0x45, 0x43, 0x0a, 0xb6, 0x44, 0xa2, 0x61, 0x9c, 0xe0, 0x53, + 0xfd, 0xbe, 0x02, 0x53, 0x0b, 0x4d, 0x66, 0x6a, 0x7c, 0xc7, 0x5d, 0x76, 0x3b, 0x5a, 0xdb, 0x66, + 0x4a, 0x11, 0xac, 0x05, 0xec, 0x2b, 0xc9, 0x95, 0x42, 0x68, 0xf4, 0x91, 0x57, 0xc2, 0x98, 0x58, + 0x09, 0xe4, 0x4d, 0xc8, 0x50, 0x36, 0x20, 0x4f, 0x9c, 0xa4, 0xe2, 0x76, 0xe6, 0xee, 0xf0, 0x35, + 0x41, 0xa4, 0x5e, 0x81, 0xd9, 0x10, 0x31, 0xf2, 0x0e, 0x66, 0xe9, 0x74, 0x2f, 0xee, 0xb0, 0x4b, + 0xf5, 0x4f, 0x15, 0x98, 0xeb, 0x25, 0x1a, 0x7c, 0x98, 0x4f, 0x7e, 0x98, 0x87, 0xf9, 0x25, 0x18, + 0x33, 0xdd, 0x8e, 0xee, 0xb6, 0x6d, 0xa1, 0xef, 0x71, 0x9a, 0xd0, 0x33, 0x0d, 0x5a, 0xc6, 0xc4, + 0x7f, 0xd5, 0xaf, 0x29, 0x50, 0xe8, 0x62, 0xff, 0x3f, 0x60, 0xc8, 0xde, 0x81, 0x69, 0x09, 0x8f, + 0x10, 0xe3, 0x0a, 0x64, 0xc5, 0x50, 0x47, 0xd1, 0xfa, 0xde, 0xb1, 0x8e, 0xf1, 0xb1, 0x7a, 0xaa, + 0x0a, 0xe3, 0xb7, 0xd6, 0xef, 0xde, 0x09, 0xd9, 0x06, 0x11, 0x46, 0xa5, 0x1b, 0x61, 0x54, 0x3f, + 0x50, 0x20, 0xbf, 0xe6, 0x34, 0x86, 0x1b, 0xf5, 0x19, 0x48, 0x37, 0xe9, 0x0e, 0x6d, 0x0a, 0xa5, + 0xe7, 0x1f, 0xe4, 0x19, 0x00, 0xee, 0x60, 0xe2, 0x62, 0xe2, 0x47, 0x21, 0xee, 0x72, 0xb2, 0x05, + 0xc4, 0xb4, 0x88, 0xb9, 0x98, 0x58, 0xc9, 0x4f, 0x83, 0xcc, 0xe5, 0xc4, 0xaa, 0x02, 0x24, 0xb7, + 0x8d, 0x5d, 0xf4, 0xb8, 0x72, 0x1a, 0xfb, 0xc9, 0x16, 0x56, 0xcb, 0xf0, 0x7d, 0xea, 0xda, 0x22, + 0xe2, 0x17, 0x7c, 0xb2, 0x53, 0x95, 0x4b, 0x4d, 0xa3, 0xee, 0x0b, 0x57, 0x5c, 0x7c, 0xb1, 0x33, + 0xde, 0x16, 0xa5, 0x2d, 0x9d, 0x7f, 0xe2, 0x81, 0x45, 0xb8, 0xe0, 0xac, 0x58, 0x0b, 0x4b, 0xd5, + 0xbb, 0x40, 0xd6, 0x9c, 0x06, 0x3b, 0x4e, 0x59, 0xd2, 0x96, 0xf1, 0x09, 0xe6, 0x00, 0x63, 0x91, + 0x90, 0xf2, 0xe9, 0xde, 0x80, 0x4a, 0xd3, 0x69, 0x54, 0xe4, 0xe3, 0x65, 0xd0, 0x5e, 0xad, 0xc0, + 0xc9, 0x35, 0xa7, 0x71, 0xdd, 0x6a, 0x52, 0x6f, 0xcd, 0xf2, 0xfc, 0xa1, 0xb6, 0xed, 0x1e, 0xcc, + 0x44, 0xdb, 0x0b, 0x08, 0xd7, 0x20, 0xbd, 0xc9, 0x0a, 0x05, 0x80, 0xb3, 0x83, 0x00, 0x30, 0x2a, + 0xd9, 0x9c, 0x21, 0x81, 
0xfa, 0x05, 0x98, 0x14, 0x1c, 0x87, 0x4e, 0x1d, 0x81, 0x14, 0xa3, 0x11, + 0x33, 0x87, 0xbf, 0x25, 0x91, 0x26, 0x87, 0x89, 0x34, 0x35, 0x50, 0xa4, 0x75, 0x98, 0x58, 0xf7, + 0x8d, 0xfa, 0xd6, 0x70, 0xcd, 0xf9, 0x84, 0x08, 0xa8, 0xf3, 0x68, 0x48, 0x6c, 0x08, 0x0c, 0x19, + 0x76, 0x03, 0xe7, 0xea, 0x3a, 0xa4, 0xd8, 0x08, 0xf1, 0x00, 0x6e, 0x08, 0x6b, 0x9d, 0xd3, 0xf0, + 0x37, 0x3b, 0xdb, 0xb0, 0x91, 0xe8, 0x9e, 0xf5, 0x79, 0xce, 0x3b, 0xa9, 0x65, 0x59, 0xc1, 0xba, + 0xf5, 0x79, 0x4a, 0x4a, 0x90, 0xad, 0x3b, 0xb6, 0x8f, 0x86, 0x91, 0x07, 0xd4, 0xc3, 0x6f, 0xf5, + 0xb7, 0x15, 0x98, 0xba, 0x41, 0x7d, 0x9c, 0x8c, 0xa1, 0xe0, 0xcf, 0x40, 0xae, 0x69, 0x79, 0xbe, + 0xee, 0xd8, 0xcd, 0x8e, 0x38, 0xdf, 0x66, 0x59, 0xc1, 0x5d, 0xbb, 0xd9, 0x21, 0xaf, 0x8b, 0x91, + 0xa5, 0x71, 0x64, 0xe7, 0x63, 0x46, 0xc6, 0x3a, 0x93, 0x2e, 0x04, 0x4a, 0x90, 0x15, 0xba, 0xcd, + 0x43, 0x1e, 0x39, 0x2d, 0xfc, 0x56, 0x57, 0xa1, 0xd0, 0x45, 0x27, 0xd4, 0xe4, 0x6a, 0x54, 0x4d, + 0xca, 0x43, 0x7a, 0x0a, 0x74, 0xe4, 0x8b, 0x30, 0x79, 0xcf, 0x75, 0x36, 0x47, 0xd1, 0x91, 0xc5, + 0xc8, 0x50, 0x2a, 0xb1, 0xe7, 0x3b, 0x99, 0x63, 0x45, 0x9a, 0xad, 0x02, 0xa4, 0x30, 0x30, 0x9e, + 0x85, 0xd4, 0xcd, 0x95, 0x85, 0x7b, 0x85, 0x13, 0xea, 0x25, 0x98, 0x7c, 0x9b, 0xfa, 0xae, 0x55, + 0x1f, 0xbe, 0xfb, 0xff, 0x01, 0xfa, 0x97, 0x9b, 0x3e, 0x9a, 0x48, 0x66, 0xfa, 0x9f, 0x6a, 0x64, + 0xf8, 0xd3, 0x90, 0x46, 0x13, 0x3c, 0xd2, 0x41, 0xb4, 0xe7, 0xf0, 0x88, 0x84, 0xea, 0x65, 0xe6, + 0x81, 0x0a, 0xb8, 0x2b, 0xec, 0x38, 0x25, 0x3b, 0x06, 0x4a, 0xd4, 0x31, 0xf8, 0x52, 0x02, 0xa6, + 0xc2, 0xc6, 0xc2, 0x0f, 0x7f, 0xda, 0x8e, 0xc1, 0x0d, 0xc8, 0xe0, 0x29, 0x2f, 0x70, 0x0c, 0x2e, + 0x0d, 0x39, 0x6b, 0x77, 0x07, 0x12, 0x38, 0xc5, 0x9c, 0x9c, 0x2c, 0x07, 0x31, 0xe0, 0x24, 0xf2, + 0xb9, 0x38, 0x0a, 0x1f, 0x26, 0xed, 0x48, 0xfc, 0x57, 0x6d, 0x43, 0x81, 0xd5, 0x2e, 0xd3, 0x5a, + 0xbb, 0x11, 0xe8, 0x42, 0x64, 0x7b, 0x55, 0x9e, 0xca, 0xf6, 0xfa, 0xcf, 0x09, 0x98, 0x96, 0xfa, + 0x15, 0xcb, 0xe9, 0x6b, 0x4a, 0xcf, 0x61, 0xe1, 0xda, 0x90, 
0x41, 0x45, 0xc8, 0x79, 0x37, 0x22, + 0xec, 0xf8, 0x49, 0x36, 0xc8, 0xf7, 0x7f, 0x7c, 0x44, 0xa0, 0x02, 0xc5, 0x87, 0x36, 0x59, 0x25, + 0x0a, 0x79, 0x09, 0x9d, 0x1c, 0x3a, 0x4c, 0xf2, 0xd0, 0xe1, 0xa7, 0xa3, 0xa1, 0xc3, 0xcb, 0xa3, + 0x74, 0xd4, 0x1f, 0x92, 0xfe, 0x2b, 0x05, 0xc6, 0x36, 0x76, 0x6d, 0x3c, 0xf6, 0xdc, 0x87, 0x84, + 0x50, 0xe1, 0xf1, 0xc5, 0x05, 0x06, 0xe6, 0x5f, 0x46, 0x5d, 0x9b, 0xfc, 0xf6, 0xb8, 0x6d, 0x99, + 0x95, 0x07, 0x0f, 0x56, 0xd9, 0xcc, 0x27, 0x56, 0x97, 0xb5, 0x84, 0x65, 0x92, 0x37, 0xd0, 0xe5, + 0x77, 0x7d, 0x01, 0x72, 0x34, 0xef, 0x9c, 0x93, 0xb0, 0x0d, 0xcc, 0xdf, 0xb5, 0x75, 0x93, 0x7a, + 0x75, 0xd7, 0x6a, 0xf9, 0x96, 0x63, 0x0b, 0xb7, 0x64, 0xd2, 0xdf, 0xb5, 0x97, 0xbb, 0xa5, 0xea, + 0xff, 0x24, 0x20, 0xbf, 0x50, 0xf7, 0xad, 0x1d, 0x7a, 0xbf, 0x4d, 0xdd, 0x0e, 0x99, 0x0b, 0xc7, + 0x91, 0x5b, 0xcc, 0x48, 0x60, 0x0a, 0x90, 0xf4, 0x1e, 0x07, 0x6e, 0x0f, 0xfb, 0xd9, 0x85, 0x97, + 0x3c, 0x3c, 0xbc, 0xe7, 0x60, 0xd2, 0xf2, 0x74, 0xd3, 0xf2, 0x7c, 0xd7, 0xaa, 0xb5, 0xbb, 0xf1, + 0xcd, 0x09, 0xcb, 0x5b, 0xee, 0x16, 0x92, 0x45, 0x48, 0xb7, 0x1e, 0x06, 0xa1, 0xcd, 0xc9, 0x81, + 0x17, 0x2f, 0xa1, 0xfb, 0xd7, 0x1d, 0x43, 0xe5, 0x1e, 0xa3, 0xd1, 0x38, 0x29, 0x6e, 0x32, 0xdd, + 0xb8, 0xba, 0x72, 0x31, 0x21, 0x45, 0xc7, 0x3f, 0x0b, 0x19, 0x26, 0x25, 0xcb, 0x44, 0x8f, 0x6a, + 0x7c, 0x71, 0xf9, 0x78, 0x13, 0x97, 0x66, 0xca, 0xb0, 0xac, 0xa5, 0xfd, 0x5d, 0x7b, 0xd5, 0x54, + 0x9f, 0x83, 0x34, 0x02, 0x21, 0x13, 0x90, 0xbb, 0xa7, 0xad, 0xdc, 0x5b, 0xd0, 0x56, 0xef, 0xdc, + 0x28, 0x9c, 0x60, 0x9f, 0x2b, 0x3f, 0xb5, 0xb2, 0xf4, 0x60, 0x83, 0x7d, 0x2a, 0xea, 0x2b, 0x70, + 0x92, 0xf9, 0x42, 0xeb, 0xd4, 0xf3, 0x2c, 0xc7, 0x0e, 0x77, 0x88, 0x12, 0x64, 0xdb, 0x1e, 0x75, + 0xa5, 0xfd, 0x3e, 0xfc, 0x56, 0xbf, 0x9d, 0x86, 0x31, 0xd1, 0xfe, 0xa9, 0x6e, 0x0f, 0x32, 0x86, + 0x44, 0x14, 0x03, 0x9b, 0xc1, 0x7a, 0xd3, 0xa2, 0xb6, 0x1f, 0xde, 0xc6, 0x71, 0xfd, 0x9a, 0xe0, + 0xa5, 0xe2, 0x72, 0x8d, 0x5c, 0x82, 0x02, 0x5e, 0x70, 0xd4, 0x31, 0x89, 0x41, 0x47, 0x56, 0xdc, + 
0x05, 0x9e, 0x92, 0xca, 0xef, 0x30, 0x8e, 0xeb, 0x30, 0x69, 0xe0, 0x24, 0xea, 0x22, 0xf4, 0x8b, + 0x37, 0xe3, 0xf9, 0xe8, 0x99, 0xfb, 0xe0, 0x59, 0x0f, 0xc2, 0x47, 0x46, 0x58, 0x64, 0x51, 0xaf, + 0xab, 0xa4, 0x99, 0xc3, 0x2b, 0xe9, 0xbb, 0x90, 0xdb, 0xda, 0xd1, 0x23, 0x0a, 0xb2, 0x78, 0x74, + 0xe5, 0x18, 0xbb, 0xbd, 0xc3, 0xd5, 0x63, 0x6c, 0x0b, 0x7f, 0x60, 0x40, 0xbb, 0x69, 0x78, 0xbe, + 0x2e, 0x8d, 0xba, 0x83, 0x9e, 0x7b, 0x4e, 0x9b, 0x62, 0x15, 0xfd, 0xcb, 0x32, 0x87, 0x20, 0xe4, + 0x65, 0x59, 0x86, 0xbc, 0xc1, 0x0e, 0x3e, 0x7a, 0xad, 0xe3, 0x53, 0x1e, 0x2a, 0x4c, 0x6a, 0x80, + 0x45, 0x8b, 0xac, 0x84, 0x5c, 0x80, 0xa9, 0x6d, 0x63, 0x57, 0x97, 0x1b, 0xe5, 0xb1, 0xd1, 0xc4, + 0xb6, 0xb1, 0xbb, 0xd0, 0x6d, 0xb7, 0x00, 0x20, 0x70, 0xf8, 0xbb, 0xb6, 0x88, 0xe1, 0xa9, 0x71, + 0xa7, 0x68, 0x6e, 0xf7, 0xb4, 0x1c, 0xa7, 0xda, 0xd8, 0xb5, 0xd5, 0x5f, 0x55, 0x60, 0x5a, 0x56, + 0x65, 0xee, 0x0f, 0x3c, 0x4d, 0x05, 0x3d, 0x38, 0x08, 0xf1, 0xfb, 0x0a, 0xcc, 0x44, 0x97, 0x95, + 0xd8, 0xf4, 0x96, 0x21, 0xeb, 0x89, 0x32, 0xb1, 0xeb, 0xc5, 0x8d, 0x52, 0x90, 0x07, 0xb1, 0xd9, + 0x80, 0x92, 0xdc, 0xea, 0xd9, 0xa9, 0xe2, 0x2c, 0x53, 0x9f, 0x48, 0xa2, 0x9b, 0x95, 0xfa, 0x18, + 0xc8, 0x92, 0x61, 0xd7, 0x69, 0x13, 0x67, 0x7a, 0xa8, 0x8b, 0x7a, 0x01, 0xb2, 0xa8, 0x29, 0xfc, + 0xda, 0x98, 0x99, 0xe9, 0x3c, 0xd3, 0x2e, 0x24, 0x66, 0xda, 0x85, 0x95, 0x3d, 0x8b, 0x37, 0xd9, + 0x63, 0x40, 0x6e, 0xc0, 0xc9, 0x48, 0x97, 0x42, 0x36, 0xec, 0xb8, 0x80, 0xc5, 0xd4, 0x14, 0x57, + 0x5e, 0xe1, 0x37, 0x3b, 0xf8, 0x22, 0xde, 0xe0, 0xe0, 0x8b, 0x1f, 0x6a, 0x07, 0x66, 0x38, 0x23, + 0x31, 0xc0, 0xa1, 0xe8, 0x5f, 0x04, 0x10, 0x42, 0x0c, 0xf0, 0x8f, 0xf3, 0xfb, 0x58, 0xc1, 0x60, + 0x75, 0x59, 0xcb, 0x89, 0x06, 0x43, 0xc6, 0xb0, 0x0a, 0xb3, 0x3d, 0x5d, 0x1f, 0x79, 0x14, 0xff, + 0xaa, 0x40, 0x61, 0xbd, 0x65, 0xd8, 0x91, 0x00, 0xdd, 0xf9, 0x9e, 0x21, 0x2c, 0x42, 0x57, 0x6f, + 0xc3, 0xe1, 0x68, 0xf2, 0xcd, 0x12, 0x1f, 0xcd, 0xd5, 0x9f, 0xec, 0x95, 0x5f, 0x39, 0x9c, 0x1b, + 0x74, 0x9b, 0x76, 0xa4, 0x0b, 0xa9, 
0x3b, 0xdd, 0x0b, 0xa9, 0xe4, 0x71, 0x38, 0x8a, 0x7b, 0x2c, + 0xf5, 0x4f, 0x14, 0x98, 0x96, 0x46, 0x27, 0xa4, 0xb4, 0x06, 0x79, 0xdf, 0xf1, 0x8d, 0xa6, 0x1e, + 0x44, 0x15, 0xfb, 0x2e, 0xb4, 0x7a, 0xc3, 0x9b, 0x6f, 0xff, 0xff, 0xa5, 0x25, 0xe4, 0x11, 0x24, + 0x66, 0x20, 0x3d, 0x96, 0x30, 0x33, 0xc4, 0x7d, 0xd8, 0xba, 0xd3, 0xb6, 0xb9, 0xc3, 0x92, 0xd6, + 0x00, 0x8b, 0x96, 0x58, 0x09, 0x79, 0x0d, 0xe6, 0x8c, 0x56, 0xcb, 0x75, 0x76, 0xad, 0x6d, 0xc3, + 0xa7, 0x6c, 0xe7, 0xdf, 0x12, 0xd6, 0x88, 0xdf, 0x99, 0xcf, 0x48, 0xb5, 0xcb, 0x96, 0xb7, 0x85, + 0x46, 0x49, 0xfd, 0x7f, 0x30, 0x23, 0xae, 0x47, 0xa2, 0x31, 0xf7, 0x51, 0xe6, 0x46, 0xfd, 0xce, + 0x04, 0xcc, 0xf6, 0x50, 0xf7, 0xc7, 0xe7, 0xb2, 0x1f, 0xb6, 0x49, 0xfa, 0x3b, 0x05, 0x4e, 0x06, + 0x57, 0x38, 0x72, 0x86, 0x47, 0x0e, 0xed, 0xc4, 0xf5, 0xf8, 0x13, 0x65, 0x3f, 0xd6, 0x4a, 0x78, + 0x3d, 0x34, 0x38, 0xd3, 0xa3, 0xa7, 0xfa, 0xe8, 0x99, 0x1e, 0xad, 0x9e, 0x7e, 0x4a, 0xff, 0x98, + 0xe3, 0x79, 0x31, 0xe1, 0x9d, 0x78, 0xdf, 0x2d, 0x9a, 0x32, 0xe0, 0x16, 0xed, 0x17, 0x14, 0x98, + 0x95, 0xae, 0xc9, 0xf5, 0xde, 0xc0, 0xe1, 0xdd, 0xfd, 0xbd, 0xf2, 0xc9, 0x07, 0xdd, 0x06, 0xc7, + 0x3e, 0xe4, 0x9c, 0x6c, 0xf7, 0x32, 0x33, 0x3d, 0xf2, 0x87, 0x0a, 0x5c, 0x90, 0xee, 0xd8, 0xfb, + 0xae, 0xe8, 0x25, 0x58, 0x49, 0x84, 0xf5, 0xb9, 0xfd, 0xbd, 0xf2, 0xb9, 0xee, 0x05, 0x7c, 0xf4, + 0xd2, 0xfe, 0xd8, 0x18, 0xcf, 0xb9, 0xb1, 0x9c, 0x4d, 0x8f, 0x7c, 0x59, 0x81, 0x62, 0x34, 0x2f, + 0x40, 0x82, 0x98, 0x42, 0x88, 0xf7, 0xf6, 0xf7, 0xca, 0x33, 0x77, 0xa4, 0x2c, 0x81, 0x63, 0xc3, + 0x9a, 0xb1, 0xfb, 0xb8, 0x99, 0x1e, 0xd9, 0x05, 0x12, 0x64, 0x14, 0x48, 0x18, 0xd2, 0x88, 0xe1, + 0xf6, 0xfe, 0x5e, 0x79, 0xea, 0x0e, 0xcf, 0x2f, 0x38, 0x76, 0xf7, 0x53, 0xb6, 0xcc, 0xc8, 0xf4, + 0xc8, 0xd7, 0x15, 0x38, 0xdd, 0x93, 0xdf, 0x20, 0x21, 0xc8, 0x20, 0x82, 0xf5, 0xfd, 0xbd, 0xf2, + 0xa9, 0x07, 0xd1, 0x46, 0xc7, 0x46, 0x72, 0xaa, 0x3d, 0x88, 0xa1, 0xe9, 0x91, 0xdf, 0x53, 0x40, + 0x3d, 0x28, 0x87, 0x42, 0x82, 0x36, 0x86, 0xd0, 0xde, 0xd9, 0xdf, 0x2b, 
0xcf, 0xdf, 0x1f, 0x98, + 0x51, 0x71, 0x6c, 0x84, 0xf3, 0x8f, 0x63, 0xf8, 0x9a, 0x1e, 0xf9, 0x96, 0x02, 0x67, 0xfb, 0x53, + 0x36, 0x24, 0x88, 0xd9, 0xae, 0xf4, 0xb4, 0x68, 0x02, 0xc7, 0xf1, 0xa5, 0xe7, 0x0e, 0x62, 0x68, + 0x7a, 0xe4, 0xab, 0x0a, 0x14, 0xa3, 0x49, 0x20, 0x12, 0xa0, 0x1c, 0x02, 0xd2, 0xf6, 0xf7, 0xca, + 0x73, 0x77, 0x77, 0x3e, 0xd4, 0xd9, 0x9c, 0x73, 0x76, 0x06, 0x4d, 0x66, 0xe9, 0x7d, 0x25, 0xdc, + 0x0f, 0x86, 0xe6, 0xae, 0xad, 0x47, 0xe3, 0x03, 0x6f, 0x1e, 0xda, 0x6c, 0xcb, 0x96, 0x53, 0x0a, + 0x19, 0xdc, 0x4a, 0x65, 0x95, 0x42, 0x56, 0x7d, 0x1d, 0x0a, 0x37, 0x1d, 0xff, 0x08, 0x7b, 0xda, + 0x57, 0xc6, 0x60, 0x5a, 0xa2, 0xfc, 0x08, 0x92, 0x47, 0xff, 0x5e, 0x81, 0xd9, 0x87, 0x8e, 0xcf, + 0x67, 0x6e, 0x40, 0xce, 0xe2, 0x52, 0x8c, 0x68, 0xfa, 0x90, 0x76, 0x4b, 0xa2, 0xdb, 0xd9, 0x3d, + 0xb1, 0x9d, 0x4d, 0xf7, 0xd6, 0x1f, 0x79, 0x3f, 0x9b, 0x7e, 0xd8, 0xdb, 0x53, 0x69, 0x07, 0xb2, + 0x01, 0x7b, 0xf2, 0x49, 0x48, 0x99, 0xd4, 0xab, 0x0b, 0xb7, 0x47, 0x1d, 0x90, 0xe3, 0x81, 0xed, + 0x82, 0x40, 0x49, 0xe8, 0xb3, 0x23, 0xd5, 0x01, 0x79, 0x4b, 0x89, 0xc1, 0x79, 0x4b, 0xa5, 0x7f, + 0x50, 0x60, 0x02, 0x6f, 0xb1, 0xc3, 0xf9, 0x7a, 0xda, 0x57, 0xe4, 0xef, 0x00, 0x74, 0xa7, 0x4c, + 0xcc, 0xd3, 0xd5, 0x23, 0xcd, 0x53, 0x98, 0xea, 0x18, 0xb4, 0x28, 0xfd, 0x8a, 0xc2, 0xdd, 0x82, + 0x70, 0x30, 0x23, 0xb9, 0x05, 0x1a, 0x64, 0x10, 0x5c, 0x80, 0xe6, 0x8d, 0x43, 0xa1, 0x89, 0x48, + 0x4f, 0x13, 0x9c, 0x4a, 0x5f, 0x84, 0xb9, 0xc1, 0xea, 0x34, 0x60, 0x3d, 0xdf, 0x8d, 0xae, 0xe7, + 0x4f, 0x1c, 0xaa, 0x7b, 0x79, 0xb8, 0x72, 0xf8, 0xef, 0x12, 0x8c, 0x8f, 0x7a, 0x59, 0xfc, 0xbd, + 0xb4, 0x48, 0x1b, 0xf9, 0x48, 0xd6, 0xac, 0x1c, 0x54, 0x4f, 0x3c, 0x85, 0xa0, 0xfa, 0x5f, 0x2b, + 0x30, 0xe3, 0x8a, 0x81, 0x44, 0x4c, 0x02, 0x8f, 0x8d, 0x7f, 0x6a, 0xd8, 0x35, 0x42, 0x37, 0x84, + 0x1c, 0x30, 0x39, 0xc0, 0x1c, 0xf4, 0xd6, 0x1f, 0xdd, 0x1c, 0xb8, 0xbd, 0x3d, 0x95, 0xbe, 0xd9, + 0xab, 0xc8, 0x25, 0xc8, 0x06, 0xad, 0x82, 0x73, 0xa3, 0x7b, 0xa0, 0x92, 0x0f, 0x7a, 0xba, 0xf2, + 0x69, 0x48, 
0x5b, 0xf6, 0xa6, 0x13, 0x5c, 0x11, 0x1c, 0xea, 0x36, 0x05, 0x09, 0x4b, 0xef, 0xc1, + 0xdc, 0x60, 0x91, 0x0c, 0x50, 0xe9, 0xdb, 0x51, 0x95, 0xbe, 0x3a, 0xb2, 0xd0, 0x0f, 0x50, 0xe7, + 0x5b, 0xa9, 0x6c, 0xaa, 0x90, 0x56, 0x5f, 0x02, 0xb2, 0xdc, 0x7d, 0xec, 0x35, 0xf4, 0xbe, 0xea, + 0xa2, 0xb0, 0x6d, 0xc3, 0x5b, 0xfe, 0x51, 0x02, 0xc6, 0xb1, 0x69, 0xf0, 0xc2, 0xe8, 0x69, 0x5b, + 0xc1, 0x17, 0x60, 0x9a, 0xda, 0x75, 0xb7, 0x83, 0x71, 0xee, 0x20, 0x7d, 0x0c, 0xcf, 0xe8, 0x5a, + 0xa1, 0x5b, 0x21, 0xee, 0xa1, 0xca, 0xc1, 0x71, 0x98, 0x5f, 0x30, 0xf2, 0x43, 0x29, 0x3f, 0xe1, + 0xe2, 0x1d, 0x64, 0xb7, 0x01, 0x3f, 0xb5, 0xa6, 0xa4, 0x06, 0x3c, 0x80, 0x76, 0x11, 0x0a, 0x22, + 0x80, 0xb6, 0x45, 0x3b, 0x82, 0x0d, 0xcf, 0x6d, 0x16, 0x61, 0xcd, 0xdb, 0xb4, 0xc3, 0x59, 0x45, + 0x5b, 0x72, 0x7e, 0x99, 0x9e, 0x96, 0xfc, 0xfc, 0xfb, 0x19, 0x98, 0x0c, 0xa4, 0x1b, 0xe6, 0x44, + 0x04, 0x86, 0x94, 0x07, 0xaf, 0x9e, 0x8f, 0xbd, 0x47, 0xee, 0x4a, 0x3b, 0x88, 0x39, 0x71, 0x62, + 0xf5, 0x1a, 0x4c, 0x63, 0x12, 0xe9, 0x36, 0xb5, 0x0f, 0x17, 0xf1, 0x50, 0xbf, 0x95, 0x06, 0x22, + 0x93, 0x0a, 0x5c, 0x2d, 0xcc, 0x80, 0x10, 0xa5, 0x02, 0xdb, 0xad, 0xf8, 0x3b, 0xee, 0x1e, 0x16, + 0x95, 0x25, 0xa7, 0xd9, 0xa4, 0x75, 0x9f, 0x9a, 0x61, 0x5d, 0x5f, 0x46, 0xa3, 0xd4, 0x07, 0x59, + 0x02, 0xc0, 0xe8, 0xa9, 0x4b, 0x3d, 0x7a, 0xb8, 0x3b, 0x88, 0x1c, 0xa3, 0xd3, 0x18, 0x19, 0x79, + 0x1d, 0x8a, 0x96, 0xed, 0x53, 0xd7, 0x36, 0x9a, 0xba, 0xd1, 0x6a, 0x61, 0x7c, 0x5a, 0x6f, 0xb9, + 0x74, 0xd3, 0xda, 0x15, 0x61, 0xea, 0xd9, 0xa0, 0x7e, 0xa1, 0xd5, 0xba, 0x63, 0x6c, 0xd3, 0x7b, + 0x58, 0x59, 0xfa, 0x5b, 0x05, 0xe6, 0x57, 0xc4, 0xeb, 0xa8, 0x01, 0x78, 0x6f, 0xd3, 0x0e, 0xb9, + 0x0e, 0x59, 0x36, 0xbf, 0x61, 0xae, 0x49, 0x4f, 0x78, 0xe5, 0x71, 0xb3, 0x32, 0x98, 0x30, 0x48, + 0xb2, 0xd8, 0xa2, 0x9d, 0x65, 0xc3, 0x37, 0xe4, 0x9d, 0x22, 0xf1, 0x21, 0xef, 0x14, 0x6c, 0x18, + 0x67, 0xe3, 0xe4, 0x4e, 0xcc, 0xae, 0xdd, 0xc9, 0x5f, 0x59, 0x3b, 0xdc, 0x84, 0xc6, 0xcb, 0x47, + 0x0c, 0x13, 0x6d, 0xd9, 0x5b, 0x41, 0x72, 0x5b, 
0xa2, 0x3f, 0xee, 0x3c, 0x58, 0x4e, 0xd1, 0xa4, + 0xb6, 0x2f, 0x27, 0xa0, 0x14, 0x36, 0x8a, 0x98, 0xaf, 0x96, 0xe3, 0xfa, 0x64, 0x32, 0xbc, 0xd3, + 0x4a, 0x62, 0xd0, 0xfc, 0x2c, 0xe4, 0xea, 0xce, 0x76, 0xab, 0x49, 0x7d, 0x6a, 0x8a, 0x6c, 0x86, + 0x6e, 0x01, 0x79, 0x15, 0x66, 0x43, 0x35, 0xd3, 0x37, 0x2d, 0xbb, 0x41, 0xdd, 0x96, 0x6b, 0xd9, + 0xbe, 0x88, 0x40, 0xce, 0x84, 0x95, 0xd7, 0xbb, 0x75, 0xe4, 0x2d, 0x28, 0x76, 0x89, 0xa4, 0xd7, + 0xb2, 0x6c, 0xd6, 0xf0, 0xfd, 0x1f, 0x02, 0x56, 0xb4, 0x39, 0x6f, 0x00, 0x4c, 0xbc, 0xa7, 0x1e, + 0x77, 0xf9, 0x32, 0xa4, 0xa6, 0x6e, 0xf8, 0xe2, 0x69, 0xd3, 0x68, 0xfa, 0x9c, 0x0f, 0x29, 0x17, + 0x7c, 0xf5, 0x5d, 0x78, 0x7e, 0xc9, 0xa5, 0x86, 0x4f, 0x0f, 0x96, 0x47, 0xb0, 0xde, 0x0f, 0x1c, + 0xa8, 0x72, 0xf0, 0x40, 0xd5, 0x0e, 0x5c, 0x1c, 0xce, 0x5f, 0x18, 0x85, 0xb7, 0x21, 0xe3, 0x62, + 0x89, 0xd0, 0x9f, 0xab, 0xa3, 0xe8, 0x4f, 0x3f, 0x3b, 0xc1, 0x44, 0x7d, 0x16, 0xd4, 0x83, 0x5b, + 0x85, 0xcf, 0xb7, 0x7e, 0x0e, 0xce, 0xc7, 0xb6, 0x12, 0xd8, 0x1e, 0xc0, 0x18, 0x67, 0x1b, 0x58, + 0xab, 0xa3, 0x81, 0x0b, 0x16, 0xab, 0xe0, 0xa5, 0xfe, 0x99, 0x02, 0x33, 0x83, 0x5a, 0xf7, 0xe9, + 0xe0, 0x81, 0xc2, 0x4f, 0xc4, 0x68, 0xd9, 0x0d, 0x18, 0xaf, 0x07, 0xab, 0x95, 0x69, 0xc9, 0x61, + 0xac, 0x5e, 0x3e, 0xa4, 0x5c, 0xc0, 0x67, 0x3b, 0xbe, 0x6b, 0xd4, 0xc3, 0xc7, 0x49, 0xf8, 0xa1, + 0x7e, 0x06, 0xce, 0x0c, 0x1e, 0x29, 0xd7, 0x97, 0x6b, 0x31, 0x3a, 0xce, 0x07, 0x76, 0x80, 0x76, + 0xab, 0x8f, 0xe1, 0xec, 0x60, 0xc6, 0x62, 0x32, 0xee, 0x43, 0x5e, 0xe2, 0x27, 0xac, 0x40, 0xf5, + 0xb0, 0x13, 0x22, 0xf3, 0x50, 0x5f, 0x85, 0xe2, 0x2d, 0xa7, 0xa6, 0xd1, 0x86, 0xe5, 0xf9, 0x6e, + 0x47, 0xdc, 0xdd, 0x0f, 0xf3, 0x51, 0xfe, 0x4b, 0x81, 0xd3, 0x03, 0xa8, 0x3e, 0x02, 0x97, 0xfd, + 0xb3, 0x30, 0xee, 0xb6, 0x6d, 0xdb, 0xb2, 0x1b, 0xfa, 0x23, 0xa7, 0x16, 0x1c, 0x93, 0xe2, 0x12, + 0x32, 0x0e, 0xc4, 0x89, 0x35, 0x79, 0xc1, 0xed, 0x96, 0x53, 0xf3, 0x4a, 0xb3, 0x90, 0xbc, 0xe5, + 0xd4, 0x7a, 0x55, 0x50, 0xbd, 0x04, 0x85, 0x5b, 0x4e, 0x2d, 0x2a, 0x9a, 0x59, 0xc8, 
0x3c, 0x72, + 0x6a, 0xdd, 0x19, 0x4d, 0x3f, 0x72, 0x6a, 0xab, 0xa6, 0xba, 0x02, 0xd3, 0x52, 0x53, 0x21, 0x8f, + 0x97, 0x21, 0xf9, 0xc8, 0xa9, 0x89, 0xb5, 0x3d, 0xdf, 0x63, 0xb3, 0xf1, 0xef, 0x12, 0xf0, 0xbf, + 0x51, 0x80, 0x80, 0x58, 0xd3, 0xcb, 0x57, 0x01, 0xba, 0xc9, 0x6d, 0x64, 0x06, 0x0a, 0x37, 0xee, + 0x6a, 0x77, 0x1f, 0x6c, 0xac, 0xde, 0x59, 0xd1, 0xd7, 0x37, 0x16, 0x96, 0x6e, 0xaf, 0x17, 0x4e, + 0x90, 0x69, 0x98, 0xd8, 0xb8, 0xa9, 0xad, 0x2c, 0x2c, 0x07, 0x45, 0xca, 0xe5, 0x67, 0x21, 0x1b, + 0x64, 0x8e, 0x75, 0xb3, 0xaa, 0xc8, 0x24, 0x40, 0x48, 0xbe, 0x5e, 0x50, 0xae, 0xfc, 0xcd, 0x73, + 0x90, 0x11, 0xde, 0xdc, 0x77, 0x14, 0x18, 0x97, 0x1f, 0x15, 0x93, 0xca, 0x68, 0xcf, 0x86, 0x03, + 0x31, 0x94, 0xaa, 0x23, 0xb7, 0xe7, 0xb2, 0x50, 0x9f, 0x7f, 0xff, 0x9f, 0xfe, 0xe3, 0x37, 0x12, + 0x1f, 0x23, 0xe5, 0xaa, 0xf0, 0x34, 0xab, 0xf2, 0x9b, 0xe3, 0xea, 0x7b, 0x42, 0x71, 0x9e, 0x90, + 0x5f, 0x54, 0x60, 0x2c, 0xf0, 0x80, 0xe3, 0xf2, 0x5b, 0xa2, 0x4f, 0x94, 0x4b, 0x97, 0x47, 0x69, + 0x2a, 0xb0, 0xa8, 0x88, 0xe5, 0x2c, 0x29, 0x85, 0x58, 0x4c, 0xde, 0x42, 0x82, 0xe1, 0x42, 0x1a, + 0x5f, 0x94, 0x92, 0xe7, 0x87, 0xbf, 0x39, 0xe5, 0x08, 0x2e, 0x8e, 0xfa, 0x38, 0x55, 0x9d, 0xc3, + 0xfe, 0x0b, 0x64, 0x32, 0xec, 0x9f, 0x3f, 0x7e, 0xfd, 0x02, 0xa4, 0x30, 0xa3, 0xed, 0xc2, 0x10, + 0x4e, 0x41, 0x8f, 0x87, 0x7a, 0x67, 0xab, 0x9e, 0xc3, 0x5e, 0x4b, 0xa4, 0x18, 0xed, 0x55, 0x1a, + 0xf3, 0x13, 0xfe, 0x30, 0x14, 0xb3, 0x98, 0xc8, 0x0b, 0xa3, 0xe5, 0x3a, 0x1d, 0x8c, 0xe4, 0xc0, + 0xc4, 0x28, 0x75, 0x16, 0x91, 0x4c, 0x91, 0x89, 0x10, 0x89, 0x6b, 0x6c, 0xfa, 0xe4, 0x4b, 0x0a, + 0x64, 0x78, 0x64, 0x81, 0x0c, 0x7d, 0x14, 0x14, 0x4a, 0xfd, 0xd2, 0x08, 0x2d, 0x45, 0xb7, 0x1f, + 0xc3, 0x6e, 0xcf, 0x90, 0xd3, 0x52, 0xb7, 0xac, 0x81, 0x24, 0x01, 0x0f, 0x32, 0xfc, 0x65, 0x47, + 0x2c, 0x82, 0xc8, 0xe3, 0x8f, 0x92, 0x9c, 0x8c, 0x2b, 0xfe, 0x0c, 0x09, 0x3b, 0xb8, 0x0a, 0xa9, + 0xf7, 0x77, 0x2a, 0xfe, 0x62, 0x49, 0xb7, 0xd3, 0x6f, 0x2b, 0x90, 0x97, 0x9e, 0x24, 0x90, 0x97, + 0x46, 0x7b, 0xba, 0x10, 
0xf4, 0x5f, 0x19, 0xb5, 0xb9, 0x10, 0xc3, 0x05, 0x44, 0x74, 0x8e, 0xcc, + 0x87, 0x88, 0xf8, 0x0d, 0x26, 0x7a, 0x8a, 0x12, 0xac, 0x6f, 0x2a, 0x90, 0x0b, 0x73, 0xc6, 0x63, + 0xd5, 0xa1, 0x37, 0x53, 0x3e, 0x56, 0x1d, 0xfa, 0xd2, 0xd8, 0xd5, 0x4b, 0x08, 0xe8, 0x3c, 0xf9, + 0x58, 0x08, 0xc8, 0x08, 0xda, 0xa0, 0x8a, 0x4a, 0x98, 0xbe, 0xab, 0xc0, 0x64, 0xf4, 0x4d, 0x01, + 0x79, 0x79, 0xa4, 0xbe, 0xa4, 0x30, 0x54, 0xe9, 0x95, 0x43, 0x50, 0x08, 0x88, 0x2f, 0x20, 0xc4, + 0xe7, 0xc8, 0xf9, 0x01, 0x10, 0x51, 0x89, 0xaa, 0xef, 0x05, 0x01, 0xa5, 0x27, 0xe4, 0x2b, 0x0a, + 0x8c, 0xcb, 0x39, 0x0d, 0xb1, 0xc6, 0x75, 0x40, 0x6a, 0x53, 0xac, 0x71, 0x1d, 0x94, 0xb3, 0xa1, + 0x9e, 0x46, 0x78, 0x27, 0xc9, 0x74, 0x08, 0x2f, 0x4c, 0xc4, 0xf8, 0x4d, 0x91, 0x73, 0x82, 0x8f, + 0xd1, 0x3e, 0x3a, 0x44, 0x65, 0x44, 0x74, 0x9a, 0x9c, 0x0a, 0x11, 0xe1, 0xd3, 0x3a, 0x5d, 0xc6, + 0x95, 0x97, 0x52, 0x2c, 0x62, 0x95, 0xbe, 0x3f, 0xfb, 0x23, 0x56, 0xe9, 0x07, 0x64, 0x6e, 0x0c, + 0xda, 0x7e, 0xb0, 0x15, 0x4f, 0x2a, 0x92, 0x34, 0xec, 0x77, 0x14, 0x98, 0x88, 0xa4, 0x4d, 0x90, + 0xea, 0xd0, 0xae, 0xa2, 0xb9, 0x1d, 0xa5, 0x97, 0x47, 0x27, 0x38, 0x70, 0x05, 0x08, 0x74, 0x42, + 0x5c, 0x12, 0xbe, 0x2f, 0x29, 0x90, 0x0b, 0x93, 0x15, 0x62, 0x57, 0x65, 0x6f, 0xc2, 0x46, 0xec, + 0xaa, 0xec, 0xcb, 0x7f, 0x50, 0x8b, 0x88, 0x89, 0xa8, 0x5d, 0x23, 0xed, 0xb5, 0x0c, 0xfb, 0x0d, + 0xe5, 0x32, 0xf9, 0x02, 0xba, 0x11, 0xf5, 0xad, 0x78, 0x33, 0x1d, 0xc9, 0xfa, 0x2f, 0xc5, 0xed, + 0xa2, 0xf2, 0xe3, 0x93, 0x01, 0xf6, 0xd2, 0x43, 0x46, 0x92, 0x08, 0x7e, 0x5e, 0x81, 0x31, 0x91, + 0x5d, 0x1e, 0xeb, 0x21, 0x44, 0x33, 0xd0, 0x47, 0x87, 0xd0, 0xef, 0x1e, 0xb4, 0x38, 0xa7, 0x1e, + 0x0c, 0x22, 0x65, 0x3d, 0x16, 0x43, 0x34, 0xad, 0xfd, 0x38, 0x18, 0xb6, 0x39, 0x27, 0x09, 0xc3, + 0x2f, 0x29, 0x90, 0x0d, 0x9e, 0x00, 0x90, 0x38, 0xff, 0xa7, 0xe7, 0x15, 0x43, 0xe9, 0x85, 0x91, + 0xda, 0x0a, 0x24, 0xfd, 0x6e, 0x03, 0x86, 0xee, 0xa2, 0xfb, 0xd7, 0xb8, 0xfc, 0x6a, 0x25, 0xde, + 0xba, 0xf4, 0x3f, 0x87, 0x89, 0xb7, 0x2e, 0x03, 0x9e, 0xc3, 
0xa8, 0xe7, 0x11, 0xd3, 0x33, 0xe4, + 0x8c, 0x64, 0x5d, 0x1a, 0xbd, 0xb0, 0xbe, 0xae, 0xc0, 0x98, 0xa0, 0x8e, 0x9d, 0xa2, 0xe8, 0xf3, + 0x98, 0xd2, 0x4b, 0xf1, 0x4d, 0x7b, 0x1e, 0x07, 0xa9, 0x97, 0x11, 0xca, 0xb3, 0x44, 0x8d, 0x81, + 0x52, 0x7d, 0x8f, 0x15, 0x3c, 0x61, 0xfe, 0xdd, 0x9a, 0xd3, 0xf0, 0x62, 0xfd, 0x3b, 0xe9, 0x91, + 0xd5, 0x61, 0xa1, 0x0c, 0xb2, 0xb9, 0x0d, 0x59, 0x22, 0xdf, 0x52, 0xf0, 0x6f, 0x46, 0x74, 0x6f, + 0x63, 0x63, 0x6d, 0xdb, 0xa0, 0xc4, 0xa2, 0x58, 0xdb, 0x36, 0xf0, 0xa2, 0x57, 0x9d, 0x47, 0x54, + 0x45, 0x32, 0x27, 0xaf, 0x26, 0xd6, 0x4e, 0xe4, 0xb4, 0xbf, 0xaf, 0x40, 0x2e, 0xbc, 0x52, 0x8a, + 0x35, 0x68, 0xbd, 0x37, 0xc2, 0xb1, 0x06, 0xad, 0xef, 0x96, 0x4a, 0x2d, 0x21, 0x90, 0x19, 0x42, + 0x42, 0x20, 0x0f, 0x1d, 0x5f, 0x80, 0x78, 0x02, 0x69, 0xee, 0x4d, 0x3c, 0x3f, 0xfc, 0x96, 0x60, + 0xb8, 0xb7, 0x1f, 0xf5, 0x1d, 0x0e, 0x70, 0x3b, 0x65, 0x8f, 0xe1, 0xb7, 0x14, 0xc8, 0xcb, 0xb1, + 0x90, 0xb8, 0x89, 0xef, 0x8f, 0x3b, 0x0c, 0x9a, 0x94, 0xc8, 0x5f, 0xa7, 0x93, 0x68, 0x78, 0x50, + 0x66, 0x80, 0x0f, 0x28, 0x11, 0x44, 0x77, 0x9b, 0x0c, 0x0f, 0xb0, 0x0f, 0xb1, 0xf5, 0xd2, 0x0d, + 0x47, 0xac, 0x4b, 0x1e, 0x8d, 0xd6, 0x0f, 0xb4, 0xf6, 0xac, 0x81, 0x04, 0xe1, 0x97, 0x15, 0x3c, + 0x13, 0x07, 0x51, 0xed, 0x17, 0x47, 0x0c, 0xb1, 0x0e, 0x5f, 0x41, 0xfd, 0x01, 0x59, 0xf5, 0x0c, + 0xc2, 0x99, 0x25, 0x27, 0xe5, 0xcd, 0x27, 0xe8, 0xf9, 0x87, 0x0a, 0x9c, 0x1b, 0x16, 0xd9, 0x23, + 0x8b, 0x71, 0x7b, 0xff, 0x68, 0x61, 0xc7, 0xd2, 0xd2, 0xb1, 0x78, 0x44, 0x4d, 0xa4, 0x5a, 0x94, + 0x86, 0xb2, 0xed, 0xb3, 0x59, 0x16, 0x91, 0x38, 0xb6, 0x93, 0xff, 0xa5, 0x72, 0x50, 0xdc, 0x09, + 0x91, 0x78, 0xe4, 0xcd, 0x23, 0xc5, 0xfc, 0x42, 0xf1, 0xbf, 0x75, 0x54, 0xf2, 0x03, 0xf7, 0x9e, + 0x9e, 0x41, 0x90, 0x3f, 0x3f, 0x28, 0x9c, 0xf8, 0xf1, 0x43, 0x77, 0xcd, 0x21, 0xbf, 0x7e, 0x68, + 0x3a, 0x81, 0xf5, 0x35, 0xc4, 0x5a, 0x21, 0x2f, 0xf6, 0x61, 0xad, 0xbe, 0x77, 0x50, 0x54, 0xf0, + 0x09, 0xf9, 0x9e, 0x82, 0x81, 0xa3, 0x68, 0xa0, 0x8a, 0xbc, 0x7a, 0xb8, 0xb0, 0x16, 0x47, 0xfe, + 
0xda, 0x51, 0x62, 0x61, 0x03, 0x1c, 0xe3, 0x47, 0x4e, 0x4d, 0x77, 0x45, 0xe3, 0xa8, 0xb7, 0x91, + 0x0b, 0x43, 0x5c, 0xb1, 0x76, 0xba, 0x37, 0x66, 0x16, 0x6b, 0xa7, 0xfb, 0xa2, 0x66, 0xea, 0x33, + 0x88, 0xe8, 0x14, 0x99, 0x95, 0x11, 0x55, 0xdf, 0xe3, 0x51, 0xb7, 0x27, 0x8b, 0x97, 0x7f, 0xf0, + 0xef, 0xf3, 0x27, 0x7e, 0xb0, 0x3f, 0xaf, 0xfc, 0x70, 0x7f, 0x5e, 0xf9, 0xd1, 0xfe, 0xbc, 0xf2, + 0x6f, 0xfb, 0xf3, 0xca, 0x37, 0x3e, 0x98, 0x3f, 0xf1, 0xc3, 0x0f, 0xe6, 0x4f, 0xfc, 0xe8, 0x83, + 0xf9, 0x13, 0xef, 0x64, 0x03, 0xe6, 0xb5, 0x0c, 0x06, 0x7c, 0x5f, 0xfd, 0xdf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x5b, 0x83, 0xbe, 0x44, 0x71, 0x54, 0x00, 0x00, } diff --git a/pkg/server/serverpb/status.pb.gw.go b/pkg/server/serverpb/status.pb.gw.go index 38e9da0050c9..66d381cbb4e9 100644 --- a/pkg/server/serverpb/status.pb.gw.go +++ b/pkg/server/serverpb/status.pb.gw.go @@ -1127,6 +1127,10 @@ func local_request_Status_LogFilesList_0(ctx context.Context, marshaler runtime. } +var ( + filter_Status_LogFile_0 = &utilities.DoubleArray{Encoding: map[string]int{"node_id": 0, "file": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + func request_Status_LogFile_0(ctx context.Context, marshaler runtime.Marshaler, client StatusClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq LogFileRequest var metadata runtime.ServerMetadata @@ -1160,6 +1164,13 @@ func request_Status_LogFile_0(ctx context.Context, marshaler runtime.Marshaler, return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "file", err) } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Status_LogFile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.LogFile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err @@ -1198,6 +1209,13 @@ func local_request_Status_LogFile_0(ctx context.Context, marshaler runtime.Marsh return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "file", err) } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Status_LogFile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.LogFile(ctx, &protoReq) return msg, metadata, err diff --git a/pkg/server/serverpb/status.proto b/pkg/server/serverpb/status.proto index a54f1539b128..42cdf68f9f38 100644 --- a/pkg/server/serverpb/status.proto +++ b/pkg/server/serverpb/status.proto @@ -299,6 +299,18 @@ message LogsRequest { string end_time = 4; string max = 5; string pattern = 6; + // redact, if true, requests redaction of sensitive data away + // from the retrieved log entries. + // Only admin users can send a request with redact = false. + bool redact = 7; + // keep_redactable, if true, requests that retrieved entries preserve + // the redaction markers if any were present in the log files. + // If false, redaction markers are stripped away. + // Note that redact = false && redactable = false implies + // "flat" entries with all sensitive information enclosed and + // no markers; this is suitable for backward-compatibility with + // RPC clients from prior the introduction of redactable logs. + bool keep_redactable = 8; } message LogEntriesResponse { @@ -322,6 +334,18 @@ message LogFileRequest { // forwarding is necessary. string node_id = 1; string file = 2; + // redact, if true, requests redaction of sensitive data away + // from the retrieved log entries. + // Only admin users can send a request with redact = false. 
+ bool redact = 3; + // keep_redactable, if true, requests that retrieved entries preserve + // the redaction markers if any were present in the log files. + // If false, redaction markers are stripped away. + // Note that redact = false && redactable = false implies + // "flat" entries with all sensitive information enclosed and + // no markers; this is suitable for backward-compatibility with + // RPC clients from prior the introduction of redactable logs. + bool keep_redactable = 4; } enum StacksType { diff --git a/pkg/server/settingsworker.go b/pkg/server/settingsworker.go index d7c6725edcfb..89023187e47a 100644 --- a/pkg/server/settingsworker.go +++ b/pkg/server/settingsworker.go @@ -28,7 +28,7 @@ import ( // RefreshSettings starts a settings-changes listener. func (s *Server) refreshSettings() { - tbl := &sqlbase.SettingsTable + tbl := sqlbase.SettingsTable.TableDesc() a := &sqlbase.DatumAlloc{} codec := keys.TODOSQLCodec diff --git a/pkg/server/status.go b/pkg/server/status.go index af0d578a5901..5f3e61782a95 100644 --- a/pkg/server/status.go +++ b/pkg/server/status.go @@ -791,16 +791,17 @@ func (s *statusServer) LogFilesList( func (s *statusServer) LogFile( ctx context.Context, req *serverpb.LogFileRequest, ) (*serverpb.LogEntriesResponse, error) { - if _, err := s.admin.requireAdminUser(ctx); err != nil { - return nil, err - } - + ctx = propagateGatewayMetadata(ctx) + ctx = s.AnnotateCtx(ctx) if !debug.GatewayRemoteAllowed(ctx, s.st) { return nil, remoteDebuggingErr } - ctx = propagateGatewayMetadata(ctx) - ctx = s.AnnotateCtx(ctx) + _, isAdmin, err := s.admin.getUserAndRole(ctx) + if err != nil { + return nil, err + } + nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { return nil, grpcstatus.Errorf(codes.InvalidArgument, err.Error()) @@ -813,7 +814,18 @@ func (s *statusServer) LogFile( return status.LogFile(ctx, req) } + // Determine whether the request is valid for the current user. 
+ if !isAdmin && !req.Redact { + return nil, errInsufficientPrivilege + } + + // Determine how to redact. + inputEditMode := log.SelectEditMode(req.Redact, req.KeepRedactable) + + // Ensure that the latest log entries are available in files. log.Flush() + + // Read the logs. reader, err := log.GetLogReader(req.File, true /* restricted */) if reader == nil || err != nil { return nil, fmt.Errorf("log file %s could not be opened: %s", req.File, err) @@ -822,7 +834,7 @@ func (s *statusServer) LogFile( var entry log.Entry var resp serverpb.LogEntriesResponse - decoder := log.NewEntryDecoder(reader) + decoder := log.NewEntryDecoder(reader, inputEditMode) for { if err := decoder.Decode(&entry); err != nil { if err == io.EOF { @@ -833,6 +845,13 @@ func (s *statusServer) LogFile( resp.Entries = append(resp.Entries, entry) } + // Erase the redactable bit if requested by client. + if !req.KeepRedactable { + for i := range resp.Entries { + resp.Entries[i].Redactable = false + } + } + return &resp, nil } @@ -870,15 +889,17 @@ func parseInt64WithDefault(s string, defaultValue int64) (int64, error) { func (s *statusServer) Logs( ctx context.Context, req *serverpb.LogsRequest, ) (*serverpb.LogEntriesResponse, error) { - if _, err := s.admin.requireAdminUser(ctx); err != nil { - return nil, err - } + ctx = propagateGatewayMetadata(ctx) + ctx = s.AnnotateCtx(ctx) if !debug.GatewayRemoteAllowed(ctx, s.st) { return nil, remoteDebuggingErr } - ctx = propagateGatewayMetadata(ctx) - ctx = s.AnnotateCtx(ctx) + _, isAdmin, err := s.admin.getUserAndRole(ctx) + if err != nil { + return nil, err + } + nodeID, local, err := s.parseNodeID(req.NodeId) if err != nil { return nil, grpcstatus.Errorf(codes.InvalidArgument, err.Error()) @@ -891,8 +912,15 @@ func (s *statusServer) Logs( return status.Logs(ctx, req) } - log.Flush() + // Determine whether the request is valid for the current user. + if !isAdmin && !req.Redact { + return nil, errInsufficientPrivilege + } + + // Determine how to redact. 
+ inputEditMode := log.SelectEditMode(req.Redact, req.KeepRedactable) + // Select the time interval. startTimestamp, err := parseInt64WithDefault( req.StartTime, timeutil.Now().AddDate(0, 0, -1).UnixNano()) @@ -924,11 +952,23 @@ func (s *statusServer) Logs( } } - entries, err := log.FetchEntriesFromFiles(startTimestamp, endTimestamp, int(maxEntries), regex) + // Ensure that the latest log entries are available in files. + log.Flush() + + // Read the logs. + entries, err := log.FetchEntriesFromFiles( + startTimestamp, endTimestamp, int(maxEntries), regex, inputEditMode) if err != nil { return nil, err } + // Erase the redactable bit if requested by client. + if !req.KeepRedactable { + for i := range entries { + entries[i].Redactable = false + } + } + return &serverpb.LogEntriesResponse{Entries: entries}, nil } diff --git a/pkg/server/status/runtime.go b/pkg/server/status/runtime.go index 429c2d47aad2..3ce7f6b6de2a 100644 --- a/pkg/server/status/runtime.go +++ b/pkg/server/status/runtime.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" + "github.com/cockroachdb/cockroach/pkg/util/redact" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/dustin/go-humanize" "github.com/elastic/gosigar" @@ -496,7 +497,11 @@ func (rsr *RuntimeStatSampler) SampleEnvironment(ctx context.Context, ms GoMemSt staleMsg = "(stale)" } goTotal := ms.Sys - ms.HeapReleased - log.Infof(ctx, "%s", log.Safe(fmt.Sprintf("runtime stats: %s RSS, %d goroutines, %s/%s/%s GO alloc/idle/total%s, "+ + + // TODO(knz): make utility wrapper around humanize.IBytes that + // returns a safe value and collapse the entire log.Infof -> Safe -> + // Sprintf sequence as a flat Infof call. 
+ log.Infof(ctx, "%s", redact.Safe(fmt.Sprintf("runtime stats: %s RSS, %d goroutines, %s/%s/%s GO alloc/idle/total%s, "+ "%s/%s CGO alloc/total, %.1f CGO/sec, %.1f/%.1f %%(u/s)time, %.1f %%gc (%dx), "+ "%s/%s (r/w)net", humanize.IBytes(mem.Resident), numGoroutine, diff --git a/pkg/server/status_test.go b/pkg/server/status_test.go index a9c8ec23c70e..a70eb8f581ee 100644 --- a/pkg/server/status_test.go +++ b/pkg/server/status_test.go @@ -57,6 +57,7 @@ import ( "github.com/cockroachdb/errors" "github.com/gogo/protobuf/proto" "github.com/kr/pretty" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -522,7 +523,7 @@ func TestStatusLocalLogs(t *testing.T) { if err := getStatusJSONProto(ts, "logfiles/local", &wrapper); err != nil { t.Fatal(err) } - if a, e := len(wrapper.Files), 1; a != e { + if a, e := len(wrapper.Files), 2; a != e { t.Fatalf("expected %d log files; got %d", e, a) } @@ -630,6 +631,145 @@ func TestStatusLocalLogs(t *testing.T) { } } +// TestStatusLogRedaction checks that the log file retrieval RPCs +// honor the redaction flags. +func TestStatusLogRedaction(t *testing.T) { + defer leaktest.AfterTest(t)() + + testData := []struct { + redactableLogs bool // logging flag + redact bool // RPC request flag + keepRedactable bool // RPC request flag + expectedMessage string + expectedRedactable bool // redactable bit in result entries + }{ + // Note: all 2^3 combinations of (redactableLogs, redact, + // keepRedactable) must be tested below. + + // redact=false, keepredactable=false results in an unsafe "flat" + // format regardless of whether there were markers in the log + // file. + {false, false, false, `THISISSAFE THISISUNSAFE`, false}, + // keepredactable=true, if there were no markers to start with + // (redactableLogs=false), introduces markers around the entire + // message to indicate it's not known to be safe. 
+ {false, false, true, `‹THISISSAFE THISISUNSAFE›`, true}, + // redact=true must be conservative and redact everything out if + // there were no markers to start with (redactableLogs=false). + {false, true, false, `‹×›`, false}, + {false, true, true, `‹×›`, false}, + // redact=false, keepredactable=false results in an unsafe "flat" + // format regardless of whether there were markers in the log + // file. + {true, false, false, `THISISSAFE THISISUNSAFE`, false}, + // keepredactable=true, redact=false, keeps whatever was in the + // log file. + {true, false, true, `THISISSAFE ‹THISISUNSAFE›`, true}, + // if there were markers in the log to start with, redact=true + // removes only the unsafe information. + {true, true, false, `THISISSAFE ‹×›`, false}, + // Whether or not to keep the redactable markers has no influence + // on the output of redaction, just on the presence of the + // "redactable" marker. In any case no information is leaked. + {true, true, true, `THISISSAFE ‹×›`, true}, + } + + testutils.RunTrueAndFalse(t, "redactableLogs", + func(t *testing.T, redactableLogs bool) { + s := log.ScopeWithoutShowLogs(t) + defer s.Close(t) + + // Apply the redactable log boolean for this test. + defer log.TestingSetRedactable(redactableLogs)() + + ts := startServer(t) + defer ts.Stopper().Stop(context.Background()) + + // Log something. + log.Infof(context.Background(), "THISISSAFE %s", "THISISUNSAFE") + + // Determine the log file name. + var wrapper serverpb.LogFilesListResponse + if err := getStatusJSONProto(ts, "logfiles/local", &wrapper); err != nil { + t.Fatal(err) + } + // We expect a main log and a stderr log. + if a, e := len(wrapper.Files), 2; a != e { + t.Fatalf("expected %d log files; got %d: %+v", e, a, wrapper.Files) + } + var file log.FileInfo + // Find the main log. 
+ for _, f := range wrapper.Files { + if !strings.Contains("stderr", f.Name) { + file = f + break + } + } + + for _, tc := range testData { + if tc.redactableLogs != redactableLogs { + continue + } + t.Run(fmt.Sprintf("redact=%v,keepredactable=%v", tc.redact, tc.keepRedactable), + func(t *testing.T) { + // checkEntries asserts that the redaction results are + // those expected in tc. + checkEntries := func(entries []log.Entry) { + foundMessage := false + for _, entry := range entries { + if !strings.HasSuffix(entry.File, "status_test.go") { + continue + } + foundMessage = true + + assert.Equal(t, tc.expectedMessage, entry.Message) + } + if !foundMessage { + t.Fatalf("did not find expected message from test in log") + } + } + + // Retrieve the log entries with the configured flags using + // the LogFiles() RPC. + logFilesURL := fmt.Sprintf("logfiles/local/%s?redact=%v&keep_redactable=%v", + file.Name, tc.redact, tc.keepRedactable) + var wrapper serverpb.LogEntriesResponse + if err := getStatusJSONProto(ts, logFilesURL, &wrapper); err != nil { + t.Fatal(err) + } + checkEntries(wrapper.Entries) + + // If the test specifies redact=false, check that a non-admin + // user gets a privilege error. + if !tc.redact { + err := getStatusJSONProtoWithAdminOption(ts, logFilesURL, &wrapper, false /* isAdmin */) + if !testutils.IsError(err, "status: 403") { + t.Fatalf("expected privilege error, got %v", err) + } + } + + // Retrieve the log entries using the Logs() RPC. + logsURL := fmt.Sprintf("logs/local?redact=%v&keep_redactable=%v", + tc.redact, tc.keepRedactable) + var wrapper2 serverpb.LogEntriesResponse + if err := getStatusJSONProto(ts, logsURL, &wrapper2); err != nil { + t.Fatal(err) + } + checkEntries(wrapper2.Entries) + + // If the test specifies redact=false, check that a non-admin + // user gets a privilege error. 
+ if !tc.redact { + err := getStatusJSONProtoWithAdminOption(ts, logsURL, &wrapper2, false /* isAdmin */) + if !testutils.IsError(err, "status: 403") { + t.Fatalf("expected privilege error, got %v", err) + } + } + }) + } + }) +} + // TestNodeStatusResponse verifies that node status returns the expected // results. func TestNodeStatusResponse(t *testing.T) { diff --git a/pkg/sql/add_column.go b/pkg/sql/add_column.go new file mode 100644 index 000000000000..b617fcb64c9e --- /dev/null +++ b/pkg/sql/add_column.go @@ -0,0 +1,149 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package sql + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/schemaexpr" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" + "github.com/cockroachdb/errors" +) + +// addColumnImpl performs the logic of adding a column within an ALTER TABLE. 
+func (p *planner) addColumnImpl( + params runParams, + n *alterTableNode, + tn *tree.TableName, + desc *sqlbase.MutableTableDescriptor, + t *tree.AlterTableAddColumn, +) error { + d := t.ColumnDef + version := params.ExecCfg().Settings.Version.ActiveVersionOrEmpty(params.ctx) + toType, err := tree.ResolveType(params.ctx, d.Type, params.p.semaCtx.GetTypeResolver()) + if err != nil { + return err + } + if supported, err := isTypeSupportedInVersion(version, toType); err != nil { + return err + } else if !supported { + return pgerror.Newf( + pgcode.FeatureNotSupported, + "type %s is not supported until version upgrade is finalized", + toType.SQLString(), + ) + } + + newDef, seqDbDesc, seqName, seqOpts, err := params.p.processSerialInColumnDef(params.ctx, d, tn) + if err != nil { + return err + } + if seqName != nil { + if err := doCreateSequence( + params, + n.n.String(), + seqDbDesc, + n.tableDesc.GetParentSchemaID(), + seqName, + n.tableDesc.Temporary, + seqOpts, + tree.AsStringWithFQNames(n.n, params.Ann()), + ); err != nil { + return err + } + } + d = newDef + incTelemetryForNewColumn(d) + + col, idx, expr, err := sqlbase.MakeColumnDefDescs(params.ctx, d, ¶ms.p.semaCtx, params.EvalContext()) + if err != nil { + return err + } + // If the new column has a DEFAULT expression that uses a sequence, add references between + // its descriptor and this column descriptor. + if d.HasDefaultExpr() { + changedSeqDescs, err := maybeAddSequenceDependencies( + params.ctx, params.p, n.tableDesc, col, expr, nil, + ) + if err != nil { + return err + } + for _, changedSeqDesc := range changedSeqDescs { + if err := params.p.writeSchemaChange( + params.ctx, changedSeqDesc, sqlbase.InvalidMutationID, tree.AsStringWithFQNames(n.n, params.Ann()), + ); err != nil { + return err + } + } + } + + // We're checking to see if a user is trying add a non-nullable column without a default to a + // non empty table by scanning the primary index span with a limit of 1 to see if any key exists. 
+ if !col.Nullable && (col.DefaultExpr == nil && !col.IsComputed()) { + span := n.tableDesc.PrimaryIndexSpan(params.ExecCfg().Codec) + kvs, err := params.p.txn.Scan(params.ctx, span.Key, span.EndKey, 1) + if err != nil { + return err + } + if len(kvs) > 0 { + return sqlbase.NewNonNullViolationError(col.Name) + } + } + _, err = n.tableDesc.FindActiveColumnByName(string(d.Name)) + if m := n.tableDesc.FindColumnMutationByName(d.Name); m != nil { + switch m.Direction { + case sqlbase.DescriptorMutation_ADD: + return pgerror.Newf(pgcode.DuplicateColumn, + "duplicate: column %q in the middle of being added, not yet public", + col.Name) + case sqlbase.DescriptorMutation_DROP: + return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, + "column %q being dropped, try again later", col.Name) + default: + if err != nil { + return errors.AssertionFailedf( + "mutation in state %s, direction %s, and no column descriptor", + errors.Safe(m.State), errors.Safe(m.Direction)) + } + } + } + if err == nil { + if t.IfNotExists { + return nil + } + return sqlbase.NewColumnAlreadyExistsError(string(d.Name), n.tableDesc.Name) + } + + n.tableDesc.AddColumnMutation(col, sqlbase.DescriptorMutation_ADD) + if idx != nil { + if err := n.tableDesc.AddIndexMutation(idx, sqlbase.DescriptorMutation_ADD); err != nil { + return err + } + } + if d.HasColumnFamily() { + err := n.tableDesc.AddColumnToFamilyMaybeCreate( + col.Name, string(d.Family.Name), d.Family.Create, + d.Family.IfNotExists) + if err != nil { + return err + } + } + + if d.IsComputed() { + computedColValidator := schemaexpr.NewComputedColumnValidator(params.ctx, n.tableDesc, ¶ms.p.semaCtx) + if err := computedColValidator.Validate(d); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/sql/alter_table.go b/pkg/sql/alter_table.go index 08abe30c4a56..d39c917c09cf 100644 --- a/pkg/sql/alter_table.go +++ b/pkg/sql/alter_table.go @@ -144,124 +144,13 @@ func (n *alterTableNode) startExec(params runParams) error { switch t := 
cmd.(type) { case *tree.AlterTableAddColumn: - d := t.ColumnDef - version := params.ExecCfg().Settings.Version.ActiveVersionOrEmpty(params.ctx) - toType, err := tree.ResolveType(params.ctx, d.Type, params.p.semaCtx.GetTypeResolver()) - if err != nil { - return err - } - if supported, err := isTypeSupportedInVersion(version, toType); err != nil { - return err - } else if !supported { - return pgerror.Newf( - pgcode.FeatureNotSupported, - "type %s is not supported until version upgrade is finalized", - toType.SQLString(), - ) - } - - newDef, seqDbDesc, seqName, seqOpts, err := params.p.processSerialInColumnDef(params.ctx, d, tn) - if err != nil { - return err - } - if seqName != nil { - if err := doCreateSequence( - params, - n.n.String(), - seqDbDesc, - n.tableDesc.GetParentSchemaID(), - seqName, - n.tableDesc.Temporary, - seqOpts, - tree.AsStringWithFQNames(n.n, params.Ann()), - ); err != nil { - return err - } - } - d = newDef - incTelemetryForNewColumn(d) - - col, idx, expr, err := sqlbase.MakeColumnDefDescs(params.ctx, d, ¶ms.p.semaCtx, params.EvalContext()) + var err error + params.p.runWithOptions(resolveFlags{contextDatabaseID: n.tableDesc.ParentID}, func() { + err = params.p.addColumnImpl(params, n, tn, n.tableDesc, t) + }) if err != nil { return err } - // If the new column has a DEFAULT expression that uses a sequence, add references between - // its descriptor and this column descriptor. 
- if d.HasDefaultExpr() { - changedSeqDescs, err := maybeAddSequenceDependencies( - params.ctx, params.p, n.tableDesc, col, expr, nil, - ) - if err != nil { - return err - } - for _, changedSeqDesc := range changedSeqDescs { - if err := params.p.writeSchemaChange( - params.ctx, changedSeqDesc, sqlbase.InvalidMutationID, tree.AsStringWithFQNames(n.n, params.Ann()), - ); err != nil { - return err - } - } - } - - // We're checking to see if a user is trying add a non-nullable column without a default to a - // non empty table by scanning the primary index span with a limit of 1 to see if any key exists. - if !col.Nullable && (col.DefaultExpr == nil && !col.IsComputed()) { - span := n.tableDesc.PrimaryIndexSpan(params.ExecCfg().Codec) - kvs, err := params.p.txn.Scan(params.ctx, span.Key, span.EndKey, 1) - if err != nil { - return err - } - if len(kvs) > 0 { - return sqlbase.NewNonNullViolationError(col.Name) - } - } - _, err = n.tableDesc.FindActiveColumnByName(string(d.Name)) - if m := n.tableDesc.FindColumnMutationByName(d.Name); m != nil { - switch m.Direction { - case sqlbase.DescriptorMutation_ADD: - return pgerror.Newf(pgcode.DuplicateColumn, - "duplicate: column %q in the middle of being added, not yet public", - col.Name) - case sqlbase.DescriptorMutation_DROP: - return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, - "column %q being dropped, try again later", col.Name) - default: - if err != nil { - return errors.AssertionFailedf( - "mutation in state %s, direction %s, and no column descriptor", - errors.Safe(m.State), errors.Safe(m.Direction)) - } - } - } - if err == nil { - if t.IfNotExists { - continue - } - return sqlbase.NewColumnAlreadyExistsError(string(d.Name), n.tableDesc.Name) - } - - n.tableDesc.AddColumnMutation(col, sqlbase.DescriptorMutation_ADD) - if idx != nil { - if err := n.tableDesc.AddIndexMutation(idx, sqlbase.DescriptorMutation_ADD); err != nil { - return err - } - } - if d.HasColumnFamily() { - err := 
n.tableDesc.AddColumnToFamilyMaybeCreate( - col.Name, string(d.Family.Name), d.Family.Create, - d.Family.IfNotExists) - if err != nil { - return err - } - } - - if d.IsComputed() { - computedColValidator := schemaexpr.NewComputedColumnValidator(params.ctx, n.tableDesc, ¶ms.p.semaCtx) - if err := computedColValidator.Validate(d); err != nil { - return err - } - } - case *tree.AlterTableAddConstraint: switch d := t.ConstraintDef.(type) { case *tree.UniqueConstraintTableDef: @@ -315,24 +204,32 @@ func (n *alterTableNode) startExec(params runParams) error { } case *tree.CheckConstraintTableDef: - info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil, params.ExecCfg().Codec) - if err != nil { - return err - } - ckBuilder := schemaexpr.NewCheckConstraintBuilder(params.ctx, *tn, n.tableDesc, ¶ms.p.semaCtx) - for k := range info { - ckBuilder.MarkNameInUse(k) - } - ck, err := ckBuilder.Build(d) + var err error + params.p.runWithOptions(resolveFlags{contextDatabaseID: n.tableDesc.ParentID}, func() { + info, infoErr := n.tableDesc.GetConstraintInfo(params.ctx, nil, params.ExecCfg().Codec) + if err != nil { + err = infoErr + return + } + ckBuilder := schemaexpr.NewCheckConstraintBuilder(params.ctx, *tn, n.tableDesc, ¶ms.p.semaCtx) + for k := range info { + ckBuilder.MarkNameInUse(k) + } + ck, buildErr := ckBuilder.Build(d) + if buildErr != nil { + err = buildErr + return + } + if t.ValidationBehavior == tree.ValidationDefault { + ck.Validity = sqlbase.ConstraintValidity_Validating + } else { + ck.Validity = sqlbase.ConstraintValidity_Unvalidated + } + n.tableDesc.AddCheckMutation(ck, sqlbase.DescriptorMutation_ADD) + }) if err != nil { return err } - if t.ValidationBehavior == tree.ValidationDefault { - ck.Validity = sqlbase.ConstraintValidity_Validating - } else { - ck.Validity = sqlbase.ConstraintValidity_Unvalidated - } - n.tableDesc.AddCheckMutation(ck, sqlbase.DescriptorMutation_ADD) case *tree.ForeignKeyConstraintTableDef: for _, colName := range d.FromCols { @@ 
-735,7 +632,7 @@ func (n *alterTableNode) startExec(params runParams) error { case *tree.AlterTableSetAudit: var err error - descriptorChanged, err = params.p.setAuditMode(params.ctx, n.tableDesc.TableDesc(), t.Mode) + descriptorChanged, err = params.p.setAuditMode(params.ctx, n.tableDesc, t.Mode) if err != nil { return err } @@ -850,7 +747,7 @@ func (n *alterTableNode) startExec(params runParams) error { } func (p *planner) setAuditMode( - ctx context.Context, desc *sqlbase.TableDescriptor, auditMode tree.AuditMode, + ctx context.Context, desc *sqlbase.MutableTableDescriptor, auditMode tree.AuditMode, ) (bool, error) { // An auditing config change is itself auditable! // We record the event even if the permission check below fails: diff --git a/pkg/sql/authorization.go b/pkg/sql/authorization.go index 5da9679f5bb7..9f8a5618df83 100644 --- a/pkg/sql/authorization.go +++ b/pkg/sql/authorization.go @@ -41,11 +41,11 @@ type userRoleMembership map[string]bool type AuthorizationAccessor interface { // CheckPrivilege verifies that the user has `privilege` on `descriptor`. CheckPrivilege( - ctx context.Context, descriptor sqlbase.DescriptorProto, privilege privilege.Kind, + ctx context.Context, descriptor sqlbase.DescriptorInterface, privilege privilege.Kind, ) error // CheckAnyPrivilege returns nil if user has any privileges at all. - CheckAnyPrivilege(ctx context.Context, descriptor sqlbase.DescriptorProto) error + CheckAnyPrivilege(ctx context.Context, descriptor sqlbase.DescriptorInterface) error // HasAdminRole returns tuple of bool and error: // (true, nil) means that the user has an admin role (i.e. root or node) @@ -69,7 +69,7 @@ var _ AuthorizationAccessor = &planner{} // CheckPrivilege implements the AuthorizationAccessor interface. // Requires a valid transaction to be open. 
func (p *planner) CheckPrivilege( - ctx context.Context, descriptor sqlbase.DescriptorProto, privilege privilege.Kind, + ctx context.Context, descriptor sqlbase.DescriptorInterface, privilege privilege.Kind, ) error { // Verify that the txn is valid in any case, so that // we don't get the risk to say "OK" to root requests @@ -118,7 +118,9 @@ func (p *planner) CheckPrivilege( // CheckAnyPrivilege implements the AuthorizationAccessor interface. // Requires a valid transaction to be open. -func (p *planner) CheckAnyPrivilege(ctx context.Context, descriptor sqlbase.DescriptorProto) error { +func (p *planner) CheckAnyPrivilege( + ctx context.Context, descriptor sqlbase.DescriptorInterface, +) error { // Verify that the txn is valid in any case, so that // we don't get the risk to say "OK" to root requests // with an invalid API usage. diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 5b2045512b06..06b689dcee55 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -637,8 +637,8 @@ func (sc *SchemaChanger) truncateIndexes( // Hydrate types used in the retrieved table. // TODO (rohany): This can be removed once table access from the // desc.Collection returns tables with hydrated types. 
- typLookup := func(id sqlbase.ID) (*tree.TypeName, *sqlbase.TypeDescriptor, error) { - return resolver.ResolveTypeDescByID(ctx, txn, sc.execCfg.Codec, id) + typLookup := func(id sqlbase.ID) (*tree.TypeName, sqlbase.TypeDescriptorInterface, error) { + return resolver.ResolveTypeDescByID(ctx, txn, sc.execCfg.Codec, id, tree.ObjectLookupFlags{}) } if err := sqlbase.HydrateTypesInTableDescriptor(tableDesc.TableDesc(), typLookup); err != nil { return err @@ -1606,7 +1606,7 @@ func validateCheckInTxn( if err != nil { return err } - return validateCheckExpr(ctx, semaCtx, check.Expr, tableDesc.TableDesc(), ie, txn) + return validateCheckExpr(ctx, semaCtx, check.Expr, tableDesc, ie, txn) } // validateFkInTxn validates foreign key constraints within the provided diff --git a/pkg/sql/catalog/accessor.go b/pkg/sql/catalog/accessor.go index 823c0b5037ab..8b2c55fa1973 100644 --- a/pkg/sql/catalog/accessor.go +++ b/pkg/sql/catalog/accessor.go @@ -22,10 +22,11 @@ import ( // Accessor provides access to sql object descriptors. type Accessor interface { + // GetDatabaseDesc looks up a database by name and returns its // descriptor. If the database is not found and required is true, // an error is returned; otherwise a nil reference is returned. - GetDatabaseDesc(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbName string, flags tree.DatabaseLookupFlags) (*sqlbase.DatabaseDescriptor, error) + GetDatabaseDesc(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbName string, flags tree.DatabaseLookupFlags) (sqlbase.DatabaseDescriptorInterface, error) // IsValidSchema returns true and the SchemaID if the given schema name is valid for the given database. IsValidSchema(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbID sqlbase.ID, scName string) (bool, sqlbase.ID, error) @@ -34,7 +35,12 @@ type Accessor interface { // database and schema. // TODO(solon): when separate schemas are supported, this // API should be extended to use schema descriptors. 
- GetObjectNames(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, db *sqlbase.DatabaseDescriptor, scName string, flags tree.DatabaseListFlags) (tree.TableNames, error) + // + // TODO(ajwerner,rohany): This API is utilized to support glob patterns that + // are fundamentally sometimes ambiguous (see GRANT and the ambiguity between + // tables and types). Furthermore, the fact that this buffers everything + // in ram in unfortunate. + GetObjectNames(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, db sqlbase.DatabaseDescriptorInterface, scName string, flags tree.DatabaseListFlags) (tree.TableNames, error) // GetObjectDesc looks up an object by name and returns both its // descriptor and that of its parent database. If the object is not diff --git a/pkg/sql/catalog/accessors/logical_schema_accessors.go b/pkg/sql/catalog/accessors/logical_schema_accessors.go index e5596ff37044..939db3de4431 100644 --- a/pkg/sql/catalog/accessors/logical_schema_accessors.go +++ b/pkg/sql/catalog/accessors/logical_schema_accessors.go @@ -17,6 +17,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" ) @@ -62,17 +64,16 @@ func (l *LogicalSchemaAccessor) GetObjectNames( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, - dbDesc *sqlbase.DatabaseDescriptor, + dbDesc sqlbase.DatabaseDescriptorInterface, scName string, flags tree.DatabaseListFlags, ) (tree.TableNames, error) { - if entry, ok := l.vs.GetVirtualSchema(scName); ok { names := make(tree.TableNames, 0, entry.NumTables()) desc := entry.Desc().TableDesc() entry.VisitTables(func(table catalog.VirtualObject) { name := tree.MakeTableNameWithSchema( - tree.Name(dbDesc.Name), tree.Name(desc.Name), 
tree.Name(table.Desc().TableDesc().Name)) + tree.Name(dbDesc.GetName()), tree.Name(desc.Name), tree.Name(table.Desc().TableDesc().Name)) name.ExplicitCatalog = flags.ExplicitPrefix name.ExplicitSchema = flags.ExplicitPrefix names = append(names, name) @@ -105,8 +106,22 @@ func (l *LogicalSchemaAccessor) GetObjectDesc( } return nil, nil } + if flags.RequireMutable { + return nil, newMutableAccessToVirtualSchemaError(scEntry, object) + } return desc.Desc(), nil } // Fallthrough. return l.Accessor.GetObjectDesc(ctx, txn, settings, codec, db, schema, object, flags) } + +func newMutableAccessToVirtualSchemaError(entry catalog.VirtualSchema, object string) error { + switch entry.Desc().GetName() { + case "pg_catalog": + return pgerror.Newf(pgcode.InsufficientPrivilege, + "%s is a system catalog", tree.ErrNameString(object)) + default: + return pgerror.Newf(pgcode.WrongObjectType, + "%s is a virtual object and cannot be modified", tree.ErrNameString(object)) + } +} diff --git a/pkg/sql/catalog/accessors/physical_schema_accessors.go b/pkg/sql/catalog/accessors/physical_schema_accessors.go index 4169b73d98a3..ffb9ec155116 100644 --- a/pkg/sql/catalog/accessors/physical_schema_accessors.go +++ b/pkg/sql/catalog/accessors/physical_schema_accessors.go @@ -68,8 +68,8 @@ func (a *CachedPhysicalAccessor) GetDatabaseDesc( codec keys.SQLCodec, name string, flags tree.DatabaseLookupFlags, -) (desc *sqlbase.DatabaseDescriptor, err error) { - isSystemDB := name == sqlbase.SystemDB.Name +) (desc sqlbase.DatabaseDescriptorInterface, err error) { + isSystemDB := name == sqlbase.SystemDatabaseName if !(flags.AvoidCached || isSystemDB || lease.TestingTableLeasesAreDisabled()) { refuseFurtherLookup, dbID, err := a.tc.GetUncommittedDatabaseID(name, flags.Required) if refuseFurtherLookup || err != nil { @@ -82,13 +82,21 @@ func (a *CachedPhysicalAccessor) GetDatabaseDesc( desc, err := a.tc.DatabaseCache().GetDatabaseDescByID(ctx, txn, dbID) if desc == nil && flags.Required { return nil, 
sqlbase.NewUndefinedDatabaseError(name) + } else if desc == nil { + // NB: We must return the actual value nil here as a typed nil will not + // be easily detectable by the caller. + return nil, nil } return desc, err } // The database was not known in the uncommitted list. Have the db // cache look it up by name for us. - return a.tc.DatabaseCache().GetDatabaseDesc(ctx, a.tc.LeaseManager().DB().Txn, name, flags.Required) + desc, err := a.tc.DatabaseCache().GetDatabaseDesc(ctx, a.tc.LeaseManager().DB().Txn, name, flags.Required) + if desc == nil || err != nil { + return nil, err + } + return desc, nil } // We avoided the cache. Go lower. diff --git a/pkg/sql/catalog/catalog.go b/pkg/sql/catalog/catalog.go index 6b66008737e3..eb37abe984bc 100644 --- a/pkg/sql/catalog/catalog.go +++ b/pkg/sql/catalog/catalog.go @@ -15,26 +15,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" ) -// Descriptor provides table information for results from a name lookup. -type Descriptor interface { - tree.NameResolutionResult - - // DatabaseDesc returns the underlying database descriptor, or nil if the - // descriptor is not a table backed object. - DatabaseDesc() *sqlbase.DatabaseDescriptor - - // SchemaDesc returns the underlying schema descriptor, or nil if the - // descriptor is not a table backed object. - SchemaDesc() *sqlbase.SchemaDescriptor - - // TableDesc returns the underlying table descriptor, or nil if the - // descriptor is not a table backed object. - TableDesc() *sqlbase.TableDescriptor - - // TypeDesc returns the underlying type descriptor, or nil if the - // descriptor is not a type backed object. - TypeDesc() *sqlbase.TypeDescriptor -} +// Descriptor is an interface for retrieved catalog descriptors. +type Descriptor = sqlbase.DescriptorInterface // VirtualSchemas is a collection of VirtualSchemas. 
type VirtualSchemas interface { diff --git a/pkg/sql/catalog/catalogkv/catalogkv.go b/pkg/sql/catalog/catalogkv/catalogkv.go index 80ac9b33aa0f..8203c3bbe435 100644 --- a/pkg/sql/catalog/catalogkv/catalogkv.go +++ b/pkg/sql/catalog/catalogkv/catalogkv.go @@ -19,10 +19,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" + "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -75,44 +77,15 @@ func ResolveSchemaID( return true, schemaID, nil } -// LookupDescriptorByID looks up the descriptor for `id` and returns it. -// It can be a table or database descriptor. -// Returns the descriptor (if found), a bool representing whether the -// descriptor was found and an error if any. -// -// TODO(ajwerner): Understand the difference between this and GetDescriptorByID. -func LookupDescriptorByID( - ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id sqlbase.ID, -) (sqlbase.DescriptorProto, bool, error) { - var desc sqlbase.DescriptorProto - for _, lookupFn := range []func() (sqlbase.DescriptorProto, error){ - func() (sqlbase.DescriptorProto, error) { - return sqlbase.GetTableDescFromID(ctx, txn, codec, id) - }, - func() (sqlbase.DescriptorProto, error) { - return sqlbase.GetDatabaseDescFromID(ctx, txn, codec, id) - }, - } { - var err error - desc, err = lookupFn() - if err != nil { - if errors.Is(err, sqlbase.ErrDescriptorNotFound) { - continue - } - return nil, false, err - } - return desc, true, nil - } - return nil, false, nil -} - // GetDescriptorByID looks up the descriptor for `id`, validates it. 
// // In most cases you'll want to use wrappers: `GetDatabaseDescByID` or // `getTableDescByID`. +// +// TODO(ajwerner): Consider passing mutability information into here. func GetDescriptorByID( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id sqlbase.ID, -) (sqlbase.DescriptorProto, error) { +) (catalog.Descriptor, error) { log.Eventf(ctx, "fetching descriptor with ID %d", id) descKey := sqlbase.MakeDescMetadataKey(codec, id) desc := &sqlbase.Descriptor{} @@ -120,7 +93,18 @@ func GetDescriptorByID( if err != nil { return nil, err } - table, database, typ := desc.Table(ts), desc.GetDatabase(), desc.GetType() + return unwrapDescriptor(ctx, txn, codec, ts, desc) +} + +// unwrapDescriptor takes a descriptor retrieved using a transaction and unwraps +// it into an immutable implementation of DescriptorInterface. It ensures that +// the ModificationTime is set properly. +func unwrapDescriptor( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, ts hlc.Timestamp, desc *sqlbase.Descriptor, +) (catalog.Descriptor, error) { + // TODO(ajwerner): Fill in the ModificationTime field for the descriptor. 
+ desc.MaybeSetModificationTimeFromMVCCTimestamp(ctx, ts) + table, database, typ, schema := desc.Table(hlc.Timestamp{}), desc.GetDatabase(), desc.GetType(), desc.GetSchema() switch { case table != nil: if err := table.MaybeFillInDescriptor(ctx, txn, codec); err != nil { @@ -129,16 +113,19 @@ func GetDescriptorByID( if err := table.Validate(ctx, txn, codec); err != nil { return nil, err } - return table, nil + return sqlbase.NewImmutableTableDescriptor(*table), nil case database != nil: - if err := database.Validate(); err != nil { + dbDesc := sqlbase.NewImmutableDatabaseDescriptor(*database) + if err := dbDesc.Validate(); err != nil { return nil, err } - return database, nil + return dbDesc, nil case typ != nil: - return typ, nil + return sqlbase.NewImmutableTypeDescriptor(*typ), nil + case schema != nil: + return sqlbase.NewImmutableSchemaDescriptor(*schema), nil default: - return nil, errors.AssertionFailedf("unknown proto: %s", desc.String()) + return nil, nil } } @@ -163,7 +150,7 @@ func CountUserDescriptors(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec) // GetAllDescriptors looks up and returns all available descriptors. func GetAllDescriptors( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, -) ([]sqlbase.DescriptorProto, error) { +) ([]sqlbase.DescriptorInterface, error) { log.Eventf(ctx, "fetching all descriptors") descsKey := sqlbase.MakeAllDescsMetadataKey(codec) kvs, err := txn.Scan(ctx, descsKey, descsKey.PrefixEnd(), 0) @@ -171,25 +158,17 @@ func GetAllDescriptors( return nil, err } - descs := make([]sqlbase.DescriptorProto, 0, len(kvs)) - for _, kv := range kvs { - desc := &sqlbase.Descriptor{} + // TODO(ajwerner): Fill in ModificationTime. 
+ rawDescs := make([]sqlbase.Descriptor, len(kvs)) + descs := make([]sqlbase.DescriptorInterface, len(kvs)) + for i, kv := range kvs { + desc := &rawDescs[i] if err := kv.ValueProto(desc); err != nil { return nil, err } - switch t := desc.Union.(type) { - case *sqlbase.Descriptor_Table: - table := desc.Table(kv.Value.Timestamp) - if err := table.MaybeFillInDescriptor(ctx, txn, codec); err != nil { - return nil, err - } - descs = append(descs, table) - case *sqlbase.Descriptor_Database: - descs = append(descs, desc.GetDatabase()) - case *sqlbase.Descriptor_Type: - descs = append(descs, desc.GetType()) - default: - return nil, errors.AssertionFailedf("Descriptor.Union has unexpected type %T", t) + var err error + if descs[i], err = unwrapDescriptor(ctx, txn, codec, kv.Value.Timestamp, desc); err != nil { + return nil, err } } return descs, nil @@ -240,10 +219,10 @@ func WriteDescToBatch( b *kv.Batch, codec keys.SQLCodec, descID sqlbase.ID, - desc sqlbase.DescriptorProto, + desc sqlbase.DescriptorInterface, ) (err error) { descKey := sqlbase.MakeDescMetadataKey(codec, descID) - descDesc := sqlbase.WrapDescriptor(desc) + descDesc := desc.DescriptorProto() if kvTrace { log.VEventf(ctx, 2, "Put %s -> %s", descKey, descDesc) } @@ -262,10 +241,10 @@ func WriteNewDescToBatch( b *kv.Batch, codec keys.SQLCodec, tableID sqlbase.ID, - desc sqlbase.DescriptorProto, + desc sqlbase.BaseDescriptorInterface, ) (err error) { descKey := sqlbase.MakeDescMetadataKey(codec, tableID) - descDesc := sqlbase.WrapDescriptor(desc) + descDesc := desc.DescriptorProto() if kvTrace { log.VEventf(ctx, 2, "CPut %s -> %s", descKey, descDesc) } @@ -278,8 +257,8 @@ func WriteNewDescToBatch( func GetDatabaseID( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, name string, required bool, ) (sqlbase.ID, error) { - if name == sqlbase.SystemDB.Name { - return sqlbase.SystemDB.ID, nil + if name == sqlbase.SystemDatabaseName { + return keys.SystemDatabaseID, nil } found, dbID, err := 
sqlbase.LookupDatabaseID(ctx, txn, codec, name) if err != nil { @@ -296,15 +275,15 @@ func GetDatabaseID( // found" condition to return an error, use mustGetDatabaseDescByID() instead. func GetDatabaseDescByID( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id sqlbase.ID, -) (*sqlbase.DatabaseDescriptor, error) { +) (*sqlbase.ImmutableDatabaseDescriptor, error) { desc, err := GetDescriptorByID(ctx, txn, codec, id) - if err != nil { + if err != nil || desc == nil { return nil, err } - db, ok := desc.(*sqlbase.DatabaseDescriptor) - if !ok { + db, ok := desc.(*sqlbase.ImmutableDatabaseDescriptor) + if desc != nil && !ok { return nil, pgerror.Newf(pgcode.WrongObjectType, - "%q is not a database", desc.String()) + "%q with ID %d is not a database", desc, log.Safe(id)) } return db, nil } @@ -313,12 +292,13 @@ func GetDatabaseDescByID( // returning an error if the descriptor is not found. func MustGetDatabaseDescByID( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id sqlbase.ID, -) (*sqlbase.DatabaseDescriptor, error) { +) (*sqlbase.ImmutableDatabaseDescriptor, error) { desc, err := GetDatabaseDescByID(ctx, txn, codec, id) if err != nil { return nil, err } if desc == nil { + // TODO(ajwerner): How does this case ever happen? return nil, sqlbase.NewUndefinedDatabaseError(fmt.Sprintf("[%d]", id)) } return desc, nil @@ -330,7 +310,7 @@ func MustGetDatabaseDescByID( // rather than making a round trip for each ID. 
func GetDatabaseDescriptorsFromIDs( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, ids []sqlbase.ID, -) ([]*sqlbase.DatabaseDescriptor, error) { +) ([]*sqlbase.ImmutableDatabaseDescriptor, error) { b := txn.NewBatch() for _, id := range ids { key := sqlbase.MakeDescMetadataKey(codec, id) @@ -339,7 +319,7 @@ func GetDatabaseDescriptorsFromIDs( if err := txn.Run(ctx, b); err != nil { return nil, err } - results := make([]*sqlbase.DatabaseDescriptor, 0, len(ids)) + results := make([]*sqlbase.ImmutableDatabaseDescriptor, 0, len(ids)) for i := range b.Results { result := &b.Results[i] if result.Err != nil { @@ -363,7 +343,8 @@ func GetDatabaseDescriptorsFromIDs( desc.String(), ) } - results = append(results, db) + desc.MaybeSetModificationTimeFromMVCCTimestamp(ctx, result.Rows[0].Value.Timestamp) + results = append(results, sqlbase.NewImmutableDatabaseDescriptor(*db)) } return results, nil } diff --git a/pkg/sql/catalog/catalogkv/physical_accessor.go b/pkg/sql/catalog/catalogkv/physical_accessor.go index 9e39588f54a5..aa169c3e620c 100644 --- a/pkg/sql/catalog/catalogkv/physical_accessor.go +++ b/pkg/sql/catalog/catalogkv/physical_accessor.go @@ -40,12 +40,12 @@ func (a UncachedPhysicalAccessor) GetDatabaseDesc( codec keys.SQLCodec, name string, flags tree.DatabaseLookupFlags, -) (desc *sqlbase.DatabaseDescriptor, err error) { - if name == sqlbase.SystemDB.Name { +) (desc sqlbase.DatabaseDescriptorInterface, err error) { + if name == sqlbase.SystemDatabaseName { // We can't return a direct reference to SystemDB, because the // caller expects a private object that can be modified in-place. 
sysDB := sqlbase.MakeSystemDatabaseDesc() - return &sysDB, nil + return sysDB, nil } found, descID, err := sqlbase.LookupDatabaseID(ctx, txn, codec, name) @@ -58,7 +58,16 @@ func (a UncachedPhysicalAccessor) GetDatabaseDesc( return nil, nil } - return GetDatabaseDescByID(ctx, txn, codec, descID) + // NB: Take care to actually return nil here rather than a typed nil which + // will not compare to nil when wrapped in the returned interface. + desc, err = GetDatabaseDescByID(ctx, txn, codec, descID) + if err != nil { + return nil, err + } + if desc == nil { + return nil, nil + } + return desc, err } // IsValidSchema implements the Accessor interface. @@ -73,24 +82,24 @@ func (a UncachedPhysicalAccessor) GetObjectNames( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, - dbDesc *sqlbase.DatabaseDescriptor, + dbDesc sqlbase.DatabaseDescriptorInterface, scName string, flags tree.DatabaseListFlags, ) (tree.TableNames, error) { - ok, schemaID, err := a.IsValidSchema(ctx, txn, codec, dbDesc.ID, scName) + ok, schemaID, err := a.IsValidSchema(ctx, txn, codec, dbDesc.GetID(), scName) if err != nil { return nil, err } if !ok { if flags.Required { - tn := tree.MakeTableNameWithSchema(tree.Name(dbDesc.Name), tree.Name(scName), "") + tn := tree.MakeTableNameWithSchema(tree.Name(dbDesc.GetName()), tree.Name(scName), "") return nil, sqlbase.NewUnsupportedSchemaUsageError(tree.ErrString(&tn.ObjectNamePrefix)) } return nil, nil } - log.Eventf(ctx, "fetching list of objects for %q", dbDesc.Name) - prefix := sqlbase.NewTableKey(dbDesc.ID, schemaID, "").Key(codec) + log.Eventf(ctx, "fetching list of objects for %q", dbDesc.GetName()) + prefix := sqlbase.NewTableKey(dbDesc.GetID(), schemaID, "").Key(codec) sr, err := txn.Scan(ctx, prefix, prefix.PrefixEnd(), 0) if err != nil { return nil, err @@ -113,7 +122,7 @@ func (a UncachedPhysicalAccessor) GetObjectNames( // will only be present in the older system.namespace. To account for this // scenario, we must do this filtering logic. 
// TODO(solon): This complexity can be removed in 20.2. - dprefix := sqlbase.NewDeprecatedTableKey(dbDesc.ID, "").Key(codec) + dprefix := sqlbase.NewDeprecatedTableKey(dbDesc.GetID(), "").Key(codec) dsr, err := txn.Scan(ctx, dprefix, dprefix.PrefixEnd(), 0) if err != nil { return nil, err @@ -129,7 +138,7 @@ func (a UncachedPhysicalAccessor) GetObjectNames( return nil, err } alreadySeen[tableName] = true - tn := tree.MakeTableNameWithSchema(tree.Name(dbDesc.Name), tree.Name(scName), tree.Name(tableName)) + tn := tree.MakeTableNameWithSchema(tree.Name(dbDesc.GetName()), tree.Name(scName), tree.Name(tableName)) tn.ExplicitCatalog = flags.ExplicitPrefix tn.ExplicitSchema = flags.ExplicitPrefix tableNames = append(tableNames, tn) @@ -145,7 +154,7 @@ func (a UncachedPhysicalAccessor) GetObjectNames( if alreadySeen[tableName] { continue } - tn := tree.MakeTableNameWithSchema(tree.Name(dbDesc.Name), tree.Name(scName), tree.Name(tableName)) + tn := tree.MakeTableNameWithSchema(tree.Name(dbDesc.GetName()), tree.Name(scName), tree.Name(tableName)) tn.ExplicitCatalog = flags.ExplicitPrefix tn.ExplicitSchema = flags.ExplicitPrefix tableNames = append(tableNames, tn) @@ -203,12 +212,13 @@ func (a UncachedPhysicalAccessor) GetObjectDesc( } // Look up the object using the discovered database descriptor. - rawDesc, err := GetDescriptorByID(ctx, txn, codec, descID) + // TODO(ajwerner): Consider pushing mutability down to GetDescriptorByID. + desc, err := GetDescriptorByID(ctx, txn, codec, descID) if err != nil { return nil, err } - switch desc := rawDesc.(type) { - case *sqlbase.TableDescriptor: + switch desc := desc.(type) { + case *sqlbase.ImmutableTableDescriptor: // We have a descriptor, allow it to be in the PUBLIC or ADD state. Possibly // OFFLINE if the relevant flag is set. 
acceptableStates := map[sqlbase.TableDescriptor_State]bool{ @@ -226,20 +236,19 @@ func (a UncachedPhysicalAccessor) GetObjectDesc( // As this table can not be renamed by users, it is okay that the first // check fails. if desc.Name == object || - object == sqlbase.NamespaceTableName && db == sqlbase.SystemDB.Name { + object == sqlbase.NamespaceTableName && db == sqlbase.SystemDatabaseName { if flags.RequireMutable { - return sqlbase.NewMutableExistingTableDescriptor(*desc), nil + return sqlbase.NewMutableExistingTableDescriptor(*desc.TableDesc()), nil } - return sqlbase.NewImmutableTableDescriptor(*desc), nil + return desc, nil } } return nil, nil - case *sqlbase.TypeDescriptor: + case *sqlbase.ImmutableTypeDescriptor: if flags.RequireMutable { - return sqlbase.NewMutableExistingTypeDescriptor(*desc), nil + return sqlbase.NewMutableExistingTypeDescriptor(*desc.TypeDesc()), nil } - return sqlbase.NewImmutableTypeDescriptor(*desc), nil - default: - return nil, nil + return desc, nil } + return nil, nil } diff --git a/pkg/sql/catalog/database/database.go b/pkg/sql/catalog/database/database.go index 1b36edad66aa..b0c60a4ae336 100644 --- a/pkg/sql/catalog/database/database.go +++ b/pkg/sql/catalog/database/database.go @@ -71,7 +71,7 @@ func (dc *Cache) setID(name string, id sqlbase.ID) { // getCachedDatabaseDesc looks up the database descriptor from the descriptor cache, // given its name. Returns nil and no error if the name is not present in the // cache. -func (dc *Cache) getCachedDatabaseDesc(name string) (*sqlbase.DatabaseDescriptor, error) { +func (dc *Cache) getCachedDatabaseDesc(name string) (*sqlbase.ImmutableDatabaseDescriptor, error) { dbID, err := dc.GetCachedDatabaseID(name) if dbID == sqlbase.InvalidID || err != nil { return nil, err @@ -82,12 +82,14 @@ func (dc *Cache) getCachedDatabaseDesc(name string) (*sqlbase.DatabaseDescriptor // getCachedDatabaseDescByID looks up the database descriptor from the descriptor cache, // given its ID. 
-func (dc *Cache) getCachedDatabaseDescByID(id sqlbase.ID) (*sqlbase.DatabaseDescriptor, error) { - if id == sqlbase.SystemDB.ID { +func (dc *Cache) getCachedDatabaseDescByID( + id sqlbase.ID, +) (*sqlbase.ImmutableDatabaseDescriptor, error) { + if id == keys.SystemDatabaseID { // We can't return a direct reference to SystemDB, because the // caller expects a private object that can be modified in-place. sysDB := sqlbase.MakeSystemDatabaseDesc() - return &sysDB, nil + return sysDB, nil } descKey := sqlbase.MakeDescMetadataKey(dc.codec, id) @@ -101,12 +103,16 @@ func (dc *Cache) getCachedDatabaseDescByID(id sqlbase.ID) (*sqlbase.DatabaseDesc return nil, err } - database := desc.GetDatabase() - if database == nil { + dbDesc := desc.GetDatabase() + if dbDesc == nil { return nil, pgerror.Newf(pgcode.WrongObjectType, "[%d] is not a database", id) } - - return database, database.Validate() + database := sqlbase.NewImmutableDatabaseDescriptor(*dbDesc) + if err := database.Validate(); err != nil { + return nil, err + } + // TODO(ajwerner): Set ModificationTime. + return database, nil } // GetDatabaseDesc returns the database descriptor given its name @@ -116,7 +122,7 @@ func (dc *Cache) GetDatabaseDesc( txnRunner func(context.Context, func(context.Context, *kv.Txn) error) error, name string, required bool, -) (*sqlbase.DatabaseDescriptor, error) { +) (*sqlbase.ImmutableDatabaseDescriptor, error) { // Lookup the database in the cache first, falling back to the KV store if it // isn't present. The cache might cause the usage of a recently renamed // database, but that's a race that could occur anyways. 
@@ -136,15 +142,21 @@ func (dc *Cache) GetDatabaseDesc( return err } a := catalogkv.UncachedPhysicalAccessor{} - desc, err = a.GetDatabaseDesc(ctx, txn, dc.codec, name, + descI, err := a.GetDatabaseDesc(ctx, txn, dc.codec, name, tree.DatabaseLookupFlags{Required: required}) - return err + if err != nil { + return err + } + if descI != nil { + desc = descI.(*sqlbase.ImmutableDatabaseDescriptor) + } + return nil }); err != nil { return nil, err } } if desc != nil { - dc.setID(name, desc.ID) + dc.setID(name, desc.GetID()) } return desc, err } @@ -153,7 +165,7 @@ func (dc *Cache) GetDatabaseDesc( // if it exists in the cache, otherwise falls back to KV operations. func (dc *Cache) GetDatabaseDescByID( ctx context.Context, txn *kv.Txn, id sqlbase.ID, -) (*sqlbase.DatabaseDescriptor, error) { +) (*sqlbase.ImmutableDatabaseDescriptor, error) { desc, err := dc.getCachedDatabaseDescByID(id) if desc == nil || err != nil { if err != nil { @@ -207,8 +219,8 @@ func (dc *Cache) GetCachedDatabaseID(name string) (sqlbase.ID, error) { return id, nil } - if name == sqlbase.SystemDB.Name { - return sqlbase.SystemDB.ID, nil + if name == sqlbase.SystemDB.GetName() { + return sqlbase.SystemDB.GetID(), nil } var nameKey sqlbase.DescriptorKey = sqlbase.NewDatabaseKey(name) diff --git a/pkg/sql/catalog/descs/collection.go b/pkg/sql/catalog/descs/collection.go index 4cd454f8ccda..213f90beab1d 100644 --- a/pkg/sql/catalog/descs/collection.go +++ b/pkg/sql/catalog/descs/collection.go @@ -118,11 +118,14 @@ type Collection struct { // cache is purged whenever events would cause a scan of all descriptors to // return different values, such as when the txn timestamp changes or when // new descriptors are written in the txn. - allDescriptors []sqlbase.DescriptorProto + // + // TODO(ajwerner): This cache may be problematic in clusters with very large + // numbers of descriptors. 
+ allDescriptors []sqlbase.DescriptorInterface // allDatabaseDescriptors is a slice of all available database descriptors. // These are purged at the same time as allDescriptors. - allDatabaseDescriptors []*sqlbase.DatabaseDescriptor + allDatabaseDescriptors []*sqlbase.ImmutableDatabaseDescriptor // allSchemasForDatabase maps databaseID -> schemaID -> schemaName. // For each databaseID, all schemas visible under the database can be @@ -326,7 +329,7 @@ func (tc *Collection) GetTableVersion( // system.users. For now we're sticking to disabling caching of // all system descriptors except the role-members-table. avoidCache := flags.AvoidCached || lease.TestingTableLeasesAreDisabled() || - (tn.Catalog() == sqlbase.SystemDB.Name && tn.ObjectName.String() != sqlbase.RoleMembersTable.Name) + (tn.Catalog() == sqlbase.SystemDatabaseName && tn.ObjectName.String() != sqlbase.RoleMembersTable.Name) if refuseFurtherLookup, table, err := tc.getUncommittedTable( dbID, @@ -731,26 +734,26 @@ func (tc *Collection) GetUncommittedTableByID(id sqlbase.ID) UncommittedTable { // GetAllDescriptors returns all descriptors visible by the transaction, // first checking the Collection's cached descriptors for validity // before defaulting to a key-value scan, if necessary. +// +// TODO(ajwerner): Have this return []sqlbase.DescriptorInterface. func (tc *Collection) GetAllDescriptors( ctx context.Context, txn *kv.Txn, -) ([]sqlbase.DescriptorProto, error) { +) ([]sqlbase.DescriptorInterface, error) { if tc.allDescriptors == nil { descs, err := catalogkv.GetAllDescriptors(ctx, txn, tc.codec()) if err != nil { return nil, err } - // There could be tables with user defined types that need hydrating, // so collect the needed information to set up metadata in those types. 
- dbDescs := make(map[sqlbase.ID]*sqlbase.DatabaseDescriptor) - typDescs := make(map[sqlbase.ID]*sqlbase.TypeDescriptor) - for i := range descs { - desc := descs[i] - switch t := desc.(type) { - case *sqlbase.DatabaseDescriptor: - dbDescs[t.ID] = t - case *sqlbase.TypeDescriptor: - typDescs[t.ID] = t + dbDescs := make(map[sqlbase.ID]*sqlbase.ImmutableDatabaseDescriptor) + typDescs := make(map[sqlbase.ID]*sqlbase.ImmutableTypeDescriptor) + for _, desc := range descs { + switch desc := desc.(type) { + case *sqlbase.ImmutableDatabaseDescriptor: + dbDescs[desc.GetID()] = desc + case *sqlbase.ImmutableTypeDescriptor: + typDescs[desc.GetID()] = desc } } // If we found any type descriptors, that means that some of the tables we @@ -759,21 +762,21 @@ func (tc *Collection) GetAllDescriptors( // Since we just scanned all the descriptors, we already have everything // we need to hydrate our types. Set up an accessor for the type hydration // method to look into the scanned set of descriptors. - typeLookup := func(id sqlbase.ID) (*tree.TypeName, *sqlbase.TypeDescriptor, error) { + typeLookup := func(id sqlbase.ID) (*tree.TypeName, sqlbase.TypeDescriptorInterface, error) { typDesc := typDescs[id] dbDesc := dbDescs[typDesc.ParentID] - schemaName, err := resolver.ResolveSchemaNameByID(ctx, txn, tc.codec(), dbDesc.ID, typDesc.ParentSchemaID) + schemaName, err := resolver.ResolveSchemaNameByID(ctx, txn, tc.codec(), dbDesc.GetID(), typDesc.ParentSchemaID) if err != nil { return nil, nil, err } - name := tree.MakeNewQualifiedTypeName(dbDesc.Name, schemaName, typDesc.Name) + name := tree.MakeNewQualifiedTypeName(dbDesc.GetName(), schemaName, typDesc.GetName()) return &name, typDesc, nil } // Now hydrate all table descriptors. 
for i := range descs { desc := descs[i] - if tbl, ok := desc.(*sqlbase.TableDescriptor); ok { - if err := sqlbase.HydrateTypesInTableDescriptor(tbl, typeLookup); err != nil { + if tblDesc, ok := desc.(*sqlbase.ImmutableTableDescriptor); ok { + if err := sqlbase.HydrateTypesInTableDescriptor(tblDesc.TableDesc(), typeLookup); err != nil { // If we ran into an error hydrating the types, that means that we // have some sort of corrupted descriptor state. Rather than disable // uses of GetAllDescriptors, just log the error. @@ -794,7 +797,7 @@ func (tc *Collection) GetAllDescriptors( // in the database cache, if necessary. func (tc *Collection) GetAllDatabaseDescriptors( ctx context.Context, txn *kv.Txn, -) ([]*sqlbase.DatabaseDescriptor, error) { +) ([]*sqlbase.ImmutableDatabaseDescriptor, error) { if tc.allDatabaseDescriptors == nil { dbDescIDs, err := catalogkv.GetAllDatabaseDescriptorIDs(ctx, txn, tc.codec()) if err != nil { diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index 255b9a66d287..2dc114bc004d 100644 --- a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -442,7 +442,7 @@ func (s Storage) PublishMultiple( b := txn.NewBatch() for tableID, tableDesc := range tableDescs { - if err := catalogkv.WriteDescToBatch(ctx, false /* kvTrace */, s.settings, b, s.codec, tableID, tableDesc.TableDesc()); err != nil { + if err := catalogkv.WriteDescToBatch(ctx, false /* kvTrace */, s.settings, b, s.codec, tableID, tableDesc); err != nil { return err } } diff --git a/pkg/sql/catalog/resolver/resolver.go b/pkg/sql/catalog/resolver/resolver.go index a9ff7fae1fb0..5e15f92bbe37 100644 --- a/pkg/sql/catalog/resolver/resolver.go +++ b/pkg/sql/catalog/resolver/resolver.go @@ -59,7 +59,7 @@ func GetObjectNames( txn *kv.Txn, sc SchemaResolver, codec keys.SQLCodec, - dbDesc *sqlbase.DatabaseDescriptor, + dbDesc sqlbase.DatabaseDescriptorInterface, scName string, explicitPrefix bool, ) (res tree.TableNames, err error) { @@ -199,7 
+199,7 @@ func ResolveExistingObject( // prefix for the input object. func ResolveTargetObject( ctx context.Context, sc SchemaResolver, un *tree.UnresolvedObjectName, -) (*sqlbase.DatabaseDescriptor, tree.ObjectNamePrefix, error) { +) (*sqlbase.ImmutableDatabaseDescriptor, tree.ObjectNamePrefix, error) { found, prefix, descI, err := tree.ResolveTarget(ctx, un, sc, sc.CurrentDatabase(), sc.CurrentSearchPath()) if err != nil { return nil, prefix, err @@ -218,7 +218,7 @@ func ResolveTargetObject( return nil, prefix, pgerror.Newf(pgcode.InvalidName, "schema cannot be modified: %q", tree.ErrString(&prefix)) } - return descI.(*sqlbase.DatabaseDescriptor), prefix, nil + return descI.(*sqlbase.ImmutableDatabaseDescriptor), prefix, nil } // ResolveRequiredType can be passed to the ResolveExistingTableObject function to @@ -280,20 +280,24 @@ func ResolveSchemaNameByID( // TODO (rohany): Once we lease types, this should be pushed down into the // leased object collection. func ResolveTypeDescByID( - ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id sqlbase.ID, -) (*tree.TypeName, *sqlbase.TypeDescriptor, error) { - rawDesc, err := catalogkv.GetDescriptorByID(ctx, txn, codec, id) + ctx context.Context, + txn *kv.Txn, + codec keys.SQLCodec, + id sqlbase.ID, + lookupFlags tree.ObjectLookupFlags, +) (*tree.TypeName, sqlbase.TypeDescriptorInterface, error) { + desc, err := catalogkv.GetDescriptorByID(ctx, txn, codec, id) if err != nil { return nil, nil, err } - typDesc, ok := rawDesc.(*sqlbase.TypeDescriptor) - if !ok { - return nil, nil, errors.AssertionFailedf("%s was not a type descriptor", rawDesc) + if desc.TypeDesc() == nil { + return nil, nil, errors.AssertionFailedf("%s was not a type descriptor", desc) } // Get the parent database and schema names to create a fully qualified // name for the type. // TODO (SQLSchema): As we add leasing for all descriptors, these calls // should look into those leased copies, rather than do raw reads. 
+ typDesc := desc.(*sqlbase.ImmutableTypeDescriptor) db, err := sqlbase.GetDatabaseDescFromID(ctx, txn, codec, typDesc.ParentID) if err != nil { return nil, nil, err @@ -302,8 +306,19 @@ func ResolveTypeDescByID( if err != nil { return nil, nil, err } - name := tree.MakeNewQualifiedTypeName(db.Name, schemaName, typDesc.Name) - return &name, typDesc, nil + name := tree.MakeNewQualifiedTypeName(db.GetName(), schemaName, typDesc.GetName()) + var ret sqlbase.TypeDescriptorInterface + if lookupFlags.RequireMutable { + // TODO(ajwerner): Figure this out later when we construct this inside of + // the name resolution. This really shouldn't be happening here. Instead we + // should be taking a SchemaResolver and resolving through it which should + // be able to hit a descs.Collection and determine whether this is a new + // type or not. + desc = sqlbase.NewMutableExistingTypeDescriptor(*typDesc.TypeDesc()) + } else { + ret = typDesc + } + return &name, ret, nil } // GetForDatabase looks up and returns all available diff --git a/pkg/sql/check.go b/pkg/sql/check.go index 1860cca6d19a..724d01f5f29a 100644 --- a/pkg/sql/check.go +++ b/pkg/sql/check.go @@ -37,7 +37,7 @@ func validateCheckExpr( ctx context.Context, semaCtx *tree.SemaContext, exprStr string, - tableDesc *sqlbase.TableDescriptor, + tableDesc *sqlbase.MutableTableDescriptor, ie *InternalExecutor, txn *kv.Txn, ) error { @@ -46,7 +46,7 @@ func validateCheckExpr( return err } // Construct AST and then convert to a string, to avoid problems with escaping the check expression - tblref := tree.TableRef{TableID: int64(tableDesc.ID), As: tree.AliasClause{Alias: "t"}} + tblref := tree.TableRef{TableID: int64(tableDesc.GetID()), As: tree.AliasClause{Alias: "t"}} sel := &tree.SelectClause{ Exprs: sqlbase.ColumnsSelectors(tableDesc.Columns), From: tree.From{Tables: []tree.TableExpr{&tblref}}, @@ -54,7 +54,7 @@ func validateCheckExpr( } lim := &tree.Limit{Count: tree.NewDInt(1)} stmt := &tree.Select{Select: sel, Limit: lim} - 
queryStr := tree.AsStringWithFlags(stmt, tree.FmtParsable) + queryStr := tree.AsStringWithFlags(stmt, tree.FmtSerializable) log.Infof(ctx, "Validating check constraint %q with query %q", tree.SerializeForDisplay(expr), queryStr) rows, err := ie.QueryRow(ctx, "validate check constraint", txn, queryStr) @@ -360,7 +360,7 @@ func checkMutationInput( } else if !res && checkVals[colIdx] != tree.DNull { // Failed to satisfy CHECK constraint, so unwrap the serialized // check expression to display to the user. - expr, exprErr := schemaexpr.DeserializeTableDescExpr(ctx, semaCtx, tabDesc.TableDesc(), checks[i].Expr) + expr, exprErr := schemaexpr.DeserializeTableDescExpr(ctx, semaCtx, tabDesc, checks[i].Expr) if exprErr != nil { // If we ran into an error trying to read the check constraint, wrap it // and return. diff --git a/pkg/sql/colexec/any_not_null_agg_tmpl.go b/pkg/sql/colexec/any_not_null_agg_tmpl.go index 70e54845dc58..ffc07ab045ab 100644 --- a/pkg/sql/colexec/any_not_null_agg_tmpl.go +++ b/pkg/sql/colexec/any_not_null_agg_tmpl.go @@ -30,9 +30,6 @@ import ( "github.com/cockroachdb/errors" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. diff --git a/pkg/sql/colexec/cast_tmpl.go b/pkg/sql/colexec/cast_tmpl.go index cd934bd47543..ff08c5a8ae50 100644 --- a/pkg/sql/colexec/cast_tmpl.go +++ b/pkg/sql/colexec/cast_tmpl.go @@ -36,9 +36,6 @@ import ( "github.com/cockroachdb/errors" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* type _R_GO_TYPE interface{} diff --git a/pkg/sql/colexec/const_tmpl.go b/pkg/sql/colexec/const_tmpl.go index 8d0d0e781677..db484ac2d065 100644 --- a/pkg/sql/colexec/const_tmpl.go +++ b/pkg/sql/colexec/const_tmpl.go @@ -31,9 +31,6 @@ import ( "github.com/cockroachdb/errors" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. 
diff --git a/pkg/sql/colexec/distinct_tmpl.go b/pkg/sql/colexec/distinct_tmpl.go index 21f1482c2d67..987126066964 100644 --- a/pkg/sql/colexec/distinct_tmpl.go +++ b/pkg/sql/colexec/distinct_tmpl.go @@ -84,9 +84,6 @@ func NewOrderedDistinct( }, nil } -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. @@ -241,30 +238,25 @@ func (p *distinct_TYPEOp) Next(ctx context.Context) coldata.Batch { // Bounds check elimination. sel = sel[:n] if nulls != nil { - for _, checkIdx := range sel { - outputIdx := checkIdx - _CHECK_DISTINCT_WITH_NULLS(checkIdx, outputIdx, lastVal, nulls, lastValNull, col, outputCol) + for _, idx := range sel { + lastVal, lastValNull = checkDistinctWithNulls(idx, idx, lastVal, nulls, lastValNull, col, outputCol) } } else { - for _, checkIdx := range sel { - outputIdx := checkIdx - _CHECK_DISTINCT(checkIdx, outputIdx, lastVal, col, outputCol) + for _, idx := range sel { + lastVal = checkDistinct(idx, idx, lastVal, col, outputCol) } } } else { - // Bounds check elimination. 
col = execgen.SLICE(col, 0, n) outputCol = outputCol[:n] _ = outputCol[n-1] if nulls != nil { - for execgen.RANGE(checkIdx, col, 0, n) { - outputIdx := checkIdx - _CHECK_DISTINCT_WITH_NULLS(checkIdx, outputIdx, lastVal, nulls, lastValNull, col, outputCol) + for execgen.RANGE(idx, col, 0, n) { + lastVal, lastValNull = checkDistinctWithNulls(idx, idx, lastVal, nulls, lastValNull, col, outputCol) } } else { - for execgen.RANGE(checkIdx, col, 0, n) { - outputIdx := checkIdx - _CHECK_DISTINCT(checkIdx, outputIdx, lastVal, col, outputCol) + for execgen.RANGE(idx, col, 0, n) { + lastVal = checkDistinct(idx, idx, lastVal, col, outputCol) } } } @@ -297,11 +289,11 @@ func (p partitioner_TYPE) partitionWithOrder( outputCol[0] = true if nulls != nil { for outputIdx, checkIdx := range order { - _CHECK_DISTINCT_WITH_NULLS(checkIdx, outputIdx, lastVal, nulls, lastValNull, col, outputCol) + lastVal, lastValNull = checkDistinctWithNulls(checkIdx, outputIdx, lastVal, nulls, lastValNull, col, outputCol) } } else { for outputIdx, checkIdx := range order { - _CHECK_DISTINCT(checkIdx, outputIdx, lastVal, col, outputCol) + lastVal = checkDistinct(checkIdx, outputIdx, lastVal, col, outputCol) } } } @@ -321,14 +313,12 @@ func (p partitioner_TYPE) partition(colVec coldata.Vec, outputCol []bool, n int) outputCol = outputCol[:n] outputCol[0] = true if nulls != nil { - for execgen.RANGE(checkIdx, col, 0, n) { - outputIdx := checkIdx - _CHECK_DISTINCT_WITH_NULLS(checkIdx, outputIdx, lastVal, nulls, lastValNull, col, outputCol) + for execgen.RANGE(idx, col, 0, n) { + lastVal, lastValNull = checkDistinctWithNulls(idx, idx, lastVal, nulls, lastValNull, col, outputCol) } } else { - for execgen.RANGE(checkIdx, col, 0, n) { - outputIdx := checkIdx - _CHECK_DISTINCT(checkIdx, outputIdx, lastVal, col, outputCol) + for execgen.RANGE(idx, col, 0, n) { + lastVal = checkDistinct(idx, idx, lastVal, col, outputCol) } } } @@ -336,33 +326,27 @@ func (p partitioner_TYPE) partition(colVec coldata.Vec, outputCol 
[]bool, n int) // {{end}} // {{end}} -// {{/* -// _CHECK_DISTINCT retrieves the value at the ith index of col, compares it +// checkDistinct retrieves the value at the ith index of col, compares it // to the passed in lastVal, and sets the ith value of outputCol to true if the // compared values were distinct. It presumes that the current batch has no null // values. -func _CHECK_DISTINCT( +// execgen:inline +func checkDistinct( checkIdx int, outputIdx int, lastVal _GOTYPE, col []_GOTYPE, outputCol []bool, -) { // */}} - - // {{define "checkDistinct" -}} - // {{with .Global}} +) _GOTYPE { v := execgen.UNSAFEGET(col, checkIdx) var unique bool _ASSIGN_NE(unique, v, lastVal, _, col, _) outputCol[outputIdx] = outputCol[outputIdx] || unique execgen.COPYVAL(lastVal, v) - // {{end}} - // {{end}} - - // {{/* -} // */}} + return lastVal +} -// {{/* -// _CHECK_DISTINCT_WITH_NULLS behaves the same as _CHECK_DISTINCT, but it also +// checkDistinctWithNulls behaves the same as checkDistinct, but it also // considers whether the previous and current values are null. It assumes that // `nulls` is non-nil. 
-func _CHECK_DISTINCT_WITH_NULLS( +// execgen:inline +func checkDistinctWithNulls( checkIdx int, outputIdx int, lastVal _GOTYPE, @@ -370,10 +354,7 @@ func _CHECK_DISTINCT_WITH_NULLS( lastValNull bool, col []_GOTYPE, outputCol []bool, -) { // */}} - - // {{define "checkDistinctWithNulls" -}} - // {{with .Global}} +) (lastVal _GOTYPE, lastValNull bool) { null := nulls.NullAt(checkIdx) if null { if !lastValNull { @@ -393,9 +374,5 @@ func _CHECK_DISTINCT_WITH_NULLS( } execgen.COPYVAL(lastVal, v) } - lastValNull = null - // {{end}} - // {{end}} - - // {{/* -} // */}} + return lastVal, null +} diff --git a/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go index cdb7b0a6a149..ec1b23d4368a 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go @@ -33,11 +33,6 @@ func genDistinctOps(inputFileContents string, wr io.Writer) error { assignNeRe := makeFunctionRegex("_ASSIGN_NE", 6) s = assignNeRe.ReplaceAllString(s, makeTemplateFunctionCall("Assign", 6)) - innerLoopRe := makeFunctionRegex("_CHECK_DISTINCT", 5) - s = innerLoopRe.ReplaceAllString(s, `{{template "checkDistinct" buildDict "Global" .}}`) - - innerLoopNullsRe := makeFunctionRegex("_CHECK_DISTINCT_WITH_NULLS", 7) - s = innerLoopNullsRe.ReplaceAllString(s, `{{template "checkDistinctWithNulls" buildDict "Global" .}}`) s = replaceManipulationFuncs(s) // Now, generate the op, from the template. 
@@ -48,6 +43,7 @@ func genDistinctOps(inputFileContents string, wr io.Writer) error { return tmpl.Execute(wr, sameTypeComparisonOpToOverloads[tree.NE]) } + func init() { registerGenerator(genDistinctOps, "distinct.eg.go", distinctOpsTmpl) } diff --git a/pkg/sql/colexec/execgen/cmd/execgen/main.go b/pkg/sql/colexec/execgen/cmd/execgen/main.go index eba42ab1dcd9..e0235bba19dd 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/main.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/main.go @@ -20,6 +20,7 @@ import ( "path/filepath" "regexp" + "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecbase/colexecerror" "github.com/cockroachdb/errors" "github.com/cockroachdb/gostdlib/x/tools/imports" @@ -59,11 +60,11 @@ type entry struct { inputFile string } -func registerGenerator(g generator, filename, dep string) { - if _, ok := generators[filename]; ok { - colexecerror.InternalError(fmt.Sprintf("%s generator already registered", filename)) +func registerGenerator(g generator, outputFile, inputFile string) { + if _, ok := generators[outputFile]; ok { + colexecerror.InternalError(fmt.Sprintf("%s generator already registered", outputFile)) } - generators[filename] = entry{fn: g, inputFile: dep} + generators[outputFile] = entry{fn: g, inputFile: inputFile} } func (g *execgenTool) run(args ...string) bool { @@ -108,7 +109,6 @@ func (g *execgenTool) run(args ...string) bool { } } } - return true } @@ -119,16 +119,21 @@ func (g *execgenTool) generate(path string, entry entry) error { var buf bytes.Buffer buf.WriteString("// Code generated by execgen; DO NOT EDIT.\n") - var inputFileContents []byte + var inputFileContents string var err error if entry.inputFile != "" { - inputFileContents, err = ioutil.ReadFile(entry.inputFile) + inputFileBytes, err := ioutil.ReadFile(entry.inputFile) + if err != nil { + return err + } + // Inline functions with // execgen:inline. 
+ inputFileContents, err = execgen.InlineFuncs(string(inputFileBytes)) if err != nil { return err } } - err = entry.fn(string(inputFileContents), &buf) + err = entry.fn(inputFileContents, &buf) if err != nil { return err } diff --git a/pkg/sql/colexec/execgen/cmd/execgen/overloads_bin.go b/pkg/sql/colexec/execgen/cmd/execgen/overloads_bin.go index 1e4cd2a469e7..d94378b98273 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/overloads_bin.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/overloads_bin.go @@ -437,8 +437,8 @@ func (c intCustomizer) getBinOpAssignFunc() assignFunc { colexecerror.ExpectedError(tree.ErrDivByZero) } leftTmpDec, rightTmpDec := &_overloadHelper.tmpDec1, &_overloadHelper.tmpDec2 - leftTmpDec.SetFinite(int64({{.Left}}), 0) - rightTmpDec.SetFinite(int64({{.Right}}), 0) + leftTmpDec.SetInt64(int64({{.Left}})) + rightTmpDec.SetInt64(int64({{.Right}})) if _, err := tree.{{.Ctx}}.Quo(&{{.Target}}, leftTmpDec, rightTmpDec); err != nil { colexecerror.ExpectedError(err) } @@ -487,7 +487,7 @@ func (c decimalIntCustomizer) getBinOpAssignFunc() assignFunc { } {{end}} tmpDec := &_overloadHelper.tmpDec1 - tmpDec.SetFinite(int64({{.Right}}), 0) + tmpDec.SetInt64(int64({{.Right}})) if _, err := tree.{{.Ctx}}.{{.Op}}(&{{.Target}}, &{{.Left}}, tmpDec); err != nil { colexecerror.ExpectedError(err) } @@ -520,7 +520,7 @@ func (c intDecimalCustomizer) getBinOpAssignFunc() assignFunc { } {{end}} tmpDec := &_overloadHelper.tmpDec1 - tmpDec.SetFinite(int64({{.Left}}), 0) + tmpDec.SetInt64(int64({{.Left}})) _, err := tree.{{.Ctx}}.{{.Op}}(&{{.Target}}, tmpDec, &{{.Right}}) if err != nil { colexecerror.ExpectedError(err) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/overloads_cmp.go b/pkg/sql/colexec/execgen/cmd/execgen/overloads_cmp.go index 42a3652afec2..f7a02f9c744e 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/overloads_cmp.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/overloads_cmp.go @@ -242,7 +242,7 @@ func (c decimalIntCustomizer) getCmpOpCompareFunc() 
compareFunc { t := template.Must(template.New("").Parse(` { tmpDec := &_overloadHelper.tmpDec1 - tmpDec.SetFinite(int64({{.Right}}), 0) + tmpDec.SetInt64(int64({{.Right}})) {{.Target}} = tree.CompareDecimals(&{{.Left}}, tmpDec) } `)) @@ -280,7 +280,7 @@ func (c intDecimalCustomizer) getCmpOpCompareFunc() compareFunc { t := template.Must(template.New("").Parse(` { tmpDec := &_overloadHelper.tmpDec1 - tmpDec.SetFinite(int64({{.Left}}), 0) + tmpDec.SetInt64(int64({{.Left}})) {{.Target}} = tree.CompareDecimals(tmpDec, &{{.Right}}) } `)) diff --git a/pkg/sql/colexec/execgen/datadriven_test.go b/pkg/sql/colexec/execgen/datadriven_test.go new file mode 100644 index 000000000000..c049fc9af53c --- /dev/null +++ b/pkg/sql/colexec/execgen/datadriven_test.go @@ -0,0 +1,36 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package execgen + +import ( + "testing" + + "github.com/cockroachdb/datadriven" +) + +// Walk walks path for datadriven files and calls RunTest on them. +func TestExecgen(t *testing.T) { + datadriven.Walk(t, "testdata", func(t *testing.T, path string) { + datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string { + switch d.Cmd { + case "inline": + s, err := InlineFuncs(d.Input) + if err != nil { + t.Fatal(err) + } + return s + default: + t.Fatalf("unknown command: %s", d.Cmd) + return "" + } + }) + }) +} diff --git a/pkg/sql/colexec/execgen/inline.go b/pkg/sql/colexec/execgen/inline.go new file mode 100644 index 000000000000..9e81d4ad781d --- /dev/null +++ b/pkg/sql/colexec/execgen/inline.go @@ -0,0 +1,365 @@ +// Copyright 2020 The Cockroach Authors. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package execgen + +import ( + "bytes" + "fmt" + "go/parser" + "go/token" + + "github.com/cockroachdb/errors" + "github.com/dave/dst" + "github.com/dave/dst/decorator" + "github.com/dave/dst/dstutil" +) + +// InlineFuncs takes an input file's contents and inlines all functions +// annotated with // execgen:inline into their callsites via AST manipulation. +func InlineFuncs(inputFileContents string) (string, error) { + f, err := decorator.ParseFile(token.NewFileSet(), "", inputFileContents, parser.ParseComments) + if err != nil { + return "", err + } + + templateFuncMap := make(map[string]*dst.FuncDecl) + + // First, run over the input file, searching for functions that are annotated + // with execgen:inline. + n := extractInlineFuncDecls(f, templateFuncMap) + + // Do a second pass over the AST, this time replacing calls to the inlined + // functions with the inlined function itself. + dstutil.Apply(n, func(cursor *dstutil.Cursor) bool { + n := cursor.Node() + // There are two cases. AssignStmt, which are like: + // a = foo() + // and ExprStmt, which are simply: + // foo() + // AssignStmts need to do extra work for inlining, because we have to + // simulate producing return values. + switch n := n.(type) { + case *dst.AssignStmt: + // Search for assignment function call: + // a = foo() + callExpr, ok := n.Rhs[0].(*dst.CallExpr) + if !ok { + return true + } + decl := getTemplateFunc(templateFuncMap, callExpr) + if decl == nil { + return true + } + if len(n.Rhs) > 1 { + panic("can't do template replacement with more than a single RHS to a CallExpr") + } + + // Now we've got a callExpr. 
We need to inline the function call, and + // convert the result into the assignment variable. + + // Produce declarations for each return value of the function to inline. + retValDeclStmt, retValNames := extractReturnValues(decl) + // inlinedStatements is a BlockStmt (a set of statements within curly + // braces) that contains the entirety of the statements that result from + // inlining the call. We make this a BlockStmt to avoid issues with + // variable shadowing. + // The first thing that goes in the BlockStmt is the ret val declarations. + // When we're done, the BlockStmt for a statement + // a, b = foo(x, y) + // where foo was defined as + // func foo(b string, c string) { ... } + // will look like: + // { + // var ( + // __retval_0 bool + // __retval_1 int + // ) + // ... + // { + // b := x + // c := y + // ... the contents of func foo() except its return ... + // { + // // If foo() had `return true, j`, we'll generate the code: + // __retval_0 = true + // __retval_1 = j + // } + // } + // a = __retval_0 + // b = __retval_1 + // } + inlinedStatements := &dst.BlockStmt{ + List: []dst.Stmt{retValDeclStmt}, + } + + // Replace return statements with assignments to the return values. + // Make a copy of the function to inline, and walk through it, replacing + // return statements at the end of the body with assignments to the return + // value declarations we made first. + body := dst.Clone(decl.Body).(*dst.BlockStmt) + body = replaceReturnStatements(decl.Name.Name, body, func(stmt *dst.ReturnStmt) dst.Stmt { + returnAssignmentSpecs := make([]dst.Stmt, len(retValNames)) + for i := range retValNames { + returnAssignmentSpecs[i] = &dst.AssignStmt{ + Lhs: []dst.Expr{dst.NewIdent(retValNames[i])}, + Tok: token.ASSIGN, + Rhs: []dst.Expr{stmt.Results[i]}, + } + } + // Replace the return with the new assignments. + return &dst.BlockStmt{List: returnAssignmentSpecs} + }) + // Reassign input parameters to formal parameters. 
+ reassignmentStmt := getFormalParamReassignments(decl, callExpr) + inlinedStatements.List = append(inlinedStatements.List, &dst.BlockStmt{ + List: append([]dst.Stmt{reassignmentStmt}, body.List...), + }) + // Assign mangled return values to the original assignment variables. + newAssignment := dst.Clone(n).(*dst.AssignStmt) + newAssignment.Rhs = make([]dst.Expr, len(retValNames)) + for i := range retValNames { + newAssignment.Rhs[i] = dst.NewIdent(retValNames[i]) + } + inlinedStatements.List = append(inlinedStatements.List, newAssignment) + cursor.Replace(inlinedStatements) + + case *dst.ExprStmt: + // Search for raw function call: + // foo() + callExpr, ok := n.X.(*dst.CallExpr) + if !ok { + return true + } + decl := getTemplateFunc(templateFuncMap, callExpr) + if decl == nil { + return true + } + + reassignments := getFormalParamReassignments(decl, callExpr) + + // This case is simpler than the AssignStmt case. It's identical, except + // there is no mangled return value name block, nor re-assignment to + // the mangled returns after the inlined function. + funcBlock := &dst.BlockStmt{ + List: []dst.Stmt{reassignments}, + } + body := dst.Clone(decl.Body).(*dst.BlockStmt) + + // Remove return values if there are any, since we're ignoring returns + // as a raw function call. + body = replaceReturnStatements(decl.Name.Name, body, nil) + // Add the inlined function body to the block. + funcBlock.List = append(funcBlock.List, body.List...) + + cursor.Replace(funcBlock) + } + return true + }, nil) + + b := bytes.Buffer{} + _ = decorator.Fprint(&b, f) + return b.String(), nil +} + +// extractInlineFuncDecls searches the input file for functions that are +// annotated with execgen:inline, extracts them into templateFuncMap, and +// deletes them from the AST. 
+func extractInlineFuncDecls(f *dst.File, templateFuncMap map[string]*dst.FuncDecl) dst.Node { + return dstutil.Apply(f, func(cursor *dstutil.Cursor) bool { + n := cursor.Node() + switch n := n.(type) { + case *dst.FuncDecl: + var mustInline bool + for _, dec := range n.Decorations().Start.All() { + if dec == "// execgen:inline" { + mustInline = true + break + } + } + if !mustInline { + // Nothing to do, but recurse further. + return true + } + for _, p := range n.Type.Params.List { + if len(p.Names) > 1 { + panic("can't currently deal with multiple names per type in decls") + } + } + // Store the function in a map. + templateFuncMap[n.Name.Name] = n + // Replace the function textually with a fake constant, such as: + // `const _ = "inlined_blahFunc"`. We do this instead + // of completely deleting it to prevent "important comments" above the + // function to be deleted, such as template comments like {{end}}. This + // is kind of a quirk of the way the comments are parsed, but nonetheless + // this is an easy fix so we'll leave it for now. + cursor.Replace(&dst.GenDecl{ + Tok: token.CONST, + Specs: []dst.Spec{ + &dst.ValueSpec{ + Names: []*dst.Ident{dst.NewIdent("_")}, + Values: []dst.Expr{ + &dst.BasicLit{ + Kind: token.STRING, + Value: fmt.Sprintf(`"inlined_%s"`, n.Name.Name), + }, + }, + }, + }, + Decs: dst.GenDeclDecorations{ + NodeDecs: n.Decs.NodeDecs, + }, + }) + return false + } + return true + }, nil) +} + +// extractReturnValues generates return value variables. It will produce one +// statement per return value of the input FuncDecl. For example, for +// a FuncDecl that returns two boolean arguments, lastVal and lastValNull, +// two statements will be returned: +// var __retval_lastVal bool +// var __retval_lastValNull bool +// The second return is a slice of the names of each of the mangled return +// declarations, in this example, __retval_lastVal and __retval_lastValNull. 
+func extractReturnValues(decl *dst.FuncDecl) (retValDeclStmt dst.Stmt, retValNames []string) { + if decl.Type.Results == nil { + return &dst.EmptyStmt{}, nil + } + results := decl.Type.Results.List + retValNames = make([]string, len(results)) + specs := make([]dst.Spec, len(results)) + for i, result := range results { + var retvalName string + // Make a mangled name. + if len(result.Names) == 0 { + retvalName = fmt.Sprintf("__retval_%d", i) + } else { + retvalName = fmt.Sprintf("__retval_%s", result.Names[0]) + } + retValNames[i] = retvalName + specs[i] = &dst.ValueSpec{ + Names: []*dst.Ident{dst.NewIdent(retvalName)}, + Type: dst.Clone(result.Type).(dst.Expr), + } + } + return &dst.DeclStmt{ + Decl: &dst.GenDecl{ + Tok: token.VAR, + Specs: specs, + }, + }, retValNames +} + +// getFormalParamReassignments creates a new declaration (var) statement per +// parameter to a FuncDecl, which makes a fresh variable with the same name as +// the formal parameter name and assigns the corresponding argument in the +// CallExpr to it. +// +// For example, given a FuncDecl: +// +// func foo(a int, b string) { ... } +// +// and a CallExpr +// +// foo(x, y) +// +// we'll return the statement: +// +// var ( +// a int = x +// b string = y +// ) +// +// In the case where the formal parameter name is the same as the input +// parameter name, no extra assignment is created. 
+func getFormalParamReassignments(decl *dst.FuncDecl, callExpr *dst.CallExpr) dst.Stmt { + formalParams := decl.Type.Params.List + reassignmentSpecs := make([]dst.Spec, 0, len(formalParams)) + for i, formalParam := range formalParams { + if inputIdent, ok := callExpr.Args[i].(*dst.Ident); ok && inputIdent.Name == formalParam.Names[0].Name { + continue + } + reassignmentSpecs = append(reassignmentSpecs, &dst.ValueSpec{ + Names: []*dst.Ident{dst.NewIdent(formalParam.Names[0].Name)}, + Type: dst.Clone(formalParam.Type).(dst.Expr), + Values: []dst.Expr{callExpr.Args[i]}, + }) + } + if len(reassignmentSpecs) == 0 { + return &dst.EmptyStmt{} + } + return &dst.DeclStmt{ + Decl: &dst.GenDecl{ + Tok: token.VAR, + Specs: reassignmentSpecs, + }, + } +} + +// replaceReturnStatements edits the input BlockStmt, from the function funcName, +// replacing ReturnStmts at the end of the BlockStmts with the results of +// applying returnEditor on the ReturnStmt or deleting them if the modifier is +// nil. +// It will panic if any return statements are not in the final position of the +// input block. +func replaceReturnStatements( + funcName string, stmt *dst.BlockStmt, returnModifier func(*dst.ReturnStmt) dst.Stmt, +) *dst.BlockStmt { + // Remove return values if there are any, since we're ignoring returns + // as a raw function call. + var seenReturn bool + return dstutil.Apply(stmt, func(cursor *dstutil.Cursor) bool { + if seenReturn { + panic(fmt.Errorf("can't inline function %s: return not at end of body (found %s)", funcName, cursor.Node())) + } + n := cursor.Node() + switch t := n.(type) { + case *dst.FuncLit: + // A FuncLit is a function literal, like: + // x := func() int { return 3 } + // We don't recurse into function literals since the return statements + // they contain aren't relevant to the inliner. 
+ return false + case *dst.ReturnStmt: + seenReturn = true + if returnModifier == nil { + cursor.Delete() + return false + } + cursor.Replace(returnModifier(t)) + return false + } + return true + }, nil).(*dst.BlockStmt) +} + +// getTemplateFunc returns the corresponding FuncDecl for a CallExpr from the +// map, using the CallExpr's name to look up the FuncDecl from templateFuncs. +func getTemplateFunc(templateFuncs map[string]*dst.FuncDecl, n *dst.CallExpr) *dst.FuncDecl { + ident, ok := n.Fun.(*dst.Ident) + if !ok { + return nil + } + + decl, ok := templateFuncs[ident.Name] + if !ok { + return nil + } + if decl.Type.Params.NumFields() != len(n.Args) { + panic(errors.Newf( + "%s expected %d arguments, found %d", + decl.Name, decl.Type.Params.NumFields(), len(n.Args)), + ) + } + return decl +} diff --git a/pkg/sql/colexec/execgen/inline_test.go b/pkg/sql/colexec/execgen/inline_test.go new file mode 100644 index 000000000000..d550f56aca6b --- /dev/null +++ b/pkg/sql/colexec/execgen/inline_test.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package execgen + +import ( + "testing" + + "github.com/dave/dst" + "github.com/stretchr/testify/assert" +) + +func TestGetFormalParamReassignments(t *testing.T) { + tests := []struct { + funcDecl string + callExpr string + + expectedStmts string + }{ + { + funcDecl: `func a() {}`, + callExpr: `a()`, + expectedStmts: ``, + }, + { + funcDecl: `func a(a int) {}`, + callExpr: `a(b)`, + expectedStmts: `var a int = b`, + }, + { + funcDecl: `func a(a int, b int) {}`, + callExpr: `a(x, y)`, + expectedStmts: `var ( + a int = x + b int = y + )`, + }, + { + funcDecl: `func a(a int, b int) {}`, + callExpr: `a(a, c)`, + expectedStmts: `var b int = c`, + }, + } + for _, tt := range tests { + callExpr := parseStmts(tt.callExpr)[0].(*dst.ExprStmt).X.(*dst.CallExpr) + funcDecl := parseDecls(tt.funcDecl)[0].(*dst.FuncDecl) + stmt := getFormalParamReassignments(funcDecl, callExpr) + actual := prettyPrintStmts(stmt) + assert.Equal(t, tt.expectedStmts, actual) + } +} + +func TestExtractReturnValues(t *testing.T) { + tests := []struct { + decl string + expectedRetDecls string + }{ + { + decl: "func foo(a int) {}", + expectedRetDecls: "", + }, + { + decl: "func foo(a int) (int, string) {}", + expectedRetDecls: `var ( + __retval_0 int + __retval_1 string + )`, + }, + { + decl: "func foo(a int) int {}", + expectedRetDecls: `var __retval_0 int`, + }, + { + decl: "func foo(a int) (a int, b string) {}", + expectedRetDecls: `var ( + __retval_a int + __retval_b string + )`, + }, + } + for _, tt := range tests { + decl := parseDecls(tt.decl)[0].(*dst.FuncDecl) + retValDecl, retValNames := extractReturnValues(decl) + if _, ok := retValDecl.(*dst.EmptyStmt); ok { + assert.Equal(t, 0, len(retValNames)) + } else { + specs := retValDecl.(*dst.DeclStmt).Decl.(*dst.GenDecl).Specs + assert.Equal(t, len(specs), len(retValNames)) + for i := range retValNames { + assert.Equal(t, retValNames[i], specs[i].(*dst.ValueSpec).Names[0].Name) + } + } + assert.Equal(t, tt.expectedRetDecls, 
prettyPrintStmts(retValDecl)) + } +} diff --git a/pkg/sql/colexec/execgen/testdata/inline b/pkg/sql/colexec/execgen/testdata/inline new file mode 100644 index 000000000000..35ea0d5c476a --- /dev/null +++ b/pkg/sql/colexec/execgen/testdata/inline @@ -0,0 +1,146 @@ +inline +package main + +func a() { + b() +} + +func c() { + b() +} + +// execgen:inline +func b() { + foo = bar +} +---- +---- +package main + +func a() { + { + foo = bar + } +} + +func c() { + { + foo = bar + } +} + +// execgen:inline +const _ = "inlined_b" +---- +---- + +inline +package main + +func a() { + b(x, b, y) +} + +// execgen:inline +func b(a int, b int, c int) { + foo = bar +} +---- +---- +package main + +func a() { + { + var ( + a int = x + c int = y + ) + foo = bar + } +} + +// execgen:inline +const _ = "inlined_b" +---- +---- + +inline +package main + +func a() { + ret1, ret2 := b(x, b, y) +} + +func c() { + ret3, ret4 := b(x, b, y) +} + +func d() { + b(x, b, y) +} + +// execgen:inline +func b(a int, b int, c int) (int, int) { + foo = bar + return b, c +} +---- +---- +package main + +func a() { + { + var ( + __retval_0 int + __retval_1 int + ) + { + var ( + a int = x + c int = y + ) + foo = bar + { + __retval_0 = b + __retval_1 = c + } + } + ret1, ret2 := __retval_0, __retval_1 + } +} + +func c() { + { + var ( + __retval_0 int + __retval_1 int + ) + { + var ( + a int = x + c int = y + ) + foo = bar + { + __retval_0 = b + __retval_1 = c + } + } + ret3, ret4 := __retval_0, __retval_1 + } +} + +func d() { + { + var ( + a int = x + c int = y + ) + foo = bar + } +} + +// execgen:inline +const _ = "inlined_b" +---- +---- diff --git a/pkg/sql/colexec/execgen/util_test.go b/pkg/sql/colexec/execgen/util_test.go new file mode 100644 index 000000000000..8c141afd833d --- /dev/null +++ b/pkg/sql/colexec/execgen/util_test.go @@ -0,0 +1,70 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package execgen + +import ( + "fmt" + "strings" + + "github.com/dave/dst" + "github.com/dave/dst/decorator" +) + +func prettyPrintStmts(stmts ...dst.Stmt) string { + if len(stmts) == 0 { + return "" + } + f := &dst.File{ + Name: dst.NewIdent("main"), + Decls: []dst.Decl{ + &dst.FuncDecl{ + Name: dst.NewIdent("test"), + Type: &dst.FuncType{}, + Body: &dst.BlockStmt{ + List: stmts, + }, + }, + }, + } + var ret strings.Builder + _ = decorator.Fprint(&ret, f) + prelude := `package main + +func test() { +` + postlude := `} +` + s := ret.String() + return strings.TrimSpace(s[len(prelude) : len(s)-len(postlude)]) +} + +func parseStmts(stmts string) []dst.Stmt { + inputStr := fmt.Sprintf(`package main +func test() { + %s +}`, stmts) + f, err := decorator.Parse(inputStr) + if err != nil { + panic(err) + } + return f.Decls[0].(*dst.FuncDecl).Body.List +} + +func parseDecls(decls string) []dst.Decl { + inputStr := fmt.Sprintf(`package main +%s +`, decls) + f, err := decorator.Parse(inputStr) + if err != nil { + panic(err) + } + return f.Decls +} diff --git a/pkg/sql/colexec/hash_aggregator_tmpl.go b/pkg/sql/colexec/hash_aggregator_tmpl.go index 2f8482c07a90..07dabb6418b5 100644 --- a/pkg/sql/colexec/hash_aggregator_tmpl.go +++ b/pkg/sql/colexec/hash_aggregator_tmpl.go @@ -28,9 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. 
diff --git a/pkg/sql/colexec/hash_utils_tmpl.go b/pkg/sql/colexec/hash_utils_tmpl.go index d8e6577cd017..308b0cb49e6d 100644 --- a/pkg/sql/colexec/hash_utils_tmpl.go +++ b/pkg/sql/colexec/hash_utils_tmpl.go @@ -30,9 +30,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // _GOTYPESLICE is a template Go type slice variable. diff --git a/pkg/sql/colexec/hashtable_tmpl.go b/pkg/sql/colexec/hashtable_tmpl.go index 8fe7c24ee659..2bcb66c9779a 100644 --- a/pkg/sql/colexec/hashtable_tmpl.go +++ b/pkg/sql/colexec/hashtable_tmpl.go @@ -21,14 +21,10 @@ package colexec import ( "github.com/cockroachdb/cockroach/pkg/col/coldata" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecbase/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/types" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // _LEFT_CANONICAL_TYPE_FAMILY is the template variable. diff --git a/pkg/sql/colexec/main_test.go b/pkg/sql/colexec/main_test.go index 851d2e170316..93d0c552fc0a 100644 --- a/pkg/sql/colexec/main_test.go +++ b/pkg/sql/colexec/main_test.go @@ -12,6 +12,7 @@ package colexec import ( "context" + "flag" "fmt" "os" "testing" @@ -66,12 +67,16 @@ func TestMain(m *testing.M) { testDiskAcc = &diskAcc defer testDiskAcc.Close(ctx) - // Pick a random batch size in [minBatchSize, coldata.MaxBatchSize] - // range. The randomization can be disabled using COCKROACH_RANDOMIZE_BATCH_SIZE=false. - randomBatchSize := generateBatchSize() - fmt.Printf("coldata.BatchSize() is set to %d\n", randomBatchSize) - if err := coldata.SetBatchSizeForTests(randomBatchSize); err != nil { - colexecerror.InternalError(err) + flag.Parse() + if f := flag.Lookup("test.bench"); f == nil || f.Value.String() == "" { + // If we're running benchmarks, don't set a random batch size. + // Pick a random batch size in [minBatchSize, coldata.MaxBatchSize] + // range. 
The randomization can be disabled using COCKROACH_RANDOMIZE_BATCH_SIZE=false. + randomBatchSize := generateBatchSize() + fmt.Printf("coldata.BatchSize() is set to %d\n", randomBatchSize) + if err := coldata.SetBatchSizeForTests(randomBatchSize); err != nil { + colexecerror.InternalError(err) + } } return m.Run() }()) diff --git a/pkg/sql/colexec/mergejoinbase_tmpl.go b/pkg/sql/colexec/mergejoinbase_tmpl.go index 4d9de321fbf9..e6d0a62f6f1b 100644 --- a/pkg/sql/colexec/mergejoinbase_tmpl.go +++ b/pkg/sql/colexec/mergejoinbase_tmpl.go @@ -28,9 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. diff --git a/pkg/sql/colexec/mergejoiner_tmpl.go b/pkg/sql/colexec/mergejoiner_tmpl.go index 380939be8894..7447e369dfed 100644 --- a/pkg/sql/colexec/mergejoiner_tmpl.go +++ b/pkg/sql/colexec/mergejoiner_tmpl.go @@ -30,9 +30,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. diff --git a/pkg/sql/colexec/min_max_agg_tmpl.go b/pkg/sql/colexec/min_max_agg_tmpl.go index f770c56b8d73..d7c5cb97a3ba 100644 --- a/pkg/sql/colexec/min_max_agg_tmpl.go +++ b/pkg/sql/colexec/min_max_agg_tmpl.go @@ -31,9 +31,6 @@ import ( "github.com/cockroachdb/errors" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // Remove unused warning. var _ = colexecerror.InternalError diff --git a/pkg/sql/colexec/ordered_synchronizer_tmpl.go b/pkg/sql/colexec/ordered_synchronizer_tmpl.go index 27e820c167b5..1788ad52e862 100644 --- a/pkg/sql/colexec/ordered_synchronizer_tmpl.go +++ b/pkg/sql/colexec/ordered_synchronizer_tmpl.go @@ -36,9 +36,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/encoding" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. 
diff --git a/pkg/sql/colexec/overloads_test.go b/pkg/sql/colexec/overloads_test.go index ade2112c6b82..4a748623e746 100644 --- a/pkg/sql/colexec/overloads_test.go +++ b/pkg/sql/colexec/overloads_test.go @@ -68,11 +68,11 @@ func TestIntegerDivision(t *testing.T) { var res apd.Decimal res = performDivInt16Int16(math.MinInt16, -1) - require.Equal(t, 0, res.Cmp(d.SetFinite(-math.MinInt16, 0))) + require.Equal(t, 0, res.Cmp(d.SetInt64(-math.MinInt16))) res = performDivInt32Int32(math.MinInt32, -1) - require.Equal(t, 0, res.Cmp(d.SetFinite(-math.MinInt32, 0))) + require.Equal(t, 0, res.Cmp(d.SetInt64(-math.MinInt32))) res = performDivInt64Int64(math.MinInt64, -1) - d.SetFinite(math.MinInt64, 0) + d.SetInt64(math.MinInt64) if _, err := tree.DecimalCtx.Neg(d, d); err != nil { t.Error(err) } @@ -83,11 +83,11 @@ func TestIntegerDivision(t *testing.T) { require.True(t, errors.Is(colexecerror.CatchVectorizedRuntimeError(func() { performDivInt64Int64(10, 0) }), tree.ErrDivByZero)) res = performDivInt16Int16(math.MaxInt16, -1) - require.Equal(t, 0, res.Cmp(d.SetFinite(-math.MaxInt16, 0))) + require.Equal(t, 0, res.Cmp(d.SetInt64(-math.MaxInt16))) res = performDivInt32Int32(math.MaxInt32, -1) - require.Equal(t, 0, res.Cmp(d.SetFinite(-math.MaxInt32, 0))) + require.Equal(t, 0, res.Cmp(d.SetInt64(-math.MaxInt32))) res = performDivInt64Int64(math.MaxInt64, -1) - require.Equal(t, 0, res.Cmp(d.SetFinite(-math.MaxInt64, 0))) + require.Equal(t, 0, res.Cmp(d.SetInt64(-math.MaxInt64))) } func TestIntegerMultiplication(t *testing.T) { @@ -145,20 +145,20 @@ func TestMixedTypeInteger(t *testing.T) { var res apd.Decimal res = performDivInt16Int32(4, 2) - require.Equal(t, 0, res.Cmp(d.SetFinite(2, 0))) + require.Equal(t, 0, res.Cmp(d.SetInt64(2))) res = performDivInt16Int64(6, 2) - require.Equal(t, 0, res.Cmp(d.SetFinite(3, 0))) + require.Equal(t, 0, res.Cmp(d.SetInt64(3))) res = performDivInt64Int32(12, 3) - require.Equal(t, 0, res.Cmp(d.SetFinite(4, 0))) + require.Equal(t, 0, 
res.Cmp(d.SetInt64(4))) res = performDivInt64Int16(20, 4) - require.Equal(t, 0, res.Cmp(d.SetFinite(5, 0))) + require.Equal(t, 0, res.Cmp(d.SetInt64(5))) } func TestDecimalDivByZero(t *testing.T) { defer leaktest.AfterTest(t)() nonZeroDec, zeroDec := apd.Decimal{}, apd.Decimal{} - nonZeroDec.SetFinite(4, 0) - zeroDec.SetFinite(0, 0) + nonZeroDec.SetInt64(4) + zeroDec.SetInt64(0) require.True(t, errors.Is(colexecerror.CatchVectorizedRuntimeError(func() { performDivDecimalInt16(nonZeroDec, 0) }), tree.ErrDivByZero)) require.True(t, errors.Is(colexecerror.CatchVectorizedRuntimeError(func() { performDivDecimalInt32(nonZeroDec, 0) }), tree.ErrDivByZero)) diff --git a/pkg/sql/colexec/proj_const_ops_tmpl.go b/pkg/sql/colexec/proj_const_ops_tmpl.go index f13787878dd1..c49adb453ec5 100644 --- a/pkg/sql/colexec/proj_const_ops_tmpl.go +++ b/pkg/sql/colexec/proj_const_ops_tmpl.go @@ -33,9 +33,6 @@ import ( "github.com/cockroachdb/errors" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. diff --git a/pkg/sql/colexec/proj_non_const_ops_tmpl.go b/pkg/sql/colexec/proj_non_const_ops_tmpl.go index c1ba4e86f2aa..622de348fa79 100644 --- a/pkg/sql/colexec/proj_non_const_ops_tmpl.go +++ b/pkg/sql/colexec/proj_non_const_ops_tmpl.go @@ -33,9 +33,6 @@ import ( "github.com/cockroachdb/errors" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. diff --git a/pkg/sql/colexec/rowstovec_tmpl.go b/pkg/sql/colexec/rowstovec_tmpl.go index 8bb7848b0ba0..cda93cf6fa5e 100644 --- a/pkg/sql/colexec/rowstovec_tmpl.go +++ b/pkg/sql/colexec/rowstovec_tmpl.go @@ -32,9 +32,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // _CANONICAL_TYPE_FAMILY is the template variable. 
diff --git a/pkg/sql/colexec/select_in_tmpl.go b/pkg/sql/colexec/select_in_tmpl.go index f8e43d1075a9..fd50d656c20b 100644 --- a/pkg/sql/colexec/select_in_tmpl.go +++ b/pkg/sql/colexec/select_in_tmpl.go @@ -35,7 +35,6 @@ import ( // Remove unused warnings. var ( - _ = execgen.UNSAFEGET _ = colexecerror.InternalError ) diff --git a/pkg/sql/colexec/selection_ops_tmpl.go b/pkg/sql/colexec/selection_ops_tmpl.go index 35ee2d7ec80e..90f5796f9f8e 100644 --- a/pkg/sql/colexec/selection_ops_tmpl.go +++ b/pkg/sql/colexec/selection_ops_tmpl.go @@ -32,9 +32,6 @@ import ( "github.com/cockroachdb/errors" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. diff --git a/pkg/sql/colexec/sort_tmpl.go b/pkg/sql/colexec/sort_tmpl.go index 6e731d372dab..8209d36db5d9 100644 --- a/pkg/sql/colexec/sort_tmpl.go +++ b/pkg/sql/colexec/sort_tmpl.go @@ -31,9 +31,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. diff --git a/pkg/sql/colexec/values_differ_tmpl.go b/pkg/sql/colexec/values_differ_tmpl.go index 59e770dbd08e..1a81ea1e201e 100644 --- a/pkg/sql/colexec/values_differ_tmpl.go +++ b/pkg/sql/colexec/values_differ_tmpl.go @@ -28,9 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. diff --git a/pkg/sql/colexec/vec_comparators_tmpl.go b/pkg/sql/colexec/vec_comparators_tmpl.go index 048ca02cd963..149b323b51a4 100644 --- a/pkg/sql/colexec/vec_comparators_tmpl.go +++ b/pkg/sql/colexec/vec_comparators_tmpl.go @@ -29,9 +29,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" ) -// Remove unused warning. -var _ = execgen.UNSAFEGET - // {{/* // Declarations to make the template compile properly. 
diff --git a/pkg/sql/comment_on_database.go b/pkg/sql/comment_on_database.go index bdf3a8ff1d4f..601a00accc60 100644 --- a/pkg/sql/comment_on_database.go +++ b/pkg/sql/comment_on_database.go @@ -22,7 +22,7 @@ import ( type commentOnDatabaseNode struct { n *tree.CommentOnDatabase - dbDesc *sqlbase.DatabaseDescriptor + dbDesc *sqlbase.ImmutableDatabaseDescriptor } // CommentOnDatabase add comment on a database. @@ -35,7 +35,6 @@ func (p *planner) CommentOnDatabase( if err != nil { return nil, err } - if err := p.CheckPrivilege(ctx, dbDesc, privilege.CREATE); err != nil { return nil, err } @@ -52,7 +51,7 @@ func (n *commentOnDatabaseNode) startExec(params runParams) error { sqlbase.InternalExecutorSessionDataOverride{User: security.RootUser}, "UPSERT INTO system.comments VALUES ($1, $2, 0, $3)", keys.DatabaseCommentType, - n.dbDesc.ID, + n.dbDesc.GetID(), *n.n.Comment) if err != nil { return err @@ -65,7 +64,7 @@ func (n *commentOnDatabaseNode) startExec(params runParams) error { sqlbase.InternalExecutorSessionDataOverride{User: security.RootUser}, "DELETE FROM system.comments WHERE type=$1 AND object_id=$2 AND sub_id=0", keys.DatabaseCommentType, - n.dbDesc.ID) + n.dbDesc.GetID()) if err != nil { return err } @@ -75,7 +74,7 @@ func (n *commentOnDatabaseNode) startExec(params runParams) error { params.ctx, params.p.txn, EventLogCommentOnDatabase, - int32(n.dbDesc.ID), + int32(n.dbDesc.GetID()), int32(params.extendedEvalCtx.NodeID.SQLInstanceID()), struct { DatabaseName string diff --git a/pkg/sql/control_jobs.go b/pkg/sql/control_jobs.go index 1eae2545d610..3d00c259ea82 100644 --- a/pkg/sql/control_jobs.go +++ b/pkg/sql/control_jobs.go @@ -62,7 +62,7 @@ func (n *controlJobsNode) startExec(params runParams) error { case jobs.StatusPaused: err = reg.PauseRequested(params.ctx, params.p.txn, int64(jobID)) case jobs.StatusRunning: - err = reg.Resume(params.ctx, params.p.txn, int64(jobID)) + err = reg.Unpause(params.ctx, params.p.txn, int64(jobID)) case jobs.StatusCanceled: 
err = reg.CancelRequested(params.ctx, params.p.txn, int64(jobID)) default: diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index 412631f126aa..f023d45b0c11 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -119,7 +119,7 @@ CREATE TABLE crdb_internal.node_build_info ( field STRING NOT NULL, value STRING NOT NULL )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { execCfg := p.ExecCfg() nodeID, _ := execCfg.NodeID.OptionalNodeID() // zero if not available @@ -153,7 +153,7 @@ CREATE TABLE crdb_internal.node_runtime_info ( field STRING NOT NULL, value STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "access the node runtime information"); err != nil { return err } @@ -210,12 +210,12 @@ CREATE TABLE crdb_internal.databases ( id INT NOT NULL, name STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachDatabaseDesc(ctx, p, nil /* all databases */, true, /* requiresPrivileges */ - func(db *sqlbase.DatabaseDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor) error { return addRow( - tree.NewDInt(tree.DInt(db.ID)), // id - tree.NewDString(db.Name), // name + tree.NewDInt(tree.DInt(db.GetID())), // id + tree.NewDString(db.GetName()), // name ) }) }, @@ -241,7 +241,7 @@ CREATE TABLE crdb_internal.tables ( audit_mode STRING NOT NULL, schema_name STRING NOT NULL )`, 
- generator: func(ctx context.Context, p *planner, dbDesc *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { + generator: func(ctx context.Context, p *planner, dbDesc *sqlbase.ImmutableDatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { row := make(tree.Datums, 14) worker := func(pusher rowPusher) error { descs, err := p.Tables().GetAllDescriptors(ctx, p.txn) @@ -251,9 +251,8 @@ CREATE TABLE crdb_internal.tables ( dbNames := make(map[sqlbase.ID]string) // Record database descriptors for name lookups. for _, desc := range descs { - db, ok := desc.(*sqlbase.DatabaseDescriptor) - if ok { - dbNames[db.ID] = db.Name + if dbDesc, ok := desc.(*sqlbase.ImmutableDatabaseDescriptor); ok { + dbNames[dbDesc.GetID()] = dbDesc.GetName() } } @@ -301,7 +300,7 @@ CREATE TABLE crdb_internal.tables ( // Note: we do not use forEachTableDesc() here because we want to // include added and dropped descriptors. for _, desc := range descs { - table, ok := desc.(*sqlbase.TableDescriptor) + table, ok := desc.(*sqlbase.ImmutableTableDescriptor) if !ok || p.CheckAnyPrivilege(ctx, table) != nil { continue } @@ -313,7 +312,7 @@ CREATE TABLE crdb_internal.tables ( // effectively deleted. 
dbName = fmt.Sprintf("[%d]", table.GetParentID()) } - if err := addDesc(table, tree.NewDString(dbName), "public"); err != nil { + if err := addDesc(table.TableDesc(), tree.NewDString(dbName), "public"); err != nil { return err } } @@ -351,7 +350,7 @@ CREATE TABLE crdb_internal.schema_changes ( state STRING NOT NULL, direction STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { descs, err := p.Tables().GetAllDescriptors(ctx, p.txn) if err != nil { return err @@ -359,7 +358,7 @@ CREATE TABLE crdb_internal.schema_changes ( // Note: we do not use forEachTableDesc() here because we want to // include added and dropped descriptors. for _, desc := range descs { - table, ok := desc.(*sqlbase.TableDescriptor) + table, ok := desc.(*sqlbase.ImmutableTableDescriptor) if !ok || p.CheckAnyPrivilege(ctx, table) != nil { continue } @@ -414,11 +413,11 @@ CREATE TABLE crdb_internal.leases ( deleted BOOL NOT NULL )`, populate: func( - ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error, + ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error, ) (err error) { nodeID := tree.NewDInt(tree.DInt(int64(p.execCfg.NodeID.Get()))) p.LeaseMgr().VisitLeases(func(desc sqlbase.TableDescriptor, dropped bool, _ int, expiration tree.DTimestamp) (wantMore bool) { - if p.CheckAnyPrivilege(ctx, &desc) != nil { + if p.CheckAnyPrivilege(ctx, sqlbase.NewImmutableTableDescriptor(desc)) != nil { // TODO(ajwerner): inspect what type of error got returned. 
return true } @@ -467,7 +466,7 @@ CREATE TABLE crdb_internal.jobs ( coordinator_id INT )`, comment: `decoded job metadata from system.jobs (KV scan)`, - generator: func(ctx context.Context, p *planner, _ *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { + generator: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { currentUser := p.SessionData().User isAdmin, err := p.HasAdminRole(ctx) if err != nil { @@ -664,7 +663,7 @@ CREATE TABLE crdb_internal.node_statement_statistics ( rows_read INT NOT NULL, implicit_txn BOOL NOT NULL )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "access application statistics"); err != nil { return err } @@ -765,7 +764,7 @@ CREATE TABLE crdb_internal.node_txn_stats ( committed_count INT NOT NULL, implicit_count INT NOT NULL )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "access application statistics"); err != nil { return err } @@ -829,7 +828,7 @@ CREATE TABLE crdb_internal.session_trace ( message STRING NOT NULL, -- The logged message. age INTERVAL NOT NULL -- The age of this message relative to the beginning of the trace. 
)`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { rows, err := p.ExtendedEvalContext().Tracing.getSessionTrace() if err != nil { return err @@ -857,7 +856,7 @@ CREATE TABLE crdb_internal.cluster_settings ( public BOOL NOT NULL, -- whether the setting is documented, which implies the user can expect support. description STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "read crdb_internal.cluster_settings"); err != nil { return err } @@ -889,7 +888,7 @@ CREATE TABLE crdb_internal.session_variables ( value STRING NOT NULL, hidden BOOL NOT NULL )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { for _, vName := range varNames { gen := varGen[vName] value := gen.Get(&p.extendedEvalCtx) @@ -918,7 +917,7 @@ CREATE TABLE crdb_internal.%s ( var crdbInternalLocalTxnsTable = virtualSchemaTable{ comment: "running user transactions visible by the current user (RAM; local node only)", schema: fmt.Sprintf(txnsSchemaPattern, "node_transactions"), - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "read crdb_internal.node_transactions"); err != nil { return err } @@ -938,7 +937,7 @@ var crdbInternalLocalTxnsTable = 
virtualSchemaTable{ var crdbInternalClusterTxnsTable = virtualSchemaTable{ comment: "running user transactions visible by the current user (cluster RPC; expensive!)", schema: fmt.Sprintf(txnsSchemaPattern, "cluster_transactions"), - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "read crdb_internal.cluster_transactions"); err != nil { return err } @@ -1051,7 +1050,7 @@ func getSessionID(session serverpb.Session) tree.Datum { var crdbInternalLocalQueriesTable = virtualSchemaTable{ comment: "running queries visible by current user (RAM; local node only)", schema: fmt.Sprintf(queriesSchemaPattern, "node_queries"), - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { req := p.makeSessionsRequest(ctx) ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() if err != nil { @@ -1070,7 +1069,7 @@ var crdbInternalLocalQueriesTable = virtualSchemaTable{ var crdbInternalClusterQueriesTable = virtualSchemaTable{ comment: "running queries visible by current user (cluster RPC; expensive!)", schema: fmt.Sprintf(queriesSchemaPattern, "cluster_queries"), - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { req := p.makeSessionsRequest(ctx) ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() if err != nil { @@ -1182,7 +1181,7 @@ CREATE TABLE crdb_internal.%s ( var crdbInternalLocalSessionsTable = virtualSchemaTable{ comment: "running sessions visible by current user 
(RAM; local node only)", schema: fmt.Sprintf(sessionsSchemaPattern, "node_sessions"), - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { req := p.makeSessionsRequest(ctx) ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() if err != nil { @@ -1201,7 +1200,7 @@ var crdbInternalLocalSessionsTable = virtualSchemaTable{ var crdbInternalClusterSessionsTable = virtualSchemaTable{ comment: "running sessions visible to current user (cluster RPC; expensive!)", schema: fmt.Sprintf(sessionsSchemaPattern, "cluster_sessions"), - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { req := p.makeSessionsRequest(ctx) ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() if err != nil { @@ -1309,7 +1308,7 @@ var crdbInternalLocalMetricsTable = virtualSchemaTable{ name STRING NOT NULL, -- name of the metric value FLOAT NOT NULL -- value of the metric )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "read crdb_internal.node_metrics"); err != nil { return err } @@ -1351,7 +1350,7 @@ CREATE TABLE crdb_internal.builtin_functions ( category STRING NOT NULL, details STRING NOT NULL )`, - populate: func(ctx context.Context, _ *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, _ *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { for _, name := range 
builtins.AllBuiltinNames { props, overloads := builtins.GetBuiltinProperties(name) for _, f := range overloads { @@ -1382,15 +1381,15 @@ CREATE TABLE crdb_internal.create_type_statements ( INDEX (descriptor_id) ) `, - populate: func(ctx context.Context, p *planner, db *DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTypeDesc(ctx, p, db, func(db *DatabaseDescriptor, sc string, typeDesc *TypeDescriptor) error { + populate: func(ctx context.Context, p *planner, db *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { + return forEachTypeDesc(ctx, p, db, func(db *sqlbase.ImmutableDatabaseDescriptor, sc string, typeDesc *sqlbase.ImmutableTypeDescriptor) error { switch typeDesc.Kind { case sqlbase.TypeDescriptor_ENUM: var enumLabels []string for i := range typeDesc.EnumMembers { enumLabels = append(enumLabels, typeDesc.EnumMembers[i].LogicalRepresentation) } - name, err := tree.NewUnresolvedObjectName(3, [3]string{typeDesc.Name, sc, db.Name}, 0) + name, err := tree.NewUnresolvedObjectName(3, [3]string{typeDesc.GetName(), sc, db.GetName()}, 0) if err != nil { return err } @@ -1400,12 +1399,12 @@ CREATE TABLE crdb_internal.create_type_statements ( EnumLabels: enumLabels, } if err := addRow( - tree.NewDInt(tree.DInt(db.ID)), // database_id - tree.NewDString(db.Name), // database_name - tree.NewDString(sc), // schema_name - tree.NewDInt(tree.DInt(typeDesc.ID)), // descriptor_id - tree.NewDString(typeDesc.Name), // descriptor_name - tree.NewDString(tree.AsString(node)), // create_statement + tree.NewDInt(tree.DInt(db.GetID())), // database_id + tree.NewDString(db.GetName()), // database_name + tree.NewDString(sc), // schema_name + tree.NewDInt(tree.DInt(typeDesc.GetID())), // descriptor_id + tree.NewDString(typeDesc.GetName()), // descriptor_name + tree.NewDString(tree.AsString(node)), // create_statement ); err != nil { return err } @@ -1448,13 +1447,13 @@ CREATE TABLE crdb_internal.create_statements ( 
INDEX(descriptor_id) ) `, virtualOnce, false, /* includesIndexEntries */ - func(ctx context.Context, p *planner, h oidHasher, db *sqlbase.DatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, lookup simpleSchemaResolver, addRow func(...tree.Datum) error) error { + func(ctx context.Context, p *planner, h oidHasher, db *sqlbase.ImmutableDatabaseDescriptor, scName string, + table *sqlbase.ImmutableTableDescriptor, lookup simpleSchemaResolver, addRow func(...tree.Datum) error) error { contextName := "" parentNameStr := tree.DNull if db != nil { - contextName = db.Name - parentNameStr = tree.NewDString(db.Name) + contextName = db.GetName() + parentNameStr = tree.NewDString(contextName) } scNameStr := tree.NewDString(scName) @@ -1525,7 +1524,7 @@ func showAlterStatementWithInterleave( contextName string, lCtx simpleSchemaResolver, allIdx []sqlbase.IndexDescriptor, - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, alterStmts *tree.DArray, validateStmts *tree.DArray, ) error { @@ -1640,11 +1639,11 @@ CREATE TABLE crdb_internal.table_columns ( hidden BOOL NOT NULL ) `, - generator: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { + generator: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { row := make(tree.Datums, 8) worker := func(pusher rowPusher) error { return forEachTableDescAll(ctx, p, dbContext, hideVirtual, - func(db *DatabaseDescriptor, _ string, table *TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, _ string, table *ImmutableTableDescriptor) error { tableID := tree.NewDInt(tree.DInt(table.ID)) tableName := tree.NewDString(table.Name) for i := range table.Columns { @@ -1697,13 +1696,13 @@ CREATE TABLE crdb_internal.table_indexes ( is_inverted BOOL NOT NULL ) `, - generator: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor) 
(virtualTableGenerator, cleanupFunc, error) { + generator: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { primary := tree.NewDString("primary") secondary := tree.NewDString("secondary") row := make(tree.Datums, 7) worker := func(pusher rowPusher) error { return forEachTableDescAll(ctx, p, dbContext, hideVirtual, - func(db *DatabaseDescriptor, _ string, table *TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, _ string, table *ImmutableTableDescriptor) error { tableID := tree.NewDInt(tree.DInt(table.ID)) tableName := tree.NewDString(table.Name) row = row[:0] @@ -1760,7 +1759,7 @@ CREATE TABLE crdb_internal.index_columns ( column_direction STRING ) `, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { key := tree.NewDString("key") storing := tree.NewDString("storing") extra := tree.NewDString("extra") @@ -1771,9 +1770,9 @@ CREATE TABLE crdb_internal.index_columns ( } return forEachTableDescAll(ctx, p, dbContext, hideVirtual, - func(parent *DatabaseDescriptor, _ string, table *TableDescriptor) error { + func(parent *sqlbase.ImmutableDatabaseDescriptor, _ string, table *ImmutableTableDescriptor) error { tableID := tree.NewDInt(tree.DInt(table.ID)) - parentName := parent.Name + parentName := parent.GetName() tableName := tree.NewDString(table.Name) reportIndex := func(idx *sqlbase.IndexDescriptor) error { @@ -1876,14 +1875,14 @@ CREATE TABLE crdb_internal.backward_dependencies ( dependson_details STRING ) `, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) 
error) error { fkDep := tree.NewDString("fk") viewDep := tree.NewDString("view") sequenceDep := tree.NewDString("sequence") interleaveDep := tree.NewDString("interleave") return forEachTableDescAllWithTableLookup(ctx, p, dbContext, hideVirtual, /* virtual tables have no backward/forward dependencies*/ - func(db *DatabaseDescriptor, _ string, table *TableDescriptor, tableLookup tableLookupFn) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, _ string, table *ImmutableTableDescriptor, tableLookup tableLookupFn) error { tableID := tree.NewDInt(tree.DInt(table.ID)) tableName := tree.NewDString(table.Name) @@ -1990,7 +1989,7 @@ CREATE TABLE crdb_internal.feature_usage ( usage_count INT NOT NULL ) `, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { for feature, count := range telemetry.GetFeatureCounts(telemetry.Raw, telemetry.ReadOnly) { if count == 0 { // Skip over empty counters to avoid polluting the output. 
@@ -2025,13 +2024,13 @@ CREATE TABLE crdb_internal.forward_dependencies ( dependedonby_details STRING ) `, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { fkDep := tree.NewDString("fk") viewDep := tree.NewDString("view") interleaveDep := tree.NewDString("interleave") sequenceDep := tree.NewDString("sequence") return forEachTableDescAll(ctx, p, dbContext, hideVirtual, /* virtual tables have no backward/forward dependencies*/ - func(db *DatabaseDescriptor, _ string, table *TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, _ string, table *ImmutableTableDescriptor) error { tableID := tree.NewDInt(tree.DInt(table.ID)) tableName := tree.NewDString(table.Name) @@ -2177,7 +2176,7 @@ CREATE TABLE crdb_internal.ranges_no_leases ( split_enforced_until TIMESTAMP ) `, - generator: func(ctx context.Context, p *planner, _ *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { + generator: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { if err := p.RequireAdminRole(ctx, "read crdb_internal.ranges_no_leases"); err != nil { return nil, nil, err } @@ -2193,14 +2192,14 @@ CREATE TABLE crdb_internal.ranges_no_leases ( for _, desc := range descs { id := uint32(desc.GetID()) switch desc := desc.(type) { - case *sqlbase.TableDescriptor: + case *sqlbase.ImmutableTableDescriptor: parents[id] = uint32(desc.ParentID) tableNames[id] = desc.GetName() indexNames[id] = make(map[uint32]string) for _, idx := range desc.Indexes { indexNames[id][uint32(idx.ID)] = idx.Name } - case *sqlbase.DatabaseDescriptor: + case *sqlbase.ImmutableDatabaseDescriptor: dbNames[id] = desc.GetName() } } @@ -2399,7 +2398,7 @@ CREATE TABLE crdb_internal.zones ( full_config_sql STRING ) `, - 
populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if !p.ExecCfg().Codec.ForSystemTenant() { return errorutil.UnsupportedWithMultiTenancy() } @@ -2472,7 +2471,7 @@ CREATE TABLE crdb_internal.zones ( if err != nil { return err } - if p.CheckAnyPrivilege(ctx, table) != nil { + if p.CheckAnyPrivilege(ctx, sqlbase.NewImmutableTableDescriptor(*table)) != nil { continue } } @@ -2619,7 +2618,7 @@ CREATE TABLE crdb_internal.gossip_nodes ( leases INT NOT NULL ) `, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "read crdb_internal.gossip_nodes"); err != nil { return err } @@ -2739,7 +2738,7 @@ CREATE TABLE crdb_internal.gossip_liveness ( updated_at TIMESTAMP ) `, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // ATTENTION: The contents of this table should only access gossip data // which is highly available. DO NOT CALL functions which require the // cluster to be healthy, such as StatusServer.Nodes(). 
@@ -2818,7 +2817,7 @@ CREATE TABLE crdb_internal.gossip_alerts ( value FLOAT NOT NULL -- value of the alert (depends on subsystem, can be NaN) ) `, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "read crdb_internal.gossip_alerts"); err != nil { return err } @@ -2887,7 +2886,7 @@ CREATE TABLE crdb_internal.gossip_network ( target_id INT NOT NULL -- target node of a gossip connection ) `, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "read crdb_internal.gossip_network"); err != nil { return err } @@ -3084,16 +3083,16 @@ CREATE TABLE crdb_internal.partitions ( subzone_id INT -- references a subzone id in the crdb_internal.zones table ) `, - generator: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { + generator: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { dbName := "" if dbContext != nil { - dbName = dbContext.Name + dbName = dbContext.GetName() } worker := func(pusher rowPusher) error { return forEachTableDescAll(ctx, p, dbContext, hideVirtual, /* virtual tables have no partitions*/ - func(db *DatabaseDescriptor, _ string, table *TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, _ string, table *ImmutableTableDescriptor) error { return table.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error { - return addPartitioningRows(ctx, p, dbName, table, index, &index.Partitioning, + return addPartitioningRows(ctx, p, dbName, 
table.TableDesc(), index, &index.Partitioning, tree.DNull /* parentName */, 0 /* colOffset */, pusher.pushRow) }) }) @@ -3133,7 +3132,7 @@ CREATE TABLE crdb_internal.kv_node_status ( activity JSON NOT NULL ) `, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "read crdb_internal.kv_node_status"); err != nil { return err } @@ -3247,7 +3246,7 @@ CREATE TABLE crdb_internal.kv_store_status ( metrics JSON NOT NULL ) `, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { if err := p.RequireAdminRole(ctx, "read crdb_internal.kv_store_status"); err != nil { return err } @@ -3359,7 +3358,7 @@ CREATE TABLE crdb_internal.predefined_comments ( COMMENT STRING )`, populate: func( - ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error, + ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error, ) error { tableCommentKey := tree.NewDInt(keys.TableCommentType) vt := p.getVirtualTabler() diff --git a/pkg/sql/crdb_internal_test.go b/pkg/sql/crdb_internal_test.go index fdb3ddddf658..414dd2f617f9 100644 --- a/pkg/sql/crdb_internal_test.go +++ b/pkg/sql/crdb_internal_test.go @@ -183,7 +183,8 @@ CREATE TABLE t.test (k INT); // We now want to create a pre-2.1 table descriptor with an // old-style bit column. We're going to edit the table descriptor // manually, without going through SQL. 
- tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") for i := range tableDesc.Columns { if tableDesc.Columns[i].Name == "k" { tableDesc.Columns[i].Type.InternalType.VisibleType = 4 // Pre-2.1 BIT. @@ -219,7 +220,7 @@ CREATE TABLE t.test (k INT); if err := txn.SetSystemConfigTrigger(); err != nil { return err } - return txn.Put(ctx, sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), sqlbase.WrapDescriptor(tableDesc)) + return txn.Put(ctx, sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), tableDesc.DescriptorProto()) }); err != nil { t.Fatal(err) } @@ -276,7 +277,7 @@ SELECT column_name, character_maximum_length, numeric_precision, numeric_precisi } // And verify that this has re-set the fields. - tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") found := false for i := range tableDesc.Columns { col := &tableDesc.Columns[i] diff --git a/pkg/sql/create_database.go b/pkg/sql/create_database.go index d3fc6e6368f3..9b21fc94c2d0 100644 --- a/pkg/sql/create_database.go +++ b/pkg/sql/create_database.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" ) @@ -78,10 +77,9 @@ func (p *planner) CreateDatabase(ctx context.Context, n *tree.CreateDatabase) (p func (n *createDatabaseNode) startExec(params runParams) error { telemetry.Inc(sqltelemetry.SchemaChangeCreateCounter("database")) - desc := sqlbase.MakeDatabaseDesc(n.n) - created, err := params.p.createDatabase( - 
params.ctx, &desc, n.n.IfNotExists, tree.AsStringWithFQNames(n.n, params.Ann())) + desc, created, err := params.p.createDatabase( + params.ctx, n.n, tree.AsStringWithFQNames(n.n, params.Ann())) if err != nil { return err } @@ -92,7 +90,7 @@ func (n *createDatabaseNode) startExec(params runParams) error { params.ctx, params.p.txn, EventLogCreateDatabase, - int32(desc.ID), + int32(desc.GetID()), int32(params.extendedEvalCtx.NodeID.SQLInstanceID()), struct { DatabaseName string @@ -103,7 +101,7 @@ func (n *createDatabaseNode) startExec(params runParams) error { return err } params.extendedEvalCtx.Descs.AddUncommittedDatabase( - desc.Name, desc.ID, descs.DBCreated) + desc.GetName(), desc.GetID(), descs.DBCreated) } return nil } diff --git a/pkg/sql/create_role.go b/pkg/sql/create_role.go index 9bc1938ad81f..97c1cce6b3cc 100644 --- a/pkg/sql/create_role.go +++ b/pkg/sql/create_role.go @@ -235,7 +235,7 @@ const usernameHelp = "Usernames are case insensitive, must start with a letter, var usernameRE = regexp.MustCompile(`^[\p{Ll}0-9_][---\p{Ll}0-9_.]*$`) -var blacklistedUsernames = map[string]struct{}{ +var blocklistedUsernames = map[string]struct{}{ security.NodeUser: {}, } @@ -243,19 +243,19 @@ var blacklistedUsernames = map[string]struct{}{ // it validates according to the usernameRE regular expression. // It rejects reserved user names. 
func NormalizeAndValidateUsername(username string) (string, error) { - username, err := NormalizeAndValidateUsernameNoBlacklist(username) + username, err := NormalizeAndValidateUsernameNoBlocklist(username) if err != nil { return "", err } - if _, ok := blacklistedUsernames[username]; ok { + if _, ok := blocklistedUsernames[username]; ok { return "", pgerror.Newf(pgcode.ReservedName, "username %q reserved", username) } return username, nil } -// NormalizeAndValidateUsernameNoBlacklist case folds the specified username and verifies +// NormalizeAndValidateUsernameNoBlocklist case folds the specified username and verifies // it validates according to the usernameRE regular expression. -func NormalizeAndValidateUsernameNoBlacklist(username string) (string, error) { +func NormalizeAndValidateUsernameNoBlocklist(username string) (string, error) { username = tree.Name(username).Normalize() if !usernameRE.MatchString(username) { return "", errors.WithHint(pgerror.Newf(pgcode.InvalidName, "username %q invalid", username), usernameHelp) diff --git a/pkg/sql/create_sequence.go b/pkg/sql/create_sequence.go index 72248a2b596c..0fa355e30897 100644 --- a/pkg/sql/create_sequence.go +++ b/pkg/sql/create_sequence.go @@ -27,7 +27,7 @@ import ( type createSequenceNode struct { n *tree.CreateSequence - dbDesc *sqlbase.DatabaseDescriptor + dbDesc *sqlbase.ImmutableDatabaseDescriptor } func (p *planner) CreateSequence(ctx context.Context, n *tree.CreateSequence) (planNode, error) { @@ -57,7 +57,7 @@ func (n *createSequenceNode) startExec(params runParams) error { telemetry.Inc(sqltelemetry.SchemaChangeCreateCounter("sequence")) isTemporary := n.n.Temporary - _, schemaID, err := getTableCreateParams(params, n.dbDesc.ID, isTemporary, n.n.Name.Table()) + _, schemaID, err := getTableCreateParams(params, n.dbDesc.GetID(), isTemporary, n.n.Name.Table()) if err != nil { if sqlbase.IsRelationAlreadyExistsError(err) && n.n.IfNotExists { return nil @@ -76,7 +76,7 @@ func (n *createSequenceNode) 
startExec(params runParams) error { func doCreateSequence( params runParams, context string, - dbDesc *DatabaseDescriptor, + dbDesc *sqlbase.ImmutableDatabaseDescriptor, schemaID sqlbase.ID, name *TableName, isTemporary bool, @@ -98,7 +98,7 @@ func doCreateSequence( desc, err := MakeSequenceTableDesc( name.Table(), opts, - dbDesc.ID, + dbDesc.GetID(), schemaID, id, params.creationTimeForNewTableDescriptor(), @@ -116,7 +116,7 @@ func doCreateSequence( key := sqlbase.MakeObjectNameKey( params.ctx, params.ExecCfg().Settings, - dbDesc.ID, + dbDesc.GetID(), schemaID, name.Table(), ).Key(params.ExecCfg().Codec) @@ -170,7 +170,7 @@ func MakeSequenceTableDesc( isTemporary bool, params *runParams, ) (sqlbase.MutableTableDescriptor, error) { - desc := InitTableDescriptor( + desc := sqlbase.InitTableDescriptor( id, parentID, schemaID, diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index 6e841ec7941d..e7bddebf4cbc 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -46,7 +46,7 @@ import ( type createTableNode struct { n *tree.CreateTable - dbDesc *sqlbase.DatabaseDescriptor + dbDesc *sqlbase.ImmutableDatabaseDescriptor sourcePlan planNode run createTableRun @@ -203,7 +203,7 @@ func getTableCreateParams( return nil, 0, err } // Still return data in this case. 
- return tKey, schemaID, makeObjectAlreadyExistsError(desc, tableName) + return tKey, schemaID, makeObjectAlreadyExistsError(desc.DescriptorProto(), tableName) } else if err != nil { return nil, 0, err } @@ -214,7 +214,7 @@ func (n *createTableNode) startExec(params runParams) error { telemetry.Inc(sqltelemetry.SchemaChangeCreateCounter("table")) isTemporary := n.n.Temporary - tKey, schemaID, err := getTableCreateParams(params, n.dbDesc.ID, isTemporary, n.n.Table.Table()) + tKey, schemaID, err := getTableCreateParams(params, n.dbDesc.GetID(), isTemporary, n.n.Table.Table()) if err != nil { if sqlbase.IsRelationAlreadyExistsError(err) && n.n.IfNotExists { return nil @@ -270,7 +270,7 @@ func (n *createTableNode) startExec(params runParams) error { // If a new system table is being created (which should only be doable by // an internal user account), make sure it gets the correct privileges. privs := n.dbDesc.GetPrivileges() - if n.dbDesc.ID == keys.SystemDatabaseID { + if n.dbDesc.GetID() == keys.SystemDatabaseID { privs = sqlbase.NewDefaultPrivilegeDescriptor() } @@ -289,7 +289,7 @@ func (n *createTableNode) startExec(params runParams) error { } desc, err = makeTableDescIfAs(params, - n.n, n.dbDesc.ID, schemaID, id, creationTime, asCols, privs, params.p.EvalContext(), isTemporary) + n.n, n.dbDesc.GetID(), schemaID, id, creationTime, asCols, privs, params.p.EvalContext(), isTemporary) if err != nil { return err } @@ -301,7 +301,7 @@ func (n *createTableNode) startExec(params runParams) error { } } else { affected = make(map[sqlbase.ID]*sqlbase.MutableTableDescriptor) - desc, err = makeTableDesc(params, n.n, n.dbDesc.ID, schemaID, id, creationTime, privs, affected, isTemporary) + desc, err = makeTableDesc(params, n.n, n.dbDesc.GetID(), schemaID, id, creationTime, privs, affected, isTemporary) if err != nil { return err } @@ -523,11 +523,11 @@ func qualifyFKColErrorWithDB( if err != nil { return tree.ErrString(tree.NewUnresolvedName(tbl.Name, col)) } - schema, err := 
resolver.ResolveSchemaNameByID(ctx, txn, codec, db.ID, tbl.GetParentSchemaID()) + schema, err := resolver.ResolveSchemaNameByID(ctx, txn, codec, db.GetID(), tbl.GetParentSchemaID()) if err != nil { return tree.ErrString(tree.NewUnresolvedName(tbl.Name, col)) } - return tree.ErrString(tree.NewUnresolvedName(db.Name, schema, tbl.Name, col)) + return tree.ErrString(tree.NewUnresolvedName(db.GetName(), schema, tbl.Name, col)) } // FKTableState is the state of the referencing table resolveFK() is called on. @@ -1065,28 +1065,6 @@ var CreatePartitioningCCL = func( "creating or manipulating partitions requires a CCL binary")) } -// InitTableDescriptor returns a blank TableDescriptor. -func InitTableDescriptor( - id, parentID, parentSchemaID sqlbase.ID, - name string, - creationTime hlc.Timestamp, - privileges *sqlbase.PrivilegeDescriptor, - temporary bool, -) sqlbase.MutableTableDescriptor { - return *sqlbase.NewMutableCreatedTableDescriptor(sqlbase.TableDescriptor{ - ID: id, - Name: name, - ParentID: parentID, - UnexposedParentSchemaID: parentSchemaID, - FormatVersion: sqlbase.InterleavedFormatVersion, - Version: 1, - ModificationTime: creationTime, - Privileges: privileges, - CreateAsOfTime: creationTime, - Temporary: temporary, - }) -} - func getFinalSourceQuery(source *tree.Select, evalCtx *tree.EvalContext) string { // Ensure that all the table names pretty-print as fully qualified, so we // store that in the table descriptor. @@ -1097,7 +1075,7 @@ func getFinalSourceQuery(source *tree.Select, evalCtx *tree.EvalContext) string // We use tree.FormatNode merely as a traversal method; its output buffer is // discarded immediately after the traversal because it is not needed // further. - f := tree.NewFmtCtx(tree.FmtParsable) + f := tree.NewFmtCtx(tree.FmtSerializable) f.SetReformatTableNames( func(_ *tree.FmtCtx, tn *tree.TableName) { // Persist the database prefix expansion. 
@@ -1113,7 +1091,7 @@ func getFinalSourceQuery(source *tree.Select, evalCtx *tree.EvalContext) string f.Close() // Substitute placeholders with their values. - ctx := tree.NewFmtCtx(tree.FmtParsable) + ctx := tree.NewFmtCtx(tree.FmtSerializable) ctx.SetPlaceholderFormat(func(ctx *tree.FmtCtx, placeholder *tree.Placeholder) { d, err := placeholder.Eval(evalCtx) if err != nil { @@ -1223,7 +1201,7 @@ func MakeTableDesc( // been populated. columnDefaultExprs := make([]tree.TypedExpr, len(n.Defs)) - desc := InitTableDescriptor( + desc := sqlbase.InitTableDescriptor( id, parentID, parentSchemaID, n.Table.Table(), creationTime, privileges, temporary, ) @@ -1833,7 +1811,7 @@ func makeTableDesc( // it needs to pull in descriptors from FK depended-on tables // and interleaved parents using their current state in KV. // See the comment at the start of MakeTableDesc() and resolveFK(). - params.p.runWithOptions(resolveFlags{skipCache: true}, func() { + params.p.runWithOptions(resolveFlags{skipCache: true, contextDatabaseID: parentID}, func() { ret, err = MakeTableDesc( params.ctx, params.p.txn, @@ -1988,16 +1966,20 @@ func replaceLikeTableOpts(n *tree.CreateTable, params runParams) (tree.TableDefs return newDefs, nil } -func makeObjectAlreadyExistsError(collidingObject sqlbase.DescriptorProto, name string) error { - switch collidingObject.(type) { - case *TableDescriptor: +func makeObjectAlreadyExistsError(collidingObject *sqlbase.Descriptor, name string) error { + switch collidingObject.Union.(type) { + case *sqlbase.Descriptor_Table: return sqlbase.NewRelationAlreadyExistsError(name) - case *TypeDescriptor: + case *sqlbase.Descriptor_Type: return sqlbase.NewTypeAlreadyExistsError(name) - case *DatabaseDescriptor: + case *sqlbase.Descriptor_Database: return sqlbase.NewDatabaseAlreadyExistsError(name) + case *sqlbase.Descriptor_Schema: + // TODO(ajwerner): Add a case for an existing schema object. 
+ return errors.AssertionFailedf("schema exists with name %v", name) + default: + return errors.AssertionFailedf("unknown type %T exists with name %v", collidingObject.Union, name) } - return nil } // makeShardColumnDesc returns a new column descriptor for a hidden computed shard column diff --git a/pkg/sql/create_test.go b/pkg/sql/create_test.go index 4bcd3b5f5771..efd1a2562ca8 100644 --- a/pkg/sql/create_test.go +++ b/pkg/sql/create_test.go @@ -268,7 +268,7 @@ func verifyTables( if err := kvDB.GetProto(context.Background(), descKey, desc); err != nil { t.Fatal(err) } - if (*desc != sqlbase.Descriptor{}) { + if !desc.Equal(sqlbase.Descriptor{}) { t.Fatalf("extra descriptor with id %d", id) } } diff --git a/pkg/sql/create_type.go b/pkg/sql/create_type.go index d246d2224cf6..02e5eb8ef805 100644 --- a/pkg/sql/create_type.go +++ b/pkg/sql/create_type.go @@ -50,7 +50,7 @@ func (n *createTypeNode) startExec(params runParams) error { func resolveNewTypeName( params runParams, name *tree.UnresolvedObjectName, -) (*tree.TypeName, *DatabaseDescriptor, error) { +) (*tree.TypeName, *sqlbase.ImmutableDatabaseDescriptor, error) { // Resolve the target schema and database. db, prefix, err := params.p.ResolveUncachedDatabase(params.ctx, name) if err != nil { @@ -62,7 +62,7 @@ func resolveNewTypeName( } // Disallow type creation in the system database. - if db.ID == keys.SystemDatabaseID { + if db.GetID() == keys.SystemDatabaseID { return nil, nil, errors.New("cannot create a type in the system database") } @@ -75,21 +75,21 @@ func resolveNewTypeName( // TypeName and returns the key for the new type descriptor, the ID of the // new type, the parent database and parent schema id. func getCreateTypeParams( - params runParams, name *tree.TypeName, db *DatabaseDescriptor, + params runParams, name *tree.TypeName, db *sqlbase.ImmutableDatabaseDescriptor, ) (sqlbase.DescriptorKey, sqlbase.ID, error) { // TODO (rohany): This should be named object key. 
- typeKey := sqlbase.MakePublicTableNameKey(params.ctx, params.ExecCfg().Settings, db.ID, name.Type()) + typeKey := sqlbase.MakePublicTableNameKey(params.ctx, params.ExecCfg().Settings, db.GetID(), name.Type()) // As of now, we can only create types in the public schema. schemaID := sqlbase.ID(keys.PublicSchemaID) exists, collided, err := sqlbase.LookupObjectID( - params.ctx, params.p.txn, params.ExecCfg().Codec, db.ID, schemaID, name.Type()) + params.ctx, params.p.txn, params.ExecCfg().Codec, db.GetID(), schemaID, name.Type()) if err == nil && exists { // Try and see what kind of object we collided with. desc, err := catalogkv.GetDescriptorByID(params.ctx, params.p.txn, params.ExecCfg().Codec, collided) if err != nil { return nil, 0, err } - return nil, 0, makeObjectAlreadyExistsError(desc, name.String()) + return nil, 0, makeObjectAlreadyExistsError(desc.DescriptorProto(), name.String()) } if err != nil { return nil, 0, err @@ -112,7 +112,7 @@ func (p *planner) createArrayType( n *tree.CreateType, typ *tree.TypeName, typDesc *sqlbase.MutableTypeDescriptor, - db *DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, ) (sqlbase.ID, error) { // Postgres starts off trying to create the type as _. 
It then // continues adding "_" to the front of the name until it doesn't find @@ -126,7 +126,7 @@ func (p *planner) createArrayType( params.ctx, params.p.txn, params.ExecCfg().Codec, - db.ID, + db.GetID(), schemaID, arrayTypeName, ) @@ -138,7 +138,7 @@ func (p *planner) createArrayType( arrayTypeKey = sqlbase.MakePublicTableNameKey( params.ctx, params.ExecCfg().Settings, - db.ID, + db.GetID(), arrayTypeName, ) break @@ -158,17 +158,22 @@ func (p *planner) createArrayType( var elemTyp *types.T switch t := typDesc.Kind; t { case sqlbase.TypeDescriptor_ENUM: - elemTyp = types.MakeEnum(uint32(typDesc.ID), uint32(id)) + elemTyp = types.MakeEnum(uint32(typDesc.GetID()), uint32(id)) default: return 0, errors.AssertionFailedf("cannot make array type for kind %s", t.String()) } // Construct the descriptor for the array type. - arrayTypDesc := sqlbase.NewMutableCreatedTypeDescriptor(sqlbase.MakeTypeDescriptor( - db.ID, keys.PublicSchemaID, id, arrayTypeName, - )) - arrayTypDesc.Kind = sqlbase.TypeDescriptor_ALIAS - arrayTypDesc.Alias = types.MakeArray(elemTyp) + // TODO(ajwerner): This is getting fixed up in a later commit to deal with + // meta, just hold on. + arrayTypDesc := sqlbase.NewMutableCreatedTypeDescriptor(sqlbase.TypeDescriptor{ + Name: arrayTypeName, + ID: id, + ParentID: db.GetID(), + ParentSchemaID: keys.PublicSchemaID, + Kind: sqlbase.TypeDescriptor_ALIAS, + Alias: types.MakeArray(elemTyp), + }) jobStr := fmt.Sprintf("implicit array type creation for %s", tree.AsStringWithFQNames(n, params.Ann())) if err := p.createDescriptorWithID( @@ -235,11 +240,14 @@ func (p *planner) createEnum(params runParams, n *tree.CreateType) error { // a free list of descriptor ID's (#48438), we should allocate an ID from // there if id + oidext.CockroachPredefinedOIDMax overflows past the // maximum uint32 value. 
- typeDesc := sqlbase.NewMutableCreatedTypeDescriptor(sqlbase.MakeTypeDescriptor( - db.ID, keys.PublicSchemaID, id, typeName.Type(), - )) - typeDesc.Kind = sqlbase.TypeDescriptor_ENUM - typeDesc.EnumMembers = members + typeDesc := sqlbase.NewMutableCreatedTypeDescriptor(sqlbase.TypeDescriptor{ + Name: typeName.Type(), + ID: id, + ParentID: db.GetID(), + ParentSchemaID: keys.PublicSchemaID, + Kind: sqlbase.TypeDescriptor_ENUM, + EnumMembers: members, + }) // Create the implicit array type for this type before finishing the type. arrayTypeID, err := p.createArrayType(params, n, typeName, typeDesc, db) diff --git a/pkg/sql/create_view.go b/pkg/sql/create_view.go index 5a08834f5367..4fe1ea8e7f6b 100644 --- a/pkg/sql/create_view.go +++ b/pkg/sql/create_view.go @@ -36,7 +36,7 @@ type createViewNode struct { ifNotExists bool replace bool temporary bool - dbDesc *sqlbase.DatabaseDescriptor + dbDesc *sqlbase.ImmutableDatabaseDescriptor columns sqlbase.ResultColumns // planDeps tracks which tables and views the view being created @@ -78,7 +78,7 @@ func (n *createViewNode) startExec(params runParams) error { var replacingDesc *sqlbase.MutableTableDescriptor - tKey, schemaID, err := getTableCreateParams(params, n.dbDesc.ID, isTemporary, viewName) + tKey, schemaID, err := getTableCreateParams(params, n.dbDesc.GetID(), isTemporary, viewName) if err != nil { switch { case !sqlbase.IsRelationAlreadyExistsError(err): @@ -137,7 +137,7 @@ func (n *createViewNode) startExec(params runParams) error { params.ctx, viewName, n.viewQuery, - n.dbDesc.ID, + n.dbDesc.GetID(), schemaID, id, n.columns, @@ -203,7 +203,7 @@ func (n *createViewNode) startExec(params runParams) error { // Log Create View event. This is an auditable log event and is // recorded in the same transaction as the table descriptor update. 
- tn := tree.MakeTableNameWithSchema(tree.Name(n.dbDesc.Name), schemaName, n.viewName) + tn := tree.MakeTableNameWithSchema(tree.Name(n.dbDesc.GetName()), schemaName, n.viewName) return MakeEventLogger(params.extendedEvalCtx.ExecCfg).InsertEventRecord( params.ctx, params.p.txn, @@ -247,7 +247,7 @@ func makeViewTableDesc( evalCtx *tree.EvalContext, temporary bool, ) (sqlbase.MutableTableDescriptor, error) { - desc := InitTableDescriptor( + desc := sqlbase.InitTableDescriptor( id, parentID, schemaID, diff --git a/pkg/sql/database.go b/pkg/sql/database.go index dd4736af13f2..db50a03d8ac9 100644 --- a/pkg/sql/database.go +++ b/pkg/sql/database.go @@ -31,11 +31,12 @@ import ( // renameDatabase implements the DatabaseDescEditor interface. func (p *planner) renameDatabase( - ctx context.Context, oldDesc *sqlbase.DatabaseDescriptor, newName string, + ctx context.Context, oldDesc *sqlbase.ImmutableDatabaseDescriptor, newName string, ) error { - oldName := oldDesc.Name - oldDesc.SetName(newName) - if err := oldDesc.Validate(); err != nil { + oldName := oldDesc.GetName() + newDesc := sqlbase.NewMutableDatabaseDescriptor(*oldDesc.DatabaseDesc()) + newDesc.SetName(newName) + if err := newDesc.Validate(); err != nil { return err } @@ -48,9 +49,9 @@ func (p *planner) renameDatabase( newKey := sqlbase.MakeDatabaseNameKey(ctx, p.ExecCfg().Settings, newName).Key(p.ExecCfg().Codec) - descID := oldDesc.GetID() + descID := newDesc.GetID() descKey := sqlbase.MakeDescMetadataKey(p.ExecCfg().Codec, descID) - descDesc := sqlbase.WrapDescriptor(oldDesc) + descDesc := newDesc.DescriptorProto() b := &kv.Batch{} if p.ExtendedEvalContext().Tracing.KVTracingEnabled() { diff --git a/pkg/sql/database_test.go b/pkg/sql/database_test.go index 37f422807aaf..85a6d70f45fa 100644 --- a/pkg/sql/database_test.go +++ b/pkg/sql/database_test.go @@ -21,7 +21,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/database" - "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" ) @@ -33,15 +32,15 @@ func TestDatabaseAccessors(t *testing.T) { defer s.Stopper().Stop(context.Background()) if err := kvDB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { - if _, err := catalogkv.GetDatabaseDescByID(ctx, txn, keys.SystemSQLCodec, sqlbase.SystemDB.ID); err != nil { + if _, err := catalogkv.GetDatabaseDescByID(ctx, txn, keys.SystemSQLCodec, keys.SystemDatabaseID); err != nil { return err } - if _, err := catalogkv.MustGetDatabaseDescByID(ctx, txn, keys.SystemSQLCodec, sqlbase.SystemDB.ID); err != nil { + if _, err := catalogkv.MustGetDatabaseDescByID(ctx, txn, keys.SystemSQLCodec, keys.SystemDatabaseID); err != nil { return err } databaseCache := database.NewCache(keys.SystemSQLCodec, config.NewSystemConfig(zonepb.DefaultZoneConfigRef())) - _, err := databaseCache.GetDatabaseDescByID(ctx, txn, sqlbase.SystemDB.ID) + _, err := databaseCache.GetDatabaseDescByID(ctx, txn, keys.SystemDatabaseID) return err }); err != nil { t.Fatal(err) diff --git a/pkg/sql/descriptor.go b/pkg/sql/descriptor.go index 37b6b1456540..88883fa84891 100644 --- a/pkg/sql/descriptor.go +++ b/pkg/sql/descriptor.go @@ -21,8 +21,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/errors" ) // @@ -47,10 +49,12 @@ var ( // state should be an error (false) or a no-op (true). // createDatabase implements the DatabaseDescEditor interface. 
func (p *planner) createDatabase( - ctx context.Context, desc *sqlbase.DatabaseDescriptor, ifNotExists bool, jobDesc string, -) (bool, error) { + ctx context.Context, database *tree.CreateDatabase, jobDesc string, +) (*sqlbase.ImmutableDatabaseDescriptor, bool, error) { + + dbName := string(database.Name) shouldCreatePublicSchema := true - dKey := sqlbase.MakeDatabaseNameKey(ctx, p.ExecCfg().Settings, desc.Name) + dKey := sqlbase.MakeDatabaseNameKey(ctx, p.ExecCfg().Settings, dbName) // TODO(solon): This conditional can be removed in 20.2. Every database // is created with a public schema for cluster version >= 20.1, so we can remove // the `shouldCreatePublicSchema` logic as well. @@ -58,23 +62,28 @@ func (p *planner) createDatabase( shouldCreatePublicSchema = false } - if exists, _, err := sqlbase.LookupDatabaseID(ctx, p.txn, p.ExecCfg().Codec, desc.Name); err == nil && exists { - if ifNotExists { + if exists, _, err := sqlbase.LookupDatabaseID(ctx, p.txn, p.ExecCfg().Codec, dbName); err == nil && exists { + if database.IfNotExists { // Noop. - return false, nil + return nil, false, nil } - return false, sqlbase.NewDatabaseAlreadyExistsError(desc.Name) + return nil, false, sqlbase.NewDatabaseAlreadyExistsError(dbName) } else if err != nil { - return false, err + return nil, false, err } id, err := catalogkv.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec) if err != nil { - return false, err + return nil, false, err } + // TODO(ajwerner): Consider whether this should be returning a + // MutableDatabaseDescriptor and where/how this will interact with the + // descs.Collection (now it happens well above this call, which is probably + // fine). 
+ desc := sqlbase.NewInitialDatabaseDescriptor(id, string(database.Name)) if err := p.createDescriptorWithID(ctx, dKey.Key(p.ExecCfg().Codec), id, desc, nil, jobDesc); err != nil { - return true, err + return nil, true, err } // TODO(solon): This check should be removed and a public schema should @@ -82,22 +91,28 @@ func (p *planner) createDatabase( if shouldCreatePublicSchema { // Every database must be initialized with the public schema. if err := p.createSchemaWithID(ctx, sqlbase.NewPublicSchemaKey(id).Key(p.ExecCfg().Codec), keys.PublicSchemaID); err != nil { - return true, err + return nil, true, err } } - return true, nil + return desc, true, nil } func (p *planner) createDescriptorWithID( ctx context.Context, idKey roachpb.Key, id sqlbase.ID, - descriptor sqlbase.DescriptorProto, + descriptor sqlbase.DescriptorInterface, st *cluster.Settings, jobDesc string, ) error { - descriptor.SetID(id) + if descriptor.GetID() == 0 { + // TODO(ajwerner): Return the error here rather than fatal. + log.Fatalf(ctx, "%v", errors.AssertionFailedf("cannot create descriptor with an empty ID: %v", descriptor)) + } + if descriptor.GetID() != id { + log.Fatalf(ctx, "%v", errors.AssertionFailedf("cannot create descriptor with an unexpected (%v) ID: %v", id, descriptor)) + } // TODO(pmattis): The error currently returned below is likely going to be // difficult to interpret. 
// diff --git a/pkg/sql/descriptor_mutation_test.go b/pkg/sql/descriptor_mutation_test.go index 76efa8ace2e0..38a1046d8456 100644 --- a/pkg/sql/descriptor_mutation_test.go +++ b/pkg/sql/descriptor_mutation_test.go @@ -36,11 +36,11 @@ type mutationTest struct { testing.TB *sqlutils.SQLRunner kvDB *kv.DB - tableDesc *sqlbase.TableDescriptor + tableDesc *sqlbase.MutableTableDescriptor } func makeMutationTest( - t *testing.T, kvDB *kv.DB, db *gosql.DB, tableDesc *sqlbase.TableDescriptor, + t *testing.T, kvDB *kv.DB, db *gosql.DB, tableDesc *sqlbase.MutableTableDescriptor, ) mutationTest { return mutationTest{ TB: t, @@ -87,7 +87,7 @@ func (mt mutationTest) makeMutationsActive() { if err := mt.kvDB.Put( context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, mt.tableDesc.ID), - sqlbase.WrapDescriptor(mt.tableDesc), + mt.tableDesc.DescriptorProto(), ); err != nil { mt.Fatal(err) } @@ -145,7 +145,7 @@ func (mt mutationTest) writeMutation(m sqlbase.DescriptorMutation) { if err := mt.kvDB.Put( context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, mt.tableDesc.ID), - sqlbase.WrapDescriptor(mt.tableDesc), + mt.tableDesc.DescriptorProto(), ); err != nil { mt.Fatal(err) } @@ -180,7 +180,8 @@ ALTER TABLE t.test ADD COLUMN i VARCHAR NOT NULL DEFAULT 'i'; } // read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") mTest := makeMutationTest(t, kvDB, sqlDB, tableDesc) // Add column "i" as a mutation in delete/write. 
@@ -239,7 +240,8 @@ CREATE INDEX allidx ON t.test (k, v); } // read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") mTest := makeMutationTest(t, kvDB, sqlDB, tableDesc) @@ -254,7 +256,8 @@ CREATE INDEX allidx ON t.test (k, v); // Init table to start state. mTest.Exec(t, `TRUNCATE TABLE t.test`) // read table descriptor - mTest.tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + mTest.tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") initRows := [][]string{{"a", "z", "q"}} for _, row := range initRows { @@ -500,7 +503,8 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v)); } // read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") mTest := makeMutationTest(t, kvDB, sqlDB, tableDesc) @@ -515,7 +519,8 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v)); t.Fatal(err) } // read table descriptor - mTest.tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + mTest.tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") initRows := [][]string{{"a", "z"}, {"b", "y"}} for _, row := range initRows { @@ -651,7 +656,8 @@ CREATE INDEX allidx ON t.test (k, v); } // read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") mTest := makeMutationTest(t, kvDB, sqlDB, tableDesc) @@ -679,7 +685,8 @@ CREATE INDEX allidx ON t.test (k, v); } // read table descriptor - mTest.tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", 
"test") + mTest.tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") initRows := [][]string{{"a", "z", "q"}, {"b", "y", "r"}} for _, row := range initRows { @@ -852,7 +859,8 @@ CREATE TABLE t.test (a STRING PRIMARY KEY, b STRING, c STRING, INDEX foo (c)); } // Read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") mt := makeMutationTest(t, kvDB, sqlDB, tableDesc) @@ -982,7 +990,7 @@ CREATE TABLE t.test (a STRING PRIMARY KEY, b STRING, c STRING, INDEX foo (c)); mt.Exec(t, `ALTER TABLE t.test RENAME COLUMN c TO d`) // The mutation in the table descriptor has changed and we would like // to update our copy to make it live. - mt.tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + mt.tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Make "ufo" live. mt.makeMutationsActive() @@ -1006,7 +1014,8 @@ CREATE TABLE t.test (a STRING PRIMARY KEY, b STRING, c STRING, INDEX foo (c)); // The mutation in the table descriptor has changed and we would like // to update our copy to make it live. - mt.tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + mt.tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") // Make column "e" live. mt.makeMutationsActive() @@ -1156,13 +1165,14 @@ func TestAddingFKs(t *testing.T) { } // Step the referencing table back to the ADD state. 
- ordersDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "orders") + ordersDesc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "orders") ordersDesc.State = sqlbase.TableDescriptor_ADD ordersDesc.Version++ if err := kvDB.Put( context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, ordersDesc.ID), - sqlbase.WrapDescriptor(ordersDesc), + ordersDesc.DescriptorProto(), ); err != nil { t.Fatal(err) } diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index cc84652464da..994c89789a53 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -172,7 +172,7 @@ func (dsp *DistSQLPlanner) SetSpanResolver(spanResolver physicalplan.SpanResolve } // distSQLExprCheckVisitor is a tree.Visitor that checks if expressions -// contain things not supported by distSQL, like distSQL-blacklisted functions. +// contain things not supported by distSQL, like distSQL-blocklisted functions. type distSQLExprCheckVisitor struct { err error } @@ -185,7 +185,7 @@ func (v *distSQLExprCheckVisitor) VisitPre(expr tree.Expr) (recurse bool, newExp } switch t := expr.(type) { case *tree.FuncExpr: - if t.IsDistSQLBlacklist() { + if t.IsDistSQLBlocklist() { v.err = newQueryNotSupportedErrorf("function %s cannot be executed with distsql", t) return false, expr } @@ -206,7 +206,7 @@ func (v *distSQLExprCheckVisitor) VisitPre(expr tree.Expr) (recurse bool, newExp func (v *distSQLExprCheckVisitor) VisitPost(expr tree.Expr) tree.Expr { return expr } // checkExpr verifies that an expression doesn't contain things that are not yet -// supported by distSQL, like distSQL-blacklisted functions. +// supported by distSQL, like distSQL-blocklisted functions. 
func checkExpr(expr tree.Expr) error { if expr == nil { return nil @@ -908,38 +908,29 @@ func tableOrdinal( panic(fmt.Sprintf("column %d not in desc.Columns", colID)) } -// getScanNodeToTableOrdinalMap returns a map from scan node column ordinal to -// table reader column ordinal. Returns nil if the map is identity. -// -// scanNodes can have columns set up in a few different ways, depending on the -// colCfg. The heuristic planner always creates scanNodes with all public -// columns (even if some of them aren't even in the index we are scanning). -// The optimizer creates scanNodes with a specific set of wanted columns; in -// this case we have to create a map from scanNode column ordinal to table -// column ordinal (which is what the TableReader uses). -func getScanNodeToTableOrdinalMap(n *scanNode) []int { - if n.colCfg.wantedColumns == nil { - return nil - } - if n.colCfg.addUnwantedAsHidden { - panic("addUnwantedAsHidden not supported") - } - res := make([]int, len(n.cols)) +// toTableOrdinals returns a mapping from column ordinals in cols to table +// reader column ordinals. +func toTableOrdinals( + cols []sqlbase.ColumnDescriptor, + desc *sqlbase.ImmutableTableDescriptor, + visibility execinfrapb.ScanVisibility, +) []int { + res := make([]int, len(cols)) for i := range res { - res[i] = tableOrdinal(n.desc, n.cols[i].ID, n.colCfg.visibility) + res[i] = tableOrdinal(desc, cols[i].ID, visibility) } return res } -// getOutputColumnsFromScanNode returns the indices of the columns that are -// returned by a scanNode. +// getOutputColumnsFromColsForScan returns the indices of the columns that are +// returned by a scanNode or a tableReader. // If remap is not nil, the column ordinals are remapped accordingly. 
-func getOutputColumnsFromScanNode(n *scanNode, remap []int) []uint32 { - outputColumns := make([]uint32, 0, len(n.cols)) +func getOutputColumnsFromColsForScan(cols []sqlbase.ColumnDescriptor, remap []int) []uint32 { + outputColumns := make([]uint32, 0, len(cols)) // TODO(radu): if we have a scan with a filter, cols will include the // columns needed for the filter, even if they aren't needed for the next // stage. - for i := 0; i < len(n.cols); i++ { + for i := 0; i < len(cols); i++ { colIdx := i if remap != nil { colIdx = remap[i] @@ -1040,66 +1031,114 @@ func (dsp *DistSQLPlanner) CheckNodeHealthAndVersion( func (dsp *DistSQLPlanner) createTableReaders( planCtx *PlanningCtx, n *scanNode, ) (*PhysicalPlan, error) { - scanNodeToTableOrdinalMap := getScanNodeToTableOrdinalMap(n) + if n.colCfg.addUnwantedAsHidden { + panic("addUnwantedAsHidden not supported") + } + // scanNodeToTableOrdinalMap is a map from scan node column ordinal to + // table reader column ordinal. + scanNodeToTableOrdinalMap := toTableOrdinals(n.cols, n.desc, n.colCfg.visibility) spec, post, err := initTableReaderSpec(n, planCtx, scanNodeToTableOrdinalMap) if err != nil { return nil, err } - var spanPartitions []SpanPartition + var p PhysicalPlan + err = dsp.planTableReaders( + planCtx, + &p, + &tableReaderPlanningInfo{ + spec: spec, + post: post, + desc: n.desc, + spans: n.spans, + reverse: n.reverse, + scanVisibility: n.colCfg.visibility, + maxResults: n.maxResults, + estimatedRowCount: n.estimatedRowCount, + reqOrdering: n.reqOrdering, + cols: n.cols, + colsToTableOrdrinalMap: scanNodeToTableOrdinalMap, + }, + ) + return &p, err +} + +// tableReaderPlanningInfo is a utility struct that contains the information +// needed to perform the physical planning of table readers once the specs have +// been created. See scanNode to get more context on some of the fields. 
+type tableReaderPlanningInfo struct { + spec *execinfrapb.TableReaderSpec + post execinfrapb.PostProcessSpec + desc *sqlbase.ImmutableTableDescriptor + spans []roachpb.Span + reverse bool + scanVisibility execinfrapb.ScanVisibility + maxResults uint64 + estimatedRowCount uint64 + reqOrdering ReqOrdering + cols []sqlbase.ColumnDescriptor + colsToTableOrdrinalMap []int +} + +func (dsp *DistSQLPlanner) planTableReaders( + planCtx *PlanningCtx, p *PhysicalPlan, info *tableReaderPlanningInfo, +) error { + var ( + spanPartitions []SpanPartition + err error + ) if planCtx.isLocal { - spanPartitions = []SpanPartition{{dsp.nodeDesc.NodeID, n.spans}} - } else if n.hardLimit == 0 { + spanPartitions = []SpanPartition{{dsp.nodeDesc.NodeID, info.spans}} + } else if info.post.Limit == 0 { // No hard limit - plan all table readers where their data live. Note // that we're ignoring soft limits for now since the TableReader will // still read too eagerly in the soft limit case. To prevent this we'll // need a new mechanism on the execution side to modulate table reads. // TODO(yuzefovich): add that mechanism. - spanPartitions, err = dsp.PartitionSpans(planCtx, n.spans) + spanPartitions, err = dsp.PartitionSpans(planCtx, info.spans) if err != nil { - return nil, err + return err } } else { // If the scan has a hard limit, use a single TableReader to avoid // reading more rows than necessary. 
- nodeID, err := dsp.getNodeIDForScan(planCtx, n.spans, n.reverse) + nodeID, err := dsp.getNodeIDForScan(planCtx, info.spans, info.reverse) if err != nil { - return nil, err + return err } - spanPartitions = []SpanPartition{{nodeID, n.spans}} + spanPartitions = []SpanPartition{{nodeID, info.spans}} } - var p PhysicalPlan stageID := p.NewStageID() p.ResultRouters = make([]physicalplan.ProcessorIdx, len(spanPartitions)) p.Processors = make([]physicalplan.Processor, 0, len(spanPartitions)) - returnMutations := n.colCfg.visibility == execinfra.ScanVisibilityPublicAndNotPublic + returnMutations := info.scanVisibility == execinfra.ScanVisibilityPublicAndNotPublic for i, sp := range spanPartitions { var tr *execinfrapb.TableReaderSpec if i == 0 { // For the first span partition, we can just directly use the spec we made // above. - tr = spec + tr = info.spec } else { // For the rest, we have to copy the spec into a fresh spec. tr = physicalplan.NewTableReaderSpec() // Grab the Spans field of the new spec, and reuse it in case the pooled // TableReaderSpec we got has pre-allocated Spans memory. 
newSpansSlice := tr.Spans - *tr = *spec + *tr = *info.spec tr.Spans = newSpansSlice } for j := range sp.Spans { tr.Spans = append(tr.Spans, execinfrapb.TableReaderSpan{Span: sp.Spans[j]}) } - tr.MaxResults = n.maxResults - p.TotalEstimatedScannedRows += n.estimatedRowCount - if n.estimatedRowCount > p.MaxEstimatedRowCount { - p.MaxEstimatedRowCount = n.estimatedRowCount + tr.MaxResults = info.maxResults + p.TotalEstimatedScannedRows += info.estimatedRowCount + if info.estimatedRowCount > p.MaxEstimatedRowCount { + p.MaxEstimatedRowCount = info.estimatedRowCount } proc := physicalplan.Processor{ @@ -1115,47 +1154,47 @@ func (dsp *DistSQLPlanner) createTableReaders( p.ResultRouters[i] = pIdx } - if len(p.ResultRouters) > 1 && len(n.reqOrdering) > 0 { + if len(p.ResultRouters) > 1 && len(info.reqOrdering) > 0 { // Make a note of the fact that we have to maintain a certain ordering // between the parallel streams. // // This information is taken into account by the AddProjection call below: // specifically, it will make sure these columns are kept even if they are // not in the projection (e.g. "SELECT v FROM kv ORDER BY k"). 
- p.SetMergeOrdering(dsp.convertOrdering(n.reqOrdering, scanNodeToTableOrdinalMap)) + p.SetMergeOrdering(dsp.convertOrdering(info.reqOrdering, info.colsToTableOrdrinalMap)) } var typs []*types.T if returnMutations { - typs = make([]*types.T, 0, len(n.desc.Columns)+len(n.desc.MutationColumns())) + typs = make([]*types.T, 0, len(info.desc.Columns)+len(info.desc.MutationColumns())) } else { - typs = make([]*types.T, 0, len(n.desc.Columns)) + typs = make([]*types.T, 0, len(info.desc.Columns)) } - for i := range n.desc.Columns { - typs = append(typs, n.desc.Columns[i].Type) + for i := range info.desc.Columns { + typs = append(typs, info.desc.Columns[i].Type) } if returnMutations { - for _, col := range n.desc.MutationColumns() { + for _, col := range info.desc.MutationColumns() { typs = append(typs, col.Type) } } - p.SetLastStagePost(post, typs) + p.SetLastStagePost(info.post, typs) - outCols := getOutputColumnsFromScanNode(n, scanNodeToTableOrdinalMap) - planToStreamColMap := make([]int, len(n.cols)) - descColumnIDs := make([]sqlbase.ColumnID, 0, len(n.desc.Columns)) - for i := range n.desc.Columns { - descColumnIDs = append(descColumnIDs, n.desc.Columns[i].ID) + outCols := getOutputColumnsFromColsForScan(info.cols, info.colsToTableOrdrinalMap) + planToStreamColMap := make([]int, len(info.cols)) + descColumnIDs := make([]sqlbase.ColumnID, 0, len(info.desc.Columns)) + for i := range info.desc.Columns { + descColumnIDs = append(descColumnIDs, info.desc.Columns[i].ID) } if returnMutations { - for _, c := range n.desc.MutationColumns() { + for _, c := range info.desc.MutationColumns() { descColumnIDs = append(descColumnIDs, c.ID) } } for i := range planToStreamColMap { planToStreamColMap[i] = -1 for j, c := range outCols { - if descColumnIDs[c] == n.cols[i].ID { + if descColumnIDs[c] == info.cols[i].ID { planToStreamColMap[i] = j break } @@ -1164,7 +1203,7 @@ func (dsp *DistSQLPlanner) createTableReaders( p.AddProjection(outCols) p.PlanToStreamColMap = planToStreamColMap - 
return &p, nil + return nil } // selectRenders takes a PhysicalPlan that produces the results corresponding to @@ -2068,6 +2107,14 @@ func (dsp *DistSQLPlanner) createPlanForZigzagJoin( return plan, nil } +func getTypesFromResultColumns(cols sqlbase.ResultColumns) []*types.T { + typs := make([]*types.T, len(cols)) + for i, col := range cols { + typs[i] = col.Typ + } + return typs +} + // getTypesForPlanResult returns the types of the elements in the result streams // of a plan that corresponds to a given planNode. If planToStreamColMap is nil, // a 1-1 mapping is assumed. @@ -2075,11 +2122,7 @@ func getTypesForPlanResult(node planNode, planToStreamColMap []int) ([]*types.T, nodeColumns := planColumns(node) if planToStreamColMap == nil { // No remapping. - types := make([]*types.T, len(nodeColumns)) - for i := range nodeColumns { - types[i] = nodeColumns[i].Typ - } - return types, nil + return getTypesFromResultColumns(nodeColumns), nil } numCols := 0 for _, streamCol := range planToStreamColMap { diff --git a/pkg/sql/distsql_spec_exec_factory.go b/pkg/sql/distsql_spec_exec_factory.go index c07c705e5182..25e379e9db80 100644 --- a/pkg/sql/distsql_spec_exec_factory.go +++ b/pkg/sql/distsql_spec_exec_factory.go @@ -12,22 +12,28 @@ package sql import ( "github.com/cockroachdb/cockroach/pkg/geo/geoindex" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" + "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/span" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" ) type distSQLSpecExecFactory struct { + planner *planner + dsp *DistSQLPlanner } var 
_ exec.Factory = &distSQLSpecExecFactory{} -func newDistSQLSpecExecFactory() exec.Factory { - return &distSQLSpecExecFactory{} +func newDistSQLSpecExecFactory(p *planner) exec.Factory { + return &distSQLSpecExecFactory{planner: p, dsp: p.extendedEvalCtx.DistSQLPlanner} } func (e *distSQLSpecExecFactory) ConstructValues( @@ -36,6 +42,9 @@ func (e *distSQLSpecExecFactory) ConstructValues( return nil, unimplemented.NewWithIssue(47473, "experimental opt-driven distsql planning") } +// ConstructScan implements exec.Factory interface by combining the logic that +// performs scanNode creation of execFactory.ConstructScan and physical +// planning of table readers of DistSQLPlanner.createTableReaders. func (e *distSQLSpecExecFactory) ConstructScan( table cat.Table, index cat.Index, @@ -49,12 +58,135 @@ func (e *distSQLSpecExecFactory) ConstructScan( rowCount float64, locking *tree.LockingItem, ) (exec.Node, error) { - return nil, unimplemented.NewWithIssue(47473, "experimental opt-driven distsql planning") + if table.IsVirtualTable() { + return nil, unimplemented.NewWithIssue(47473, "experimental opt-driven distsql planning") + } + + var p PhysicalPlan + // Although we don't yet recommend distributing plans where soft limits + // propagate to scan nodes because we don't have infrastructure to only + // plan for a few ranges at a time, the propagation of the soft limits + // to scan nodes has been added in 20.1 release, so to keep the + // previous behavior we continue to ignore the soft limits for now. + // TODO(yuzefovich): pay attention to the soft limits. + recommendation := canDistribute + + // Phase 1: set up all necessary infrastructure for table reader planning + // below. This phase is equivalent to what execFactory.ConstructScan does. 
+ tabDesc := table.(*optTable).desc + indexDesc := index.(*optIndex).desc + colCfg := makeScanColumnsConfig(table, needed) + sb := span.MakeBuilder(e.planner.ExecCfg().Codec, tabDesc.TableDesc(), indexDesc) + + // Note that initColsForScan and setting ResultColumns below are equivalent + // to what scan.initTable call does in execFactory.ConstructScan. + cols, err := initColsForScan(tabDesc, colCfg) + if err != nil { + return nil, err + } + p.ResultColumns = sqlbase.ResultColumnsFromColDescs(tabDesc.GetID(), cols) + + if indexConstraint != nil && indexConstraint.IsContradiction() { + // TODO(yuzefovich): once ConstructValues is implemented, consider + // calling it here. + physPlan, err := e.dsp.createValuesPlan( + getTypesFromResultColumns(p.ResultColumns), 0 /* numRows */, nil, /* rawBytes */ + ) + return planMaybePhysical{physPlan: physPlan, recommendation: canDistribute}, err + } + + // TODO(yuzefovich): scanNode adds "parallel" attribute in walk.go when + // scanNode.canParallelize() returns true. We should plumb that info from + // here somehow as well. + var spans roachpb.Spans + spans, err = sb.SpansFromConstraint(indexConstraint, needed, false /* forDelete */) + if err != nil { + return nil, err + } + isFullTableScan := len(spans) == 1 && spans[0].EqualValue( + tabDesc.IndexSpan(e.planner.ExecCfg().Codec, indexDesc.ID), + ) + if err = colCfg.assertValidReqOrdering(reqOrdering); err != nil { + return nil, err + } + + // Check if we are doing a full scan. + if isFullTableScan { + recommendation = recommendation.compose(shouldDistribute) + } + + // Phase 2: perform the table reader planning. This phase is equivalent to + // what DistSQLPlanner.createTableReaders does. 
+ colsToTableOrdinalMap := toTableOrdinals(cols, tabDesc, colCfg.visibility) + trSpec := physicalplan.NewTableReaderSpec() + *trSpec = execinfrapb.TableReaderSpec{ + Table: *tabDesc.TableDesc(), + Reverse: reverse, + IsCheck: false, + Visibility: colCfg.visibility, + // Retain the capacity of the spans slice. + Spans: trSpec.Spans[:0], + } + trSpec.IndexIdx, err = getIndexIdx(indexDesc, tabDesc) + if err != nil { + return nil, err + } + if locking != nil { + trSpec.LockingStrength = sqlbase.ToScanLockingStrength(locking.Strength) + trSpec.LockingWaitPolicy = sqlbase.ToScanLockingWaitPolicy(locking.WaitPolicy) + if trSpec.LockingStrength != sqlbase.ScanLockingStrength_FOR_NONE { + // Scans that are performing row-level locking cannot currently be + // distributed because their locks would not be propagated back to + // the root transaction coordinator. + // TODO(nvanbenschoten): lift this restriction. + recommendation = cannotDistribute + } + } + + // Note that we don't do anything about the possible filter here since we + // don't know yet whether we will have it. ConstructFilter is responsible + // for pushing the filter down into the post-processing stage of this scan. 
+ post := execinfrapb.PostProcessSpec{} + if hardLimit != 0 { + post.Limit = uint64(hardLimit) + } else if softLimit != 0 { + trSpec.LimitHint = softLimit + } + + distribute := shouldDistributeGivenRecAndMode(recommendation, e.planner.extendedEvalCtx.SessionData.DistSQLMode) + if _, singleTenant := e.planner.execCfg.NodeID.OptionalNodeID(); !singleTenant { + distribute = false + } + + evalCtx := e.planner.ExtendedEvalContext() + planCtx := e.dsp.NewPlanningCtx(evalCtx.Context, evalCtx, e.planner.txn, distribute) + err = e.dsp.planTableReaders( + planCtx, + &p, + &tableReaderPlanningInfo{ + spec: trSpec, + post: post, + desc: tabDesc, + spans: spans, + reverse: reverse, + scanVisibility: colCfg.visibility, + maxResults: maxResults, + estimatedRowCount: uint64(rowCount), + reqOrdering: ReqOrdering(reqOrdering), + cols: cols, + colsToTableOrdrinalMap: colsToTableOrdinalMap, + }, + ) + + return planMaybePhysical{physPlan: &p, recommendation: recommendation}, err } func (e *distSQLSpecExecFactory) ConstructFilter( n exec.Node, filter tree.TypedExpr, reqOrdering exec.OutputOrdering, ) (exec.Node, error) { + // TODO(yuzefovich): figure out how to push the filter into the table + // reader when it already doesn't have a filter and it doesn't have a hard + // limit. 
return nil, unimplemented.NewWithIssue(47473, "experimental opt-driven distsql planning") } @@ -229,13 +361,17 @@ func (e *distSQLSpecExecFactory) ConstructWindow( func (e *distSQLSpecExecFactory) RenameColumns( input exec.Node, colNames []string, ) (exec.Node, error) { - return nil, unimplemented.NewWithIssue(47473, "experimental opt-driven distsql planning") + inputCols := input.(planMaybePhysical).physPlan.ResultColumns + for i := range inputCols { + inputCols[i].Name = colNames[i] + } + return input, nil } func (e *distSQLSpecExecFactory) ConstructPlan( root exec.Node, subqueries []exec.Subquery, cascades []exec.Cascade, checks []exec.Node, ) (exec.Plan, error) { - return nil, unimplemented.NewWithIssue(47473, "experimental opt-driven distsql planning") + return constructPlan(e.planner, root, subqueries, cascades, checks) } func (e *distSQLSpecExecFactory) ConstructExplainOpt( diff --git a/pkg/sql/drop_database.go b/pkg/sql/drop_database.go index d40e6fe2c52f..3e7f5197da47 100644 --- a/pkg/sql/drop_database.go +++ b/pkg/sql/drop_database.go @@ -33,7 +33,7 @@ import ( type dropDatabaseNode struct { n *tree.DropDatabase - dbDesc *sqlbase.DatabaseDescriptor + dbDesc *sqlbase.ImmutableDatabaseDescriptor td []toDelete schemasToDelete []string } @@ -69,7 +69,7 @@ func (p *planner) DropDatabase(ctx context.Context, n *tree.DropDatabase) (planN return nil, err } - schemas, err := p.Tables().GetSchemasForDatabase(ctx, p.txn, dbDesc.ID) + schemas, err := p.Tables().GetSchemasForDatabase(ctx, p.txn, dbDesc.GetID()) if err != nil { return nil, err } @@ -92,7 +92,7 @@ func (p *planner) DropDatabase(ctx context.Context, n *tree.DropDatabase) (planN case tree.DropRestrict: return nil, pgerror.Newf(pgcode.DependentObjectsStillExist, "database %q is not empty and RESTRICT was specified", - tree.ErrNameStringP(&dbDesc.Name)) + tree.ErrNameString(dbDesc.GetName())) case tree.DropDefault: // The default is CASCADE, however be cautious if CASCADE was // not specified explicitly. 
@@ -130,10 +130,11 @@ func (p *planner) DropDatabase(ctx context.Context, n *tree.DropDatabase) (planN ) } if tbDesc.State == sqlbase.TableDescriptor_OFFLINE { + dbName := dbDesc.GetName() return nil, pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "cannot drop a database with OFFLINE tables, ensure %s is"+ " dropped or made public before dropping database %s", - tbName.String(), tree.AsString((*tree.Name)(&dbDesc.Name))) + tbName.String(), tree.AsString((*tree.Name)(&dbName))) } if err := p.prepareDropWithTableDesc(ctx, tbDesc); err != nil { return nil, err @@ -173,7 +174,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { tableDescs = append(tableDescs, toDel.desc) } if err := p.createDropDatabaseJob( - ctx, n.dbDesc.ID, droppedTableDetails, tree.AsStringWithFQNames(n.n, params.Ann()), + ctx, n.dbDesc.GetID(), droppedTableDetails, tree.AsStringWithFQNames(n.n, params.Ann()), ); err != nil { return err } @@ -200,7 +201,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { tbNameStrings = append(tbNameStrings, toDel.tn.FQString()) } - descKey := sqlbase.MakeDescMetadataKey(p.ExecCfg().Codec, n.dbDesc.ID) + descKey := sqlbase.MakeDescMetadataKey(p.ExecCfg().Codec, n.dbDesc.GetID()) b := &kv.Batch{} if p.ExtendedEvalContext().Tracing.KVTracingEnabled() { @@ -213,7 +214,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { ctx, p.txn, p.ExecCfg().Codec, - n.dbDesc.ID, + n.dbDesc.GetID(), schemaToDelete, ); err != nil { return err @@ -221,7 +222,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { } err := sqlbase.RemoveDatabaseNamespaceEntry( - ctx, p.txn, p.ExecCfg().Codec, n.dbDesc.Name, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), + ctx, p.txn, p.ExecCfg().Codec, n.dbDesc.GetName(), p.ExtendedEvalContext().Tracing.KVTracingEnabled(), ) if err != nil { return err @@ -230,7 +231,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { // No job was created because no tables were dropped, 
so zone config can be // immediately removed, if applicable. if len(tableDescs) == 0 && params.ExecCfg().Codec.ForSystemTenant() { - zoneKeyPrefix := config.MakeZoneKeyPrefix(config.SystemTenantObjectID(n.dbDesc.ID)) + zoneKeyPrefix := config.MakeZoneKeyPrefix(config.SystemTenantObjectID(n.dbDesc.GetID())) if p.ExtendedEvalContext().Tracing.KVTracingEnabled() { log.VEventf(ctx, 2, "DelRange %s", zoneKeyPrefix) } @@ -238,13 +239,13 @@ func (n *dropDatabaseNode) startExec(params runParams) error { b.DelRange(zoneKeyPrefix, zoneKeyPrefix.PrefixEnd(), false /* returnKeys */) } - p.Tables().AddUncommittedDatabase(n.dbDesc.Name, n.dbDesc.ID, descs.DBDropped) + p.Tables().AddUncommittedDatabase(n.dbDesc.GetName(), n.dbDesc.GetID(), descs.DBDropped) if err := p.txn.Run(ctx, b); err != nil { return err } - if err := p.removeDbComment(ctx, n.dbDesc.ID); err != nil { + if err := p.removeDbComment(ctx, n.dbDesc.GetID()); err != nil { return err } @@ -254,7 +255,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { ctx, p.txn, EventLogDropDatabase, - int32(n.dbDesc.ID), + int32(n.dbDesc.GetID()), int32(params.extendedEvalCtx.NodeID.SQLInstanceID()), struct { DatabaseName string diff --git a/pkg/sql/drop_role.go b/pkg/sql/drop_role.go index 143bf8d46539..12f1453df26e 100644 --- a/pkg/sql/drop_role.go +++ b/pkg/sql/drop_role.go @@ -90,13 +90,13 @@ func (n *DropRoleNode) startExec(params runParams) error { // First check all the databases. 
if err := forEachDatabaseDesc(params.ctx, params.p, nil /*nil prefix = all databases*/, true, /* requiresPrivileges */ - func(db *sqlbase.DatabaseDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor) error { for _, u := range db.GetPrivileges().Users { if _, ok := userNames[u.User]; ok { if f.Len() > 0 { f.WriteString(", ") } - f.FormatNameP(&db.Name) + f.FormatName(db.GetName()) break } } @@ -118,7 +118,7 @@ func (n *DropRoleNode) startExec(params runParams) error { lCtx := newInternalLookupCtx(descs, nil /*prefix - we want all descriptors */) for _, tbID := range lCtx.tbIDs { table := lCtx.tbDescs[tbID] - if !tableIsVisible(table, true /*allowAdding*/) { + if !tableIsVisible(table.TableDesc(), true /*allowAdding*/) { continue } for _, u := range table.GetPrivileges().Users { @@ -126,8 +126,8 @@ func (n *DropRoleNode) startExec(params runParams) error { if f.Len() > 0 { f.WriteString(", ") } - parentName := lCtx.getParentName(table) - tn := tree.MakeTableName(tree.Name(parentName), tree.Name(table.Name)) + parentName := lCtx.getParentName(table.TableDesc()) + tn := tree.MakeTableName(tree.Name(parentName), tree.Name(table.GetName())) f.FormatNode(&tn) break } diff --git a/pkg/sql/drop_table.go b/pkg/sql/drop_table.go index 8bfac2c2f0d2..80ffa862d58c 100644 --- a/pkg/sql/drop_table.go +++ b/pkg/sql/drop_table.go @@ -380,7 +380,7 @@ func (p *planner) initiateDropTable( parentSchemaID := tableDesc.GetParentSchemaID() // Queue up name for draining. 
- nameDetails := sqlbase.TableDescriptor_NameInfo{ + nameDetails := sqlbase.NameInfo{ ParentID: tableDesc.ParentID, ParentSchemaID: parentSchemaID, Name: tableDesc.Name} diff --git a/pkg/sql/drop_test.go b/pkg/sql/drop_test.go index 0936832bf872..dea087564057 100644 --- a/pkg/sql/drop_test.go +++ b/pkg/sql/drop_test.go @@ -126,9 +126,8 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); if err := kvDB.GetProto(ctx, dbDescKey, desc); err != nil { t.Fatal(err) } - dbDesc := desc.GetDatabase() - - tbNameKey := sqlbase.NewPublicTableKey(dbDesc.ID, "kv").Key(keys.SystemSQLCodec) + dbDesc := sqlbase.NewImmutableDatabaseDescriptor(*desc.GetDatabase()) + tbNameKey := sqlbase.NewPublicTableKey(dbDesc.GetID(), "kv").Key(keys.SystemSQLCodec) gr, err := kvDB.Get(ctx, tbNameKey) if err != nil { t.Fatal(err) @@ -152,14 +151,14 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tbDesc.ID, buf); err != nil { t.Fatal(err) } - if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, dbDesc.ID, buf); err != nil { + if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, dbDesc.GetID(), buf); err != nil { t.Fatal(err) } if err := zoneExists(sqlDB, &cfg, tbDesc.ID); err != nil { t.Fatal(err) } - if err := zoneExists(sqlDB, &cfg, dbDesc.ID); err != nil { + if err := zoneExists(sqlDB, &cfg, dbDesc.GetID()); err != nil { t.Fatal(err) } @@ -188,11 +187,11 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); t.Fatalf("table descriptor key still exists after database is dropped") } - if err := descExists(sqlDB, false, dbDesc.ID); err != nil { + if err := descExists(sqlDB, false, dbDesc.GetID()); err != nil { t.Fatal(err) } // Database zone config is removed once all table data and zone configs are removed. 
- if err := zoneExists(sqlDB, &cfg, dbDesc.ID); err != nil { + if err := zoneExists(sqlDB, &cfg, dbDesc.GetID()); err != nil { t.Fatal(err) } @@ -209,7 +208,7 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); sqlRun := sqlutils.MakeSQLRunner(sqlDB) // There are no more namespace entries referencing this database as its // parent. - namespaceQuery := fmt.Sprintf(`SELECT * FROM system.namespace WHERE "parentID" = %d`, dbDesc.ID) + namespaceQuery := fmt.Sprintf(`SELECT * FROM system.namespace WHERE "parentID" = %d`, dbDesc.GetID()) sqlRun.CheckQueryResults(t, namespaceQuery, [][]string{}) // Job still running, waiting for GC. @@ -309,9 +308,9 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); if err := kvDB.GetProto(ctx, dbDescKey, desc); err != nil { t.Fatal(err) } - dbDesc := desc.GetDatabase() + dbDesc := sqlbase.NewImmutableDatabaseDescriptor(*desc.GetDatabase()) - tKey := sqlbase.NewPublicTableKey(dbDesc.ID, "kv") + tKey := sqlbase.NewPublicTableKey(dbDesc.GetID(), "kv") gr, err := kvDB.Get(ctx, tKey.Key(keys.SystemSQLCodec)) if err != nil { t.Fatal(err) @@ -326,7 +325,7 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); } tbDesc := desc.Table(ts) - t2Key := sqlbase.NewPublicTableKey(dbDesc.ID, "kv2") + t2Key := sqlbase.NewPublicTableKey(dbDesc.GetID(), "kv2") gr2, err := kvDB.Get(ctx, t2Key.Key(keys.SystemSQLCodec)) if err != nil { t.Fatal(err) @@ -346,7 +345,7 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); tests.CheckKeyCount(t, kvDB, tableSpan, 6) tests.CheckKeyCount(t, kvDB, table2Span, 6) - if _, err := sqltestutils.AddDefaultZoneConfig(sqlDB, dbDesc.ID); err != nil { + if _, err := sqltestutils.AddDefaultZoneConfig(sqlDB, dbDesc.GetID()); err != nil { t.Fatal(err) } @@ -395,7 +394,7 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); tests.CheckKeyCount(t, kvDB, table2Span, 6) def := zonepb.DefaultZoneConfig() - if err := zoneExists(sqlDB, &def, dbDesc.ID); err != nil { + if err := 
zoneExists(sqlDB, &def, dbDesc.GetID()); err != nil { t.Fatal(err) } @@ -412,7 +411,7 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, tb2Desc.ID); err != nil { t.Fatal(err) } - if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, dbDesc.ID); err != nil { + if _, err := sqltestutils.AddImmediateGCZoneConfig(sqlDB, dbDesc.GetID()); err != nil { t.Fatal(err) } @@ -438,7 +437,7 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); } // Database zone config is removed once all table data and zone configs are removed. - if err := zoneExists(sqlDB, nil, dbDesc.ID); err != nil { + if err := zoneExists(sqlDB, nil, dbDesc.GetID()); err != nil { t.Fatal(err) } } @@ -918,13 +917,15 @@ func TestDropTableDeleteData(t *testing.T) { } } -func writeTableDesc(ctx context.Context, db *kv.DB, tableDesc *sqlbase.TableDescriptor) error { +func writeTableDesc( + ctx context.Context, db *kv.DB, tableDesc *sqlbase.MutableTableDescriptor, +) error { return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } tableDesc.ModificationTime = txn.CommitTimestamp() - return txn.Put(ctx, sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), sqlbase.WrapDescriptor(tableDesc)) + return txn.Put(ctx, sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), tableDesc.DescriptorProto()) }) } @@ -959,7 +960,7 @@ func TestDropTableWhileUpgradingFormat(t *testing.T) { sqlutils.CreateTable(t, sqlDBRaw, "t", "a INT", numRows, sqlutils.ToRowFn(sqlutils.RowIdxFn)) // Give the table an old format version. 
- tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") tableDesc.FormatVersion = sqlbase.FamilyFormatVersion tableDesc.Version++ if err := writeTableDesc(ctx, kvDB, tableDesc); err != nil { @@ -974,7 +975,7 @@ func TestDropTableWhileUpgradingFormat(t *testing.T) { // Simulate a migration upgrading the table descriptor's format version after // the table has been dropped but before the truncation has occurred. var err error - tableDesc, err = sqlbase.GetTableDescFromID(ctx, kvDB.NewTxn(ctx, ""), keys.SystemSQLCodec, tableDesc.ID) + tableDesc, err = sqlbase.GetMutableTableDescFromID(ctx, kvDB.NewTxn(ctx, ""), keys.SystemSQLCodec, tableDesc.ID) if err != nil { t.Fatal(err) } diff --git a/pkg/sql/exec_factory_util.go b/pkg/sql/exec_factory_util.go new file mode 100644 index 000000000000..272873e051a9 --- /dev/null +++ b/pkg/sql/exec_factory_util.go @@ -0,0 +1,106 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package sql + +import ( + "fmt" + + "github.com/cockroachdb/cockroach/pkg/sql/execinfra" + "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" + "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" + "github.com/cockroachdb/cockroach/pkg/sql/rowexec" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" + "github.com/cockroachdb/errors" +) + +func constructPlan( + planner *planner, + root exec.Node, + subqueries []exec.Subquery, + cascades []exec.Cascade, + checks []exec.Node, +) (exec.Plan, error) { + res := &planTop{ + // TODO(radu): these fields can be modified by planning various opaque + // statements. We should have a cleaner way of plumbing these. + avoidBuffering: planner.curPlan.avoidBuffering, + auditEvents: planner.curPlan.auditEvents, + instrumentation: planner.curPlan.instrumentation, + } + assignPlan := func(plan *planMaybePhysical, node exec.Node) { + switch n := node.(type) { + case planNode: + plan.planNode = n + case planMaybePhysical: + *plan = n + default: + panic(fmt.Sprintf("unexpected node type %T", node)) + } + } + assignPlan(&res.main, root) + if len(subqueries) > 0 { + res.subqueryPlans = make([]subquery, len(subqueries)) + for i := range subqueries { + in := &subqueries[i] + out := &res.subqueryPlans[i] + out.subquery = in.ExprNode + switch in.Mode { + case exec.SubqueryExists: + out.execMode = rowexec.SubqueryExecModeExists + case exec.SubqueryOneRow: + out.execMode = rowexec.SubqueryExecModeOneRow + case exec.SubqueryAnyRows: + out.execMode = rowexec.SubqueryExecModeAllRowsNormalized + case exec.SubqueryAllRows: + out.execMode = rowexec.SubqueryExecModeAllRows + default: + return nil, errors.Errorf("invalid SubqueryMode %d", in.Mode) + } + out.expanded = true + assignPlan(&out.plan, in.Root) + } + } + if len(cascades) > 0 { + res.cascades = make([]cascadeMetadata, len(cascades)) + for i := range cascades { + res.cascades[i].Cascade = cascades[i] + } + } + if len(checks) > 0 { + 
res.checkPlans = make([]checkPlan, len(checks)) + for i := range checks { + assignPlan(&res.checkPlans[i].plan, checks[i]) + } + } + + return res, nil +} + +// makeScanColumnsConfig builds a scanColumnsConfig struct by constructing a +// list of descriptor IDs for columns in the given cols set. Columns are +// identified by their ordinal position in the table schema. +func makeScanColumnsConfig(table cat.Table, cols exec.TableColumnOrdinalSet) scanColumnsConfig { + // Set visibility=execinfra.ScanVisibilityPublicAndNotPublic, since all + // columns in the "cols" set should be projected, regardless of whether + // they're public or non-public. The caller decides which columns to + // include (or not include). Note that when wantedColumns is non-empty, + // the visibility flag will never trigger the addition of more columns. + colCfg := scanColumnsConfig{ + wantedColumns: make([]tree.ColumnID, 0, cols.Len()), + visibility: execinfra.ScanVisibilityPublicAndNotPublic, + } + for c, ok := cols.Next(0); ok; c, ok = cols.Next(c + 1) { + desc := table.Column(c).(*sqlbase.ColumnDescriptor) + colCfg.wantedColumns = append(colCfg.wantedColumns, tree.ColumnID(desc.ID)) + } + return colCfg +} diff --git a/pkg/sql/exec_log.go b/pkg/sql/exec_log.go index 27d311a24e76..ca217215e161 100644 --- a/pkg/sql/exec_log.go +++ b/pkg/sql/exec_log.go @@ -216,7 +216,7 @@ func (p *planner) maybeLogStatementInternal( // call to this method elsewhere must find a way to ensure that // contributors who later add features do not have to remember to call // this to get it right. -func (p *planner) maybeAudit(desc sqlbase.DescriptorProto, priv privilege.Kind) { +func (p *planner) maybeAudit(desc sqlbase.DescriptorInterface, priv privilege.Kind) { wantedMode := desc.GetAuditMode() if wantedMode == sqlbase.TableDescriptor_DISABLED { return @@ -233,7 +233,7 @@ func (p *planner) maybeAudit(desc sqlbase.DescriptorProto, priv privilege.Kind) // auditEvent represents an audit event for a single table. 
type auditEvent struct { // The descriptor being audited. - desc sqlbase.DescriptorProto + desc sqlbase.DescriptorInterface // Whether the event was for INSERT/DELETE/UPDATE. writing bool } diff --git a/pkg/sql/execinfrapb/data.go b/pkg/sql/execinfrapb/data.go index 357c9d3ccfa1..1103e2e6253d 100644 --- a/pkg/sql/execinfrapb/data.go +++ b/pkg/sql/execinfrapb/data.go @@ -93,8 +93,8 @@ func (tr *DistSQLTypeResolver) ResolveType( func makeTypeLookupFunc( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, ) sqlbase.TypeLookupFunc { - return func(id sqlbase.ID) (*tree.TypeName, *sqlbase.TypeDescriptor, error) { - return resolver.ResolveTypeDescByID(ctx, txn, codec, id) + return func(id sqlbase.ID) (*tree.TypeName, sqlbase.TypeDescriptorInterface, error) { + return resolver.ResolveTypeDescByID(ctx, txn, codec, id, tree.ObjectLookupFlags{}) } } diff --git a/pkg/sql/flowinfra/inbound.go b/pkg/sql/flowinfra/inbound.go index baf386f38045..2e7ef7748b3e 100644 --- a/pkg/sql/flowinfra/inbound.go +++ b/pkg/sql/flowinfra/inbound.go @@ -194,7 +194,10 @@ func processProducerMessage( if err != nil { return processMessageResult{ err: errors.Wrapf(err, "%s", - log.MakeMessage(ctx, "decoding error", nil /* args */)), + // TODO(knz): Instead of pre-formatting the string here, use + // errors.WithContextTags() here and let the error formatter + // show the tags later. 
+ log.FormatWithContextTags(ctx, "decoding error")), consumerClosed: false, } } diff --git a/pkg/sql/gcjob_test/gc_job_test.go b/pkg/sql/gcjob_test/gc_job_test.go index dfdab5f2ba2d..890773bdb0e2 100644 --- a/pkg/sql/gcjob_test/gc_job_test.go +++ b/pkg/sql/gcjob_test/gc_job_test.go @@ -77,15 +77,15 @@ func TestSchemaChangeGCJob(t *testing.T) { myTableID := sqlbase.ID(keys.MinUserDescID + 3) myOtherTableID := sqlbase.ID(keys.MinUserDescID + 4) - var myTableDesc *sqlbase.TableDescriptor - var myOtherTableDesc *sqlbase.TableDescriptor + var myTableDesc *sqlbase.MutableTableDescriptor + var myOtherTableDesc *sqlbase.MutableTableDescriptor if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error - myTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, keys.SystemSQLCodec, myTableID) + myTableDesc, err = sqlbase.GetMutableTableDescFromID(ctx, txn, keys.SystemSQLCodec, myTableID) if err != nil { return err } - myOtherTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, keys.SystemSQLCodec, myOtherTableID) + myOtherTableDesc, err = sqlbase.GetMutableTableDescFromID(ctx, txn, keys.SystemSQLCodec, myOtherTableID) return err }); err != nil { t.Fatal(err) @@ -146,10 +146,10 @@ func TestSchemaChangeGCJob(t *testing.T) { if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, myTableID) - descDesc := sqlbase.WrapDescriptor(myTableDesc) + descDesc := myTableDesc.DescriptorProto() b.Put(descKey, descDesc) descKey2 := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, myOtherTableID) - descDesc2 := sqlbase.WrapDescriptor(myOtherTableDesc) + descDesc2 := myOtherTableDesc.DescriptorProto() b.Put(descKey2, descDesc2) return txn.Run(ctx, b) }); err != nil { @@ -196,13 +196,13 @@ func TestSchemaChangeGCJob(t *testing.T) { if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error - myTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, 
keys.SystemSQLCodec, myTableID) + myTableDesc, err = sqlbase.GetMutableTableDescFromID(ctx, txn, keys.SystemSQLCodec, myTableID) if ttlTime != FUTURE && (dropItem == TABLE || dropItem == DATABASE) { // We dropped the table, so expect it to not be found. require.EqualError(t, err, "descriptor not found") return nil } - myOtherTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, keys.SystemSQLCodec, myOtherTableID) + myOtherTableDesc, err = sqlbase.GetMutableTableDescFromID(ctx, txn, keys.SystemSQLCodec, myOtherTableID) if ttlTime != FUTURE && dropItem == DATABASE { // We dropped the entire database, so expect none of the tables to be found. require.EqualError(t, err, "descriptor not found") diff --git a/pkg/sql/grant_revoke.go b/pkg/sql/grant_revoke.go index e54586492ddb..1b48081f4088 100644 --- a/pkg/sql/grant_revoke.go +++ b/pkg/sql/grant_revoke.go @@ -104,7 +104,7 @@ func (n *changePrivilegesNode) startExec(params runParams) error { } } - var descriptors []sqlbase.DescriptorProto + var descriptors []sqlbase.DescriptorInterface // DDL statements avoid the cache to avoid leases, and can view non-public descriptors. // TODO(vivek): check if the cache can be used. 
p.runWithOptions(resolveFlags{skipCache: true}, func() { @@ -142,7 +142,7 @@ func (n *changePrivilegesNode) startExec(params runParams) error { } switch d := descriptor.(type) { - case *sqlbase.DatabaseDescriptor: + case *sqlbase.ImmutableDatabaseDescriptor: if err := d.Validate(); err != nil { return err } diff --git a/pkg/sql/information_schema.go b/pkg/sql/information_schema.go index 21df6f7dcd22..55ad67bfb6d2 100644 --- a/pkg/sql/information_schema.go +++ b/pkg/sql/information_schema.go @@ -196,7 +196,7 @@ var informationSchemaAdministrableRoleAuthorizations = virtualSchemaTable{ ` + base.DocsURL("information-schema.html#administrable_role_authorizations") + ` https://www.postgresql.org/docs/9.5/infoschema-administrable-role-authorizations.html`, schema: vtable.InformationSchemaAdministrableRoleAuthorizations, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { currentUser := p.SessionData().User memberMap, err := p.MemberOfWithAdminOption(ctx, currentUser) if err != nil { @@ -228,7 +228,7 @@ var informationSchemaApplicableRoles = virtualSchemaTable{ ` + base.DocsURL("information-schema.html#applicable_roles") + ` https://www.postgresql.org/docs/9.5/infoschema-applicable-roles.html`, schema: vtable.InformationSchemaApplicableRoles, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { currentUser := p.SessionData().User memberMap, err := p.MemberOfWithAdminOption(ctx, currentUser) if err != nil { @@ -256,19 +256,19 @@ var informationSchemaCheckConstraints = virtualSchemaTable{ ` + base.DocsURL("information-schema.html#check_constraints") + ` 
https://www.postgresql.org/docs/9.5/infoschema-check-constraints.html`, schema: vtable.InformationSchemaCheckConstraints, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual /* no constraints in virtual tables */, func( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, tableLookup tableLookupFn, ) error { conInfo, err := table.GetConstraintInfoWithLookup(tableLookup.getTableByID) if err != nil { return err } - dbNameStr := tree.NewDString(db.Name) + dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(scName) for conName, con := range conInfo { // Only Check constraints are included. 
@@ -325,9 +325,11 @@ var informationSchemaColumnPrivileges = virtualSchemaTable{ ` + base.DocsURL("information-schema.html#column_privileges") + ` https://www.postgresql.org/docs/9.5/infoschema-column-privileges.html`, schema: vtable.InformationSchemaColumnPrivileges, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, virtualMany, func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor) error { - dbNameStr := tree.NewDString(db.Name) + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { + return forEachTableDesc(ctx, p, dbContext, virtualMany, func( + db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor, + ) error { + dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(scName) columndata := privilege.List{privilege.SELECT, privilege.INSERT, privilege.UPDATE} // privileges for column level granularity for _, u := range table.Privileges.Users { @@ -361,9 +363,11 @@ var informationSchemaColumnsTable = virtualSchemaTable{ ` + base.DocsURL("information-schema.html#columns") + ` https://www.postgresql.org/docs/9.5/infoschema-columns.html`, schema: vtable.InformationSchemaColumns, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { - return forEachTableDesc(ctx, p, dbContext, virtualMany, func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor) error { - dbNameStr := tree.NewDString(db.Name) + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { + return forEachTableDesc(ctx, p, dbContext, virtualMany, func( + db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor, 
+ ) error { + dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(scName) return forEachColumnInTable(table, func(column *sqlbase.ColumnDescriptor) error { collationCatalog := tree.DNull @@ -454,7 +458,7 @@ https://www.postgresql.org/docs/9.5/infoschema-enabled-roles.html`, CREATE TABLE information_schema.enabled_roles ( ROLE_NAME STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { currentUser := p.SessionData().User memberMap, err := p.MemberOfWithAdminOption(ctx, currentUser) if err != nil { @@ -585,11 +589,11 @@ CREATE TABLE information_schema.constraint_column_usage ( CONSTRAINT_SCHEMA STRING NOT NULL, CONSTRAINT_NAME STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual /* no constraints in virtual tables */, func( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, tableLookup tableLookupFn, ) error { conInfo, err := table.GetConstraintInfoWithLookup(tableLookup.getTableByID) @@ -597,10 +601,10 @@ CREATE TABLE information_schema.constraint_column_usage ( return err } scNameStr := tree.NewDString(scName) - dbNameStr := tree.NewDString(db.Name) + dbNameStr := tree.NewDString(db.GetName()) for conName, con := range conInfo { - conTable := table + conTable := table.TableDesc() conCols := con.Columns conNameStr := tree.NewDString(conName) if con.Kind == sqlbase.ConstraintTypeFK { @@ -650,18 +654,18 @@ CREATE TABLE 
information_schema.key_column_usage ( ORDINAL_POSITION INT NOT NULL, POSITION_IN_UNIQUE_CONSTRAINT INT )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual /* no constraints in virtual tables */, func( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, tableLookup tableLookupFn, ) error { conInfo, err := table.GetConstraintInfoWithLookup(tableLookup.getTableByID) if err != nil { return err } - dbNameStr := tree.NewDString(db.Name) + dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(scName) tbNameStr := tree.NewDString(table.Name) for conName, con := range conInfo { @@ -742,7 +746,7 @@ CREATE TABLE information_schema.parameters ( DTD_IDENTIFIER STRING, PARAMETER_DEFAULT STRING )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -800,14 +804,14 @@ CREATE TABLE information_schema.referential_constraints ( TABLE_NAME STRING NOT NULL, REFERENCED_TABLE_NAME STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual /* no constraints in virtual tables */, func( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, 
scName string, - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, tableLookup tableLookupFn, ) error { - dbNameStr := tree.NewDString(db.Name) + dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(scName) tbNameStr := tree.NewDString(table.Name) for i := range table.OutboundFKs { @@ -956,7 +960,7 @@ CREATE TABLE information_schema.routines ( RESULT_CAST_MAXIMUM_CARDINALITY INT, RESULT_CAST_DTD_IDENTIFIER STRING )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -967,15 +971,15 @@ var informationSchemaSchemataTable = virtualSchemaTable{ ` + base.DocsURL("information-schema.html#schemata") + ` https://www.postgresql.org/docs/9.5/infoschema-schemata.html`, schema: vtable.InformationSchemaSchemata, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachDatabaseDesc(ctx, p, dbContext, true, /* requiresPrivileges */ - func(db *sqlbase.DatabaseDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor) error { return forEachSchemaName(ctx, p, db, func(sc string) error { return addRow( - tree.NewDString(db.Name), // catalog_name - tree.NewDString(sc), // schema_name - tree.DNull, // default_character_set_name - tree.DNull, // sql_path + tree.NewDString(db.GetName()), // catalog_name + tree.NewDString(sc), // schema_name + tree.DNull, // default_character_set_name + tree.DNull, // sql_path ) }) }) @@ -994,12 +998,12 @@ CREATE TABLE information_schema.schema_privileges ( PRIVILEGE_TYPE STRING NOT NULL, IS_GRANTABLE STRING )`, - populate: func(ctx context.Context, p 
*planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachDatabaseDesc(ctx, p, dbContext, true, /* requiresPrivileges */ - func(db *sqlbase.DatabaseDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor) error { return forEachSchemaName(ctx, p, db, func(scName string) error { privs := db.Privileges.Show() - dbNameStr := tree.NewDString(db.Name) + dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(scName) // TODO(knz): This should filter for the current user, see // https://github.com/cockroachdb/cockroach/issues/35572 @@ -1058,9 +1062,9 @@ CREATE TABLE information_schema.sequences ( INCREMENT STRING NOT NULL, CYCLE_OPTION STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* no sequences in virtual schemas */ - func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor) error { if !table.IsSequence() { return nil } @@ -1103,9 +1107,9 @@ CREATE TABLE information_schema.statistics ( STORING STRING NOT NULL, IMPLICIT STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* virtual tables have no indexes */ - func(db *sqlbase.DatabaseDescriptor, scName string, table 
*sqlbase.TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor) error { dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(scName) tbNameStr := tree.NewDString(table.GetName()) @@ -1200,13 +1204,13 @@ CREATE TABLE information_schema.table_constraints ( IS_DEFERRABLE STRING NOT NULL, INITIALLY_DEFERRED STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual, /* virtual tables have no constraints */ func( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, tableLookup tableLookupFn, ) error { conInfo, err := table.GetConstraintInfoWithLookup(tableLookup.getTableByID) @@ -1214,7 +1218,7 @@ CREATE TABLE information_schema.table_constraints ( return err } - dbNameStr := tree.NewDString(db.Name) + dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(scName) tbNameStr := tree.NewDString(table.Name) @@ -1278,10 +1282,10 @@ CREATE TABLE information_schema.user_privileges ( PRIVILEGE_TYPE STRING NOT NULL, IS_GRANTABLE STRING )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachDatabaseDesc(ctx, p, dbContext, true, /* requiresPrivileges */ - func(dbDesc *DatabaseDescriptor) error { - dbNameStr := tree.NewDString(dbDesc.Name) + func(dbDesc *sqlbase.ImmutableDatabaseDescriptor) error { + dbNameStr := 
tree.NewDString(dbDesc.GetName()) for _, u := range []string{security.RootUser, sqlbase.AdminRole} { grantee := tree.NewDString(u) for _, p := range privilege.List(privilege.ByValue[:]).SortedNames() { @@ -1321,11 +1325,14 @@ CREATE TABLE information_schema.table_privileges ( // populateTablePrivileges is used to populate both table_privileges and role_table_grants. func populateTablePrivileges( - ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error, + ctx context.Context, + p *planner, + dbContext *sqlbase.ImmutableDatabaseDescriptor, + addRow func(...tree.Datum) error, ) error { return forEachTableDesc(ctx, p, dbContext, virtualMany, - func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor) error { - dbNameStr := tree.NewDString(db.Name) + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor) error { + dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(scName) tbNameStr := tree.NewDString(table.Name) // TODO(knz): This should filter for the current user, see @@ -1362,12 +1369,12 @@ var informationSchemaTablesTable = virtualSchemaTable{ ` + base.DocsURL("information-schema.html#tables") + ` https://www.postgresql.org/docs/9.5/infoschema-tables.html`, schema: vtable.InformationSchemaTables, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachTableDesc(ctx, p, dbContext, virtualMany, addTablesTableRow(addRow)) }, indexes: []virtualIndex{ { - populate: func(ctx context.Context, constraint tree.Datum, p *planner, db *DatabaseDescriptor, + populate: func(ctx context.Context, constraint tree.Datum, p *planner, db *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) (bool, error) { // This 
index is on the TABLE_NAME column. name := tree.MustBeDString(constraint) @@ -1376,11 +1383,11 @@ https://www.postgresql.org/docs/9.5/infoschema-tables.html`, if err != nil || desc == nil { return false, err } - schemaName, err := resolver.ResolveSchemaNameByID(ctx, p.txn, p.ExecCfg().Codec, db.ID, desc.GetParentSchemaID()) + schemaName, err := resolver.ResolveSchemaNameByID(ctx, p.txn, p.ExecCfg().Codec, db.GetID(), desc.GetParentSchemaID()) if err != nil { return false, err } - return true, addTablesTableRow(addRow)(db, schemaName, desc.TableDesc()) + return true, addTablesTableRow(addRow)(db, schemaName, desc) }, }, }, @@ -1388,9 +1395,9 @@ https://www.postgresql.org/docs/9.5/infoschema-tables.html`, func addTablesTableRow( addRow func(...tree.Datum) error, -) func(db *sqlbase.DatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor) error { - return func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor) error { +) func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, + table *sqlbase.ImmutableTableDescriptor) error { + return func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor) error { if table.IsSequence() { return nil } @@ -1405,7 +1412,7 @@ func addTablesTableRow( } else if table.Temporary { tableType = tableTypeTemporary } - dbNameStr := tree.NewDString(db.Name) + dbNameStr := tree.NewDString(db.GetName()) scNameStr := tree.NewDString(scName) tbNameStr := tree.NewDString(table.Name) return addRow( @@ -1438,9 +1445,9 @@ CREATE TABLE information_schema.views ( IS_TRIGGER_DELETABLE STRING NOT NULL, IS_TRIGGER_INSERTABLE_INTO STRING NOT NULL )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* 
virtual schemas have no views */ - func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor) error { if !table.IsView() { return nil } @@ -1453,7 +1460,7 @@ CREATE TABLE information_schema.views ( // TODO(a-robinson): Insert column aliases into view query once we // have a semantic query representation to work with (#10083). return addRow( - tree.NewDString(db.Name), // table_catalog + tree.NewDString(db.GetName()), // table_catalog tree.NewDString(scName), // table_schema tree.NewDString(table.Name), // table_name tree.NewDString(table.ViewQuery), // view_definition @@ -1470,7 +1477,7 @@ CREATE TABLE information_schema.views ( // forEachSchemaName iterates over the physical and virtual schemas. func forEachSchemaName( - ctx context.Context, p *planner, db *sqlbase.DatabaseDescriptor, fn func(string) error, + ctx context.Context, p *planner, db *sqlbase.ImmutableDatabaseDescriptor, fn func(string) error, ) error { schemaNames, err := getSchemaNames(ctx, p, db) if err != nil { @@ -1482,7 +1489,7 @@ func forEachSchemaName( scNames = append(scNames, name) } for _, schema := range vtableEntries { - scNames = append(scNames, schema.desc.Name) + scNames = append(scNames, schema.desc.GetName()) } sort.Strings(scNames) for _, sc := range scNames { @@ -1500,11 +1507,11 @@ func forEachSchemaName( func forEachDatabaseDesc( ctx context.Context, p *planner, - dbContext *DatabaseDescriptor, + dbContext *sqlbase.ImmutableDatabaseDescriptor, requiresPrivileges bool, - fn func(*sqlbase.DatabaseDescriptor) error, + fn func(*sqlbase.ImmutableDatabaseDescriptor) error, ) error { - var dbDescs []*sqlbase.DatabaseDescriptor + var dbDescs []*sqlbase.ImmutableDatabaseDescriptor if dbContext == nil { allDbDescs, err := p.Tables().GetAllDatabaseDescriptors(ctx, p.txn) if err != nil { @@ -1514,7 +1521,7 @@ func forEachDatabaseDesc( } else { // We can't 
just use dbContext here because we need to fetch the descriptor // with privileges from kv. - fetchedDbDesc, err := catalogkv.GetDatabaseDescriptorsFromIDs(ctx, p.txn, p.ExecCfg().Codec, []sqlbase.ID{dbContext.ID}) + fetchedDbDesc, err := catalogkv.GetDatabaseDescriptorsFromIDs(ctx, p.txn, p.ExecCfg().Codec, []sqlbase.ID{dbContext.GetID()}) if err != nil { return err } @@ -1539,8 +1546,8 @@ func forEachDatabaseDesc( func forEachTypeDesc( ctx context.Context, p *planner, - dbContext *DatabaseDescriptor, - fn func(db *DatabaseDescriptor, sc string, typ *TypeDescriptor) error, + dbContext *sqlbase.ImmutableDatabaseDescriptor, + fn func(db *sqlbase.ImmutableDatabaseDescriptor, sc string, typ *sqlbase.ImmutableTypeDescriptor) error, ) error { descs, err := p.Tables().GetAllDescriptors(ctx, p.txn) if err != nil { @@ -1583,14 +1590,15 @@ func forEachTypeDesc( func forEachTableDesc( ctx context.Context, p *planner, - dbContext *DatabaseDescriptor, + dbContext *sqlbase.ImmutableDatabaseDescriptor, virtualOpts virtualOpts, - fn func(*sqlbase.DatabaseDescriptor, string, *sqlbase.TableDescriptor) error, + // TODO(ajwerner): Introduce TableDescriptorInterface. 
+ fn func(*sqlbase.ImmutableDatabaseDescriptor, string, *sqlbase.ImmutableTableDescriptor) error, ) error { return forEachTableDescWithTableLookup(ctx, p, dbContext, virtualOpts, func( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, _ tableLookupFn, ) error { return fn(db, scName, table) @@ -1613,16 +1621,16 @@ const ( func forEachTableDescAll( ctx context.Context, p *planner, - dbContext *DatabaseDescriptor, + dbContext *sqlbase.ImmutableDatabaseDescriptor, virtualOpts virtualOpts, - fn func(*sqlbase.DatabaseDescriptor, string, *sqlbase.TableDescriptor) error, + fn func(*sqlbase.ImmutableDatabaseDescriptor, string, *sqlbase.ImmutableTableDescriptor) error, ) error { return forEachTableDescAllWithTableLookup(ctx, p, dbContext, virtualOpts, func( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, _ tableLookupFn, ) error { return fn(db, scName, table) @@ -1634,9 +1642,9 @@ func forEachTableDescAll( func forEachTableDescAllWithTableLookup( ctx context.Context, p *planner, - dbContext *DatabaseDescriptor, + dbContext *sqlbase.ImmutableDatabaseDescriptor, virtualOpts virtualOpts, - fn func(*sqlbase.DatabaseDescriptor, string, *sqlbase.TableDescriptor, tableLookupFn) error, + fn func(*sqlbase.ImmutableDatabaseDescriptor, string, *sqlbase.ImmutableTableDescriptor, tableLookupFn) error, ) error { return forEachTableDescWithTableLookupInternal(ctx, p, dbContext, virtualOpts, true /* allowAdding */, fn) @@ -1654,18 +1662,18 @@ func forEachTableDescAllWithTableLookup( func forEachTableDescWithTableLookup( ctx context.Context, p *planner, - dbContext *DatabaseDescriptor, + dbContext *sqlbase.ImmutableDatabaseDescriptor, virtualOpts virtualOpts, - fn func(*sqlbase.DatabaseDescriptor, string, *sqlbase.TableDescriptor, tableLookupFn) error, + 
fn func(*sqlbase.ImmutableDatabaseDescriptor, string, *sqlbase.ImmutableTableDescriptor, tableLookupFn) error, ) error { return forEachTableDescWithTableLookupInternal(ctx, p, dbContext, virtualOpts, false /* allowAdding */, fn) } func getSchemaNames( - ctx context.Context, p *planner, dbContext *DatabaseDescriptor, + ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, ) (map[sqlbase.ID]string, error) { if dbContext != nil { - return p.Tables().GetSchemasForDatabase(ctx, p.txn, dbContext.ID) + return p.Tables().GetSchemasForDatabase(ctx, p.txn, dbContext.GetID()) } ret := make(map[sqlbase.ID]string) dbs, err := p.Tables().GetAllDatabaseDescriptors(ctx, p.txn) @@ -1673,7 +1681,7 @@ func getSchemaNames( return nil, err } for _, db := range dbs { - schemas, err := p.Tables().GetSchemasForDatabase(ctx, p.txn, db.ID) + schemas, err := p.Tables().GetSchemasForDatabase(ctx, p.txn, db.GetID()) if err != nil { return nil, err } @@ -1692,10 +1700,10 @@ func getSchemaNames( func forEachTableDescWithTableLookupInternal( ctx context.Context, p *planner, - dbContext *DatabaseDescriptor, + dbContext *sqlbase.ImmutableDatabaseDescriptor, virtualOpts virtualOpts, allowAdding bool, - fn func(*DatabaseDescriptor, string, *TableDescriptor, tableLookupFn) error, + fn func(*sqlbase.ImmutableDatabaseDescriptor, string, *ImmutableTableDescriptor, tableLookupFn) error, ) error { descs, err := p.Tables().GetAllDescriptors(ctx, p.txn) if err != nil { @@ -1708,12 +1716,12 @@ func forEachTableDescWithTableLookupInternal( vt := p.getVirtualTabler() vEntries := vt.getEntries() vSchemaNames := vt.getSchemaNames() - iterate := func(dbDesc *DatabaseDescriptor) error { + iterate := func(dbDesc *sqlbase.ImmutableDatabaseDescriptor) error { for _, virtSchemaName := range vSchemaNames { e := vEntries[virtSchemaName] for _, tName := range e.orderedDefNames { te := e.defs[tName] - if err := fn(dbDesc, virtSchemaName, te.desc, lCtx); err != nil { + if err := fn(dbDesc, 
virtSchemaName, sqlbase.NewImmutableTableDescriptor(*te.desc), lCtx); err != nil { return err } } @@ -1745,13 +1753,14 @@ func forEachTableDescWithTableLookupInternal( // Physical descriptors next. for _, tbID := range lCtx.tbIDs { table := lCtx.tbDescs[tbID] - dbDesc, parentExists := lCtx.dbDescs[table.GetParentID()] - if table.Dropped() || !userCanSeeTable(ctx, p, table, allowAdding) || !parentExists { + tableDesc := table.TableDesc() + dbDesc, parentExists := lCtx.dbDescs[tableDesc.GetParentID()] + if tableDesc.Dropped() || !userCanSeeTable(ctx, p, table, allowAdding) || !parentExists { continue } - scName, ok := schemaNames[table.GetParentSchemaID()] + scName, ok := schemaNames[tableDesc.GetParentSchemaID()] if !ok { - return errors.AssertionFailedf("schema id %d not found", table.GetParentSchemaID()) + return errors.AssertionFailedf("schema id %d not found", tableDesc.GetParentSchemaID()) } if err := fn(dbDesc, scName, table, lCtx); err != nil { return err @@ -1761,7 +1770,7 @@ func forEachTableDescWithTableLookupInternal( } func forEachIndexInTable( - table *sqlbase.TableDescriptor, fn func(*sqlbase.IndexDescriptor) error, + table *sqlbase.ImmutableTableDescriptor, fn func(*sqlbase.IndexDescriptor) error, ) error { if table.IsPhysicalTable() { if err := fn(&table.PrimaryIndex); err != nil { @@ -1777,7 +1786,7 @@ func forEachIndexInTable( } func forEachColumnInTable( - table *sqlbase.TableDescriptor, fn func(*sqlbase.ColumnDescriptor) error, + table *sqlbase.ImmutableTableDescriptor, fn func(*sqlbase.ColumnDescriptor) error, ) error { // Table descriptors already hold columns in-order. 
for i := range table.Columns { @@ -1789,7 +1798,7 @@ func forEachColumnInTable( } func forEachColumnInIndex( - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, index *sqlbase.IndexDescriptor, fn func(*sqlbase.ColumnDescriptor) error, ) error { @@ -1875,14 +1884,16 @@ func forEachRoleMembership( return nil } -func userCanSeeDatabase(ctx context.Context, p *planner, db *sqlbase.DatabaseDescriptor) bool { +func userCanSeeDatabase( + ctx context.Context, p *planner, db *sqlbase.ImmutableDatabaseDescriptor, +) bool { return p.CheckAnyPrivilege(ctx, db) == nil } func userCanSeeTable( - ctx context.Context, p *planner, table *sqlbase.TableDescriptor, allowAdding bool, + ctx context.Context, p *planner, table sqlbase.DescriptorInterface, allowAdding bool, ) bool { - return tableIsVisible(table, allowAdding) && p.CheckAnyPrivilege(ctx, table) == nil + return tableIsVisible(table.TableDesc(), allowAdding) && p.CheckAnyPrivilege(ctx, table) == nil } func tableIsVisible(table *TableDescriptor, allowAdding bool) bool { diff --git a/pkg/sql/join_test.go b/pkg/sql/join_test.go index e3a9c89cfb8a..f2ea8ed7904a 100644 --- a/pkg/sql/join_test.go +++ b/pkg/sql/join_test.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/span" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -27,7 +28,11 @@ func newTestScanNode(kvDB *kv.DB, tableName string) (*scanNode, error) { p := planner{alloc: &sqlbase.DatumAlloc{}} scan := p.Scan() scan.desc = desc - err := scan.initDescDefaults(publicColumnsCfg) + var colCfg scanColumnsConfig + for _, col := range desc.Columns { + colCfg.wantedColumns = append(colCfg.wantedColumns, tree.ColumnID(col.ID)) + } + err := scan.initDescDefaults(colCfg) if err != nil { 
return nil, err } diff --git a/pkg/sql/logictest/logic.go b/pkg/sql/logictest/logic.go index c4bc50494249..c462a4d44ca0 100644 --- a/pkg/sql/logictest/logic.go +++ b/pkg/sql/logictest/logic.go @@ -109,12 +109,12 @@ import ( // logicTestConfigs. If the directive is missing, the test is run in the // default configuration. // -// The directive also supports blacklists, i.e. running all specified -// configurations apart from a blacklisted configuration: +// The directive also supports blocklists, i.e. running all specified +// configurations apart from a blocklisted configuration: // // # LogicTest: default-configs !3node-tenant // -// If a blacklist is specified without an accompanying configuration, the +// If a blocklist is specified without an accompanying configuration, the // default config is assumed. i.e., the following directive is equivalent to the // one above: // @@ -1397,17 +1397,17 @@ CREATE DATABASE test; t.unsupported = 0 } -// applyBlacklistToConfigIdxs applies the given blacklist to config idxs, +// applyBlocklistToConfigIdxs applies the given blocklist to config idxs, // returning the result. -func applyBlacklistToConfigIdxs( - configIdxs []logicTestConfigIdx, blacklist map[string]struct{}, +func applyBlocklistToConfigIdxs( + configIdxs []logicTestConfigIdx, blocklist map[string]struct{}, ) []logicTestConfigIdx { - if len(blacklist) == 0 { + if len(blocklist) == 0 { return configIdxs } var newConfigIdxs []logicTestConfigIdx for _, idx := range configIdxs { - if _, ok := blacklist[logicTestConfigIdxToName[idx]]; ok { + if _, ok := blocklist[logicTestConfigIdxToName[idx]]; ok { continue } newConfigIdxs = append(newConfigIdxs, idx) @@ -1418,28 +1418,28 @@ func applyBlacklistToConfigIdxs( // processConfigs, given a list of configNames, returns the list of // corresponding logicTestConfigIdxs. func processConfigs(t *testing.T, path string, configNames []string) []logicTestConfigIdx { - const blacklistChar = '!' 
- blacklist := make(map[string]struct{}) - allConfigNamesAreBlacklistDirectives := true + const blocklistChar = '!' + blocklist := make(map[string]struct{}) + allConfigNamesAreBlocklistDirectives := true for _, configName := range configNames { - if configName[0] != blacklistChar { - allConfigNamesAreBlacklistDirectives = false + if configName[0] != blocklistChar { + allConfigNamesAreBlocklistDirectives = false continue } - blacklist[configName[1:]] = struct{}{} + blocklist[configName[1:]] = struct{}{} } var configs []logicTestConfigIdx - if len(blacklist) != 0 && allConfigNamesAreBlacklistDirectives { - // No configs specified, this blacklist applies to the default config. - return applyBlacklistToConfigIdxs(defaultConfig, blacklist) + if len(blocklist) != 0 && allConfigNamesAreBlocklistDirectives { + // No configs specified, this blocklist applies to the default config. + return applyBlocklistToConfigIdxs(defaultConfig, blocklist) } for _, configName := range configNames { - if configName[0] == blacklistChar { + if configName[0] == blocklistChar { continue } - if _, ok := blacklist[configName]; ok { + if _, ok := blocklist[configName]; ok { continue } @@ -1447,9 +1447,9 @@ func processConfigs(t *testing.T, path string, configNames []string) []logicTest if !ok { switch configName { case defaultConfigName: - configs = append(configs, applyBlacklistToConfigIdxs(defaultConfig, blacklist)...) + configs = append(configs, applyBlocklistToConfigIdxs(defaultConfig, blocklist)...) case fiveNodeDefaultConfigName: - configs = append(configs, applyBlacklistToConfigIdxs(fiveNodeDefaultConfig, blacklist)...) + configs = append(configs, applyBlocklistToConfigIdxs(fiveNodeDefaultConfig, blocklist)...) 
default: t.Fatalf("%s: unknown config name %s", path, configName) } diff --git a/pkg/sql/logictest/testdata/logic_test/alter_table b/pkg/sql/logictest/testdata/logic_test/alter_table index b7e864f87c59..f994cbdc9f7a 100644 --- a/pkg/sql/logictest/testdata/logic_test/alter_table +++ b/pkg/sql/logictest/testdata/logic_test/alter_table @@ -321,14 +321,6 @@ ALTER TABLE t DROP COLUMN y statement error cannot drop column "e" because view "v" depends on it ALTER TABLE t DROP COLUMN e -# TODO(knz): this statement should succeed after #17269 is fixed. -statement error cannot drop column "d" because view "v" depends on it -ALTER TABLE t DROP COLUMN d - -# TODO(knz): remove the following once the test above succeeds. -statement ok -ALTER TABLE t DROP COLUMN d CASCADE - statement ok ALTER TABLE t DROP COLUMN e CASCADE diff --git a/pkg/sql/logictest/testdata/logic_test/dependencies b/pkg/sql/logictest/testdata/logic_test/dependencies index 699411f8a5de..bbf7cb6847cc 100644 --- a/pkg/sql/logictest/testdata/logic_test/dependencies +++ b/pkg/sql/logictest/testdata/logic_test/dependencies @@ -96,7 +96,7 @@ descriptor_id descriptor_name index_id dependedonby_id dependedonby_type de 53 test_kv NULL 54 fk NULL NULL NULL 53 test_kv NULL 55 fk NULL NULL NULL 53 test_kv NULL 56 fk NULL NULL NULL -53 test_kv NULL 59 view 0 NULL Columns: [1 2 3] +53 test_kv NULL 59 view 0 NULL Columns: [2] 53 test_kv 1 57 interleave 1 NULL SharedPrefixLen: 0 53 test_kv 1 58 interleave 2 NULL SharedPrefixLen: 0 59 test_v1 NULL 60 view 0 NULL Columns: [1] @@ -116,7 +116,7 @@ query ITIITITT colnames SELECT * FROM crdb_internal.forward_dependencies WHERE descriptor_name LIKE 'moretest_%' ORDER BY descriptor_id, index_id, dependedonby_type, dependedonby_id, dependedonby_index_id ---- descriptor_id descriptor_name index_id dependedonby_id dependedonby_type dependedonby_index_id dependedonby_name dependedonby_details -61 moretest_t NULL 62 view 0 NULL Columns: [1 2 3] +61 moretest_t NULL 62 view 0 NULL Columns: 
[2] # Check sequence dependencies. diff --git a/pkg/sql/logictest/testdata/logic_test/enums b/pkg/sql/logictest/testdata/logic_test/enums index 61cc593ef01a..b5bc56c67c86 100644 --- a/pkg/sql/logictest/testdata/logic_test/enums +++ b/pkg/sql/logictest/testdata/logic_test/enums @@ -395,7 +395,7 @@ query TT SHOW CREATE t1 ---- t1 CREATE TABLE t1 ( - x test.public.greeting NULL, + x public.greeting NULL, INDEX i (x ASC), FAMILY "primary" (x, rowid) ) @@ -406,7 +406,7 @@ query T SELECT create_statement FROM crdb_internal.create_statements WHERE descriptor_name = 't1' ---- CREATE TABLE t1 ( - x test.public.greeting NULL, + x public.greeting NULL, INDEX i (x ASC), FAMILY "primary" (x, rowid) ) @@ -418,7 +418,7 @@ SELECT ARRAY['hello']::_greeting, ARRAY['hello'::greeting] {hello} {hello} # Test that we can't mix enums in an array. -query error pq: expected 'cockroach'::test.public.dbs to be of type greeting, found type dbs +query error pq: expected 'cockroach'::public.dbs to be of type greeting, found type dbs SELECT ARRAY['hello'::greeting, 'cockroach'::dbs] statement ok @@ -434,7 +434,7 @@ SELECT * FROM enum_array query TTT SELECT pg_typeof(x), pg_typeof(x[1]), pg_typeof(ARRAY['hello']::_greeting) FROM enum_array LIMIT 1 ---- -test.public.greeting[] test.public.greeting test.public.greeting[] +public.greeting[] public.greeting public.greeting[] # Ensure that the implicitly created array type will tolerate collisions. 
# _collision will create __collision as its implicit array type, so the @@ -492,8 +492,8 @@ SHOW CREATE enum_default ---- enum_default CREATE TABLE enum_default ( x INT8 NULL, - y test.public.greeting NULL DEFAULT 'hello':::test.public.greeting, - z BOOL NULL DEFAULT 'hello':::test.public.greeting IS OF (test.public.greeting, test.public.greeting), + y public.greeting NULL DEFAULT 'hello':::public.greeting, + z BOOL NULL DEFAULT 'hello':::public.greeting IS OF (public.greeting, public.greeting), FAMILY fam_0_x_y_z_rowid (x, y, z, rowid) ) @@ -508,8 +508,8 @@ WHERE ORDER BY column_name ---- -y 'hello':::test.public.greeting -z 'hello':::test.public.greeting IS OF (test.public.greeting, test.public.greeting) +y 'hello':::public.greeting +z 'hello':::public.greeting IS OF (public.greeting, public.greeting) # Test information_schema.columns. query TT @@ -522,8 +522,8 @@ WHERE ORDER BY column_name ---- -y 'hello':::test.public.greeting -z 'hello':::test.public.greeting IS OF (test.public.greeting, test.public.greeting) +y 'hello':::public.greeting +z 'hello':::public.greeting IS OF (public.greeting, public.greeting) # Test computed columns with enum values. statement ok @@ -547,9 +547,9 @@ SHOW CREATE enum_computed ---- enum_computed CREATE TABLE enum_computed ( x INT8 NULL, - y test.public.greeting NULL AS ('hello':::test.public.greeting) STORED, - z BOOL NULL AS (w = 'howdy':::test.public.greeting) STORED, - w test.public.greeting NULL, + y public.greeting NULL AS ('hello':::public.greeting) STORED, + z BOOL NULL AS (w = 'howdy':::public.greeting) STORED, + w public.greeting NULL, FAMILY fam_0_x_y_z_w_rowid (x, y, z, w, rowid) ) @@ -564,8 +564,8 @@ WHERE ORDER BY column_name ---- -y 'hello':::test.public.greeting -z w = 'howdy':::test.public.greeting +y 'hello':::public.greeting +z w = 'howdy':::public.greeting # Test check constraints with enum values. 
statement ok @@ -580,10 +580,10 @@ query TT SHOW CREATE enum_checks ---- enum_checks CREATE TABLE enum_checks ( - x test.public.greeting NULL, + x public.greeting NULL, FAMILY "primary" (x, rowid), - CONSTRAINT check_x CHECK (x = 'hello':::test.public.greeting::test.public.greeting), - CONSTRAINT "check" CHECK ('hello':::test.public.greeting = 'hello':::test.public.greeting) + CONSTRAINT check_x CHECK (x = 'hello':::public.greeting::public.greeting), + CONSTRAINT "check" CHECK ('hello':::public.greeting = 'hello':::public.greeting) ) # Ensure that we can add check constraints to tables with enums. @@ -594,11 +594,11 @@ INSERT INTO enum_checks VALUES ('hi'), ('howdy'); ALTER TABLE enum_checks ADD CHECK (x > 'hello') # Ensure that checks are validated on insert. -statement error pq: failed to satisfy CHECK constraint \(x > 'hello':::test.public.greeting\) +statement error pq: failed to satisfy CHECK constraint \(x > 'hello':::public.greeting\) INSERT INTO enum_checks VALUES ('hello') # Try adding a check that fails validation. -statement error pq: validation of CHECK "x = 'hello':::test.public.greeting" failed +statement error pq: validation of CHECK "x = 'hello':::public.greeting" failed ALTER TABLE enum_checks ADD CHECK (x = 'hello') # Check the above cases, but in a transaction. @@ -609,7 +609,7 @@ CREATE TABLE enum_checks (x greeting); INSERT INTO enum_checks VALUES ('hi'), ('howdy'); ALTER TABLE enum_checks ADD CHECK (x > 'hello') -statement error pq: failed to satisfy CHECK constraint \(x > 'hello':::test.public.greeting\) +statement error pq: failed to satisfy CHECK constraint \(x > 'hello':::public.greeting\) INSERT INTO enum_checks VALUES ('hello') statement ok @@ -621,12 +621,52 @@ CREATE TABLE enum_checks (x greeting); INSERT INTO enum_checks VALUES ('hi'), ('howdy'); # Try adding a check that fails validation. 
-statement error pq: validation of CHECK "x = 'hello':::test.public.greeting" failed +statement error pq: validation of CHECK "x = 'hello':::public.greeting" failed ALTER TABLE enum_checks ADD CHECK (x = 'hello') statement ok ROLLBACK +# Test that cross database type references are disallowed. +statement ok +CREATE DATABASE other; +CREATE TYPE other.t AS ENUM ('other') + +# We can still reference other databases types when creating objects +# within those databases. +statement ok +CREATE TABLE other.tt (x other.t) + +# Referencing other databases in this database's objects will error. +statement error pq: cross database type references are not supported: other.public.t +CREATE TABLE cross_error (x other.t) + +# Test that we can't hide cross database references in expressions. +statement error pq: cross database type references are not supported: other.public.t +CREATE TABLE cross_error (x BOOL DEFAULT ('other':::other.t = 'other':::other.t)) + +statement error pq: cross database type references are not supported: other.public.t +CREATE TABLE cross_error (x BOOL AS ('other':::other.t = 'other':::other.t) STORED) + +statement error pq: cross database type references are not supported: other.public.t +CREATE TABLE cross_error (x INT, CHECK ('other':::other.t = 'other':::other.t)) + +# Test that we can't add columns or checks that use these either. 
+statement ok +CREATE TABLE cross_error (x INT) + +statement error pq: cross database type references are not supported: other.public.t +ALTER TABLE cross_error ADD COLUMN y other.t + +statement error pq: cross database type references are not supported: other.public.t +ALTER TABLE cross_error ADD COLUMN y BOOL DEFAULT ('other':::other.t = 'other':::other.t) + +statement error pq: cross database type references are not supported: other.public.t +ALTER TABLE cross_error ADD COLUMN y BOOL AS ('other':::other.t = 'other':::other.t) STORED + +statement error pq: cross database type references are not supported: other.public.t +ALTER TABLE cross_error ADD CHECK ('other':::other.t = 'other':::other.t) + subtest schema_changes # Ensure that we can drop and create indexes on user defined type columns, diff --git a/pkg/sql/logictest/testdata/logic_test/experimental_distsql_planning b/pkg/sql/logictest/testdata/logic_test/experimental_distsql_planning index c91d1626643b..10fcc5ba215e 100644 --- a/pkg/sql/logictest/testdata/logic_test/experimental_distsql_planning +++ b/pkg/sql/logictest/testdata/logic_test/experimental_distsql_planning @@ -14,9 +14,41 @@ SET CLUSTER SETTING sql.defaults.experimental_distsql_planning = on statement ok SET experimental_distsql_planning = always -# Test that a SELECT query fails but others don't. statement ok -CREATE TABLE kv (k INT PRIMARY KEY, v INT); INSERT INTO kv VALUES (1, 1), (2, 1) +CREATE TABLE kv (k INT PRIMARY KEY, v INT); INSERT INTO kv VALUES (1, 1), (2, 1), (3, 2) -statement error pq: unimplemented: experimental opt-driven distsql planning +query II colnames,rowsort SELECT * FROM kv +---- +k v +1 1 +2 1 +3 2 + +query I colnames,rowsort +SELECT k FROM kv +---- +k +1 +2 +3 + +query I colnames,rowsort +SELECT v FROM kv +---- +v +1 +1 +2 + +# Projections are not yet supported. +statement error pq: unimplemented: experimental opt-driven distsql planning +SELECT v, k FROM kv + +# Renders are not yet supported. 
+statement error pq: unimplemented: experimental opt-driven distsql planning +SELECT k + v FROM kv + +# Filters are not yet supported. +statement error pq: unimplemented: experimental opt-driven distsql planning +SELECT * FROM kv WHERE k > v diff --git a/pkg/sql/logictest/testdata/logic_test/geospatial b/pkg/sql/logictest/testdata/logic_test/geospatial index 43446e7ed8d8..885d1eeba8b9 100644 --- a/pkg/sql/logictest/testdata/logic_test/geospatial +++ b/pkg/sql/logictest/testdata/logic_test/geospatial @@ -98,6 +98,11 @@ SELECT ST_AsText(p) FROM (VALUES POINT (1 2) POINT (3 4) +query T +SELECT ST_AsText(ST_Project('POINT(0 0)'::geography, 100000, radians(45.0))) +---- +POINT (0.635231029125537 0.639472334729198) + subtest cast_test query T @@ -196,8 +201,8 @@ INSERT INTO parse_test (geom, geog) VALUES (ST_GeomFromText('POINT(1.0 2.0)'), ST_GeogFromText('POINT(1.0 2.0)')), (ST_GeomFromText('SRID=4326;POINT(1.0 2.0)'), ST_GeogFromText('SRID=4326;POINT(1.0 2.0)')), (ST_GeometryFromText('SRID=4004;POINT(1.0 2.0)'), ST_GeographyFromText('POINT(1.0 2.0)')), - (ST_GeomFromGeoJSON('{"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null}'), ST_GeogFromGeoJSON('{"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null}')), - (ST_GeomFromGeoJSON('{"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null}'::jsonb), ST_GeogFromGeoJSON('{"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null}'::jsonb)), + (ST_GeomFromGeoJSON('{"type":"Point","coordinates":[1,2]}'), ST_GeogFromGeoJSON('{"type":"Point","coordinates":[1,2]}')), + (ST_GeomFromGeoJSON('{"type":"Point","coordinates":[1,2]}'::jsonb), ST_GeogFromGeoJSON('{"type":"Point","coordinates":[1,2]}'::jsonb)), (ST_GeomFromWKB(decode('0101000000000000000000F03F000000000000F03F', 'hex')), ST_GeogFromWKB(decode('0101000000000000000000F03F000000000000F03F', 'hex'))), 
(ST_GeomFromEWKB(decode('0101000000000000000000F03F000000000000F03F', 'hex')), ST_GeogFromEWKB(decode('0101000000000000000000F03F000000000000F03F', 'hex'))), (st_geomfromgeojson('null':::jsonb), st_geogfromgeojson('null':::jsonb)) @@ -209,7 +214,7 @@ SELECT ---- true true -query TTTTTTTT +query TTTTTTT SELECT ST_AsText(geom), ST_AsEWKT(geom), @@ -217,25 +222,41 @@ SELECT ST_AsBinary(geom, 'ndr'), ST_AsBinary(geom, 'xdr'), ST_AsEWKB(geom), - ST_AsKML(geom), - ST_AsGeoJSON(geom) + ST_AsKML(geom) FROM parse_test ORDER BY id ASC ---- -POINT (1 2) POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] -1,2 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null} -POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] -1,2 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null} -POINT (1 2) SRID=4004;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 164 15 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] -1,2 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null} -POINT (1 2) POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] -1,2 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null} -POINT (1 2) POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 0 0 0 0 0 0 0 
240 63 0 0 0 0 0 0 0 64] -1,2 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null} -POINT (1 1) POINT (1 1) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [0 0 0 0 1 63 240 0 0 0 0 0 0 63 240 0 0 0 0 0 0] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] -1,1 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,1]},"properties":null} -POINT (1 1) POINT (1 1) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [0 0 0 0 1 63 240 0 0 0 0 0 0 63 240 0 0 0 0 0 0] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] -1,1 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,1]},"properties":null} -NULL NULL NULL NULL NULL NULL NULL NULL +POINT (1 2) POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] +1,2 +POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] +1,2 +POINT (1 2) SRID=4004;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 164 15 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] +1,2 +POINT (1 2) POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] +1,2 +POINT (1 2) POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] +1,2 +POINT (1 1) POINT (1 1) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 
0 0 0 0 240 63] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [0 0 0 0 1 63 240 0 0 0 0 0 0 63 240 0 0 0 0 0 0] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] +1,1 +POINT (1 1) POINT (1 1) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [0 0 0 0 1 63 240 0 0 0 0 0 0 63 240 0 0 0 0 0 0] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] +1,1 +NULL NULL NULL NULL NULL NULL NULL + + +query TTT +SELECT + ST_AsGeoJSON(geom), + ST_AsGeoJSON(geom, 6, 8), + ST_AsGeoJSON(geom, 6, 5) +FROM parse_test ORDER BY id ASC +---- +{"type":"Point","coordinates":[1,2]} {"type":"Point","coordinates":[1,2]} {"type":"Point","bbox":[1,2,1,2],"coordinates":[1,2]} +{"type":"Point","coordinates":[1,2]} {"type":"Point","coordinates":[1,2]} {"type":"Point","bbox":[1,2,1,2],"crs":{"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::4326"}},"coordinates":[1,2]} +{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4004"}},"coordinates":[1,2]} {"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4004"}},"coordinates":[1,2]} {"type":"Point","bbox":[1,2,1,2],"crs":{"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::4004"}},"coordinates":[1,2]} +{"type":"Point","coordinates":[1,2]} {"type":"Point","coordinates":[1,2]} {"type":"Point","bbox":[1,2,1,2],"coordinates":[1,2]} +{"type":"Point","coordinates":[1,2]} {"type":"Point","coordinates":[1,2]} {"type":"Point","bbox":[1,2,1,2],"coordinates":[1,2]} +{"type":"Point","coordinates":[1,1]} {"type":"Point","coordinates":[1,1]} {"type":"Point","bbox":[1,1,1,1],"coordinates":[1,1]} +{"type":"Point","coordinates":[1,1]} {"type":"Point","coordinates":[1,1]} {"type":"Point","bbox":[1,1,1,1],"coordinates":[1,1]} +NULL NULL NULL query TTTT SELECT @@ -254,7 +275,7 @@ FROM parse_test ORDER BY id ASC 0101000000000000000000F03F000000000000F03F 0101000000000000000000F03F000000000000F03F 0101000000000000000000F03F000000000000F03F 00000000013FF00000000000003FF0000000000000 NULL 
NULL NULL NULL -query TTTTTTTT +query TTTTTTT SELECT ST_AsText(geog), ST_AsEWKT(geog), @@ -262,25 +283,40 @@ SELECT ST_AsBinary(geog, 'ndr'), ST_AsBinary(geog, 'xdr'), ST_AsEWKB(geog), - ST_AsKML(geog), - ST_AsGeoJSON(geog) + ST_AsKML(geog) +FROM parse_test ORDER BY id ASC +---- +POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] +1,2 +POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] +1,2 +POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] +1,2 +POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] +1,2 +POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] +1,2 +POINT (1 1) SRID=4326;POINT (1 1) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [0 0 0 0 1 63 240 0 0 0 0 0 0 63 240 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] +1,1 +POINT (1 1) SRID=4326;POINT (1 1) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [0 0 0 0 1 63 240 0 0 0 0 0 0 63 240 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] 
+1,1 +NULL NULL NULL NULL NULL NULL NULL + +query TTT +SELECT + ST_AsGeoJSON(geog), + ST_AsGeoJSON(geog, 6, 8), + ST_AsGeoJSON(geog, 6, 5) FROM parse_test ORDER BY id ASC ---- -POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] -1,2 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null} -POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] -1,2 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null} -POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] -1,2 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null} -POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] -1,2 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null} -POINT (1 2) SRID=4326;POINT (1 2) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] [0 0 0 0 1 63 240 0 0 0 0 0 0 64 0 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 0 64] -1,2 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,2]},"properties":null} -POINT (1 1) SRID=4326;POINT (1 1) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [0 0 0 0 1 63 240 0 0 0 0 0 0 63 240 0 0 0 
0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] -1,1 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,1]},"properties":null} -POINT (1 1) SRID=4326;POINT (1 1) [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [1 1 0 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] [0 0 0 0 1 63 240 0 0 0 0 0 0 63 240 0 0 0 0 0 0] [1 1 0 0 32 230 16 0 0 0 0 0 0 0 0 240 63 0 0 0 0 0 0 240 63] -1,1 {"type":"Feature","geometry":{"type":"Point","coordinates":[1,1]},"properties":null} -NULL NULL NULL NULL NULL NULL NULL NULL +{"type":"Point","coordinates":[1,2]} {"type":"Point","coordinates":[1,2]} {"type":"Point","bbox":[1,2,1,2],"crs":{"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::4326"}},"coordinates":[1,2]} +{"type":"Point","coordinates":[1,2]} {"type":"Point","coordinates":[1,2]} {"type":"Point","bbox":[1,2,1,2],"crs":{"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::4326"}},"coordinates":[1,2]} +{"type":"Point","coordinates":[1,2]} {"type":"Point","coordinates":[1,2]} {"type":"Point","bbox":[1,2,1,2],"crs":{"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::4326"}},"coordinates":[1,2]} +{"type":"Point","coordinates":[1,2]} {"type":"Point","coordinates":[1,2]} {"type":"Point","bbox":[1,2,1,2],"crs":{"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::4326"}},"coordinates":[1,2]} +{"type":"Point","coordinates":[1,2]} {"type":"Point","coordinates":[1,2]} {"type":"Point","bbox":[1,2,1,2],"crs":{"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::4326"}},"coordinates":[1,2]} +{"type":"Point","coordinates":[1,1]} {"type":"Point","coordinates":[1,1]} {"type":"Point","bbox":[1,1,1,1],"crs":{"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::4326"}},"coordinates":[1,1]} +{"type":"Point","coordinates":[1,1]} {"type":"Point","coordinates":[1,1]} {"type":"Point","bbox":[1,1,1,1],"crs":{"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::4326"}},"coordinates":[1,1]} +NULL NULL NULL query TTTT SELECT @@ -473,16 
+509,16 @@ SELECT FROM geom_operators_test a ORDER BY a.dsc ---- -Empty GeometryCollection POINT EMPTY POINT EMPTY -Empty LineString POINT EMPTY POINT EMPTY -Faraway point POINT (5 5) POINT (5 5) -Line going through left and right square POINT (0 0.5) POINT (-0.5 0.5) -NULL NULL NULL -Point middle of Left Square POINT (-0.5 0.5) POINT (-0.5 0.5) -Point middle of Right Square POINT (0.5 0.5) POINT (0.5 0.5) -Square (left) POINT (-0.5 0.5) POINT (-0.5 0.5) -Square (right) POINT (0.5 0.5) POINT (0.5 0.5) -Square overlapping left and right square POINT (0.4499999999999999 0.5) POINT (0.45 0.5) +Empty GeometryCollection POINT EMPTY POINT EMPTY +Empty LineString POINT EMPTY POINT EMPTY +Faraway point POINT (5 5) POINT (5 5) +Line going through left and right square POINT (0 0.5) POINT (-0.5 0.5) +NULL NULL NULL +Point middle of Left Square POINT (-0.5 0.5) POINT (-0.5 0.5) +Point middle of Right Square POINT (0.5 0.5) POINT (0.5 0.5) +Square (left) POINT (-0.5 0.5) POINT (-0.5 0.5) +Square (right) POINT (0.5 0.5) POINT (0.5 0.5) +Square overlapping left and right square POINT (0.45 0.5) POINT (0.45 0.5) # Functions which take in strings as input as well. query TT @@ -501,7 +537,7 @@ Point middle of Left Square POINT (-0.5 0.5) Point middle of Right Square POINT (0.5 0.5) Square (left) POINT (-0.5 0.5) Square (right) POINT (0.5 0.5) -Square overlapping left and right square POINT (0.4499999999999999 0.5) +Square overlapping left and right square POINT (0.45 0.5) # Binary operators query TTRR @@ -847,28 +883,25 @@ Square overlapping left and right square Square (left) Square overlapping left and right square Square (right) true false Square overlapping left and right square Square overlapping left and right square true false -# Buffer -- unfortunately due to floating point precision, these results can be off by small -# epsilon across operating systems. 
Until ST_AsEWKT with precision is implemented, we'll have to -# verify that it works by checking another statistic for now. -query TIII +query TTTT SELECT a.dsc, - ST_NPoints(ST_Buffer(a.geom, 10)), - ST_NPoints(ST_Buffer(a.geom, 10, 2)), - ST_NPoints(ST_Buffer(a.geom, 10, 'quad_segs=4 endcap=flat')) + ST_AsEWKT(ST_Buffer(a.geom, 10), 5), + ST_AsEWKT(ST_Buffer(a.geom, 10, 2), 5), + ST_AsEWKT(ST_Buffer(a.geom, 10, 'quad_segs=4 endcap=flat'), 5) FROM geom_operators_test a ORDER BY a.dsc ---- -Empty GeometryCollection 0 0 0 -Empty LineString 0 0 0 -Faraway point 33 9 0 -Line going through left and right square 35 11 5 -NULL NULL NULL NULL -Point middle of Left Square 33 9 0 -Point middle of Right Square 33 9 0 -Square (left) 37 13 21 -Square (right) 37 13 21 -Square overlapping left and right square 37 13 21 +Empty GeometryCollection POLYGON EMPTY POLYGON EMPTY POLYGON EMPTY +Empty LineString POLYGON EMPTY POLYGON EMPTY POLYGON EMPTY +Faraway point POLYGON ((15 5, 14.80785 3.0491, 14.2388 1.17317, 13.3147 -0.5557, 12.07107 -2.07107, 10.5557 -3.3147, 8.82683 -4.2388, 6.9509 -4.80785, 5 -5, 3.0491 -4.80785, 1.17317 -4.2388, -0.5557 -3.3147, -2.07107 -2.07107, -3.3147 -0.5557, -4.2388 1.17317, -4.80785 3.0491, -5 5, -4.80785 6.9509, -4.2388 8.82683, -3.3147 10.5557, -2.07107 12.07107, -0.5557 13.3147, 1.17317 14.2388, 3.0491 14.80785, 5 15, 6.9509 14.80785, 8.82683 14.2388, 10.5557 13.3147, 12.07107 12.07107, 13.3147 10.5557, 14.2388 8.82683, 14.80785 6.9509, 15 5)) POLYGON ((15 5, 12.07107 -2.07107, 5 -5, -2.07107 -2.07107, -5 5, -2.07107 12.07107, 5 15, 12.07107 12.07107, 15 5)) POLYGON EMPTY +Line going through left and right square POLYGON ((0.5 10.5, 2.4509 10.30785, 4.32683 9.7388, 6.0557 8.8147, 7.57107 7.57107, 8.8147 6.0557, 9.7388 4.32683, 10.30785 2.4509, 10.5 0.5, 10.30785 -1.4509, 9.7388 -3.32683, 8.8147 -5.0557, 7.57107 -6.57107, 6.0557 -7.8147, 4.32683 -8.7388, 2.4509 -9.30785, 0.5 -9.5, -0.5 -9.5, -2.4509 -9.30785, -4.32683 -8.7388, -6.0557 -7.8147, 
-7.57107 -6.57107, -8.8147 -5.0557, -9.7388 -3.32683, -10.30785 -1.4509, -10.5 0.5, -10.30785 2.4509, -9.7388 4.32683, -8.8147 6.0557, -7.57107 7.57107, -6.0557 8.8147, -4.32683 9.7388, -2.4509 10.30785, -0.5 10.5, 0.5 10.5)) POLYGON ((0.5 10.5, 7.57107 7.57107, 10.5 0.5, 7.57107 -6.57107, 0.5 -9.5, -0.5 -9.5, -7.57107 -6.57107, -10.5 0.5, -7.57107 7.57107, -0.5 10.5, 0.5 10.5)) POLYGON ((0.5 10.5, 0.5 -9.5, -0.5 -9.5, -0.5 10.5, 0.5 10.5)) +NULL NULL NULL NULL +Point middle of Left Square POLYGON ((9.5 0.5, 9.30785 -1.4509, 8.7388 -3.32683, 7.8147 -5.0557, 6.57107 -6.57107, 5.0557 -7.8147, 3.32683 -8.7388, 1.4509 -9.30785, -0.5 -9.5, -2.4509 -9.30785, -4.32683 -8.7388, -6.0557 -7.8147, -7.57107 -6.57107, -8.8147 -5.0557, -9.7388 -3.32683, -10.30785 -1.4509, -10.5 0.5, -10.30785 2.4509, -9.7388 4.32683, -8.8147 6.0557, -7.57107 7.57107, -6.0557 8.8147, -4.32683 9.7388, -2.4509 10.30785, -0.5 10.5, 1.4509 10.30785, 3.32683 9.7388, 5.0557 8.8147, 6.57107 7.57107, 7.8147 6.0557, 8.7388 4.32683, 9.30785 2.4509, 9.5 0.5)) POLYGON ((9.5 0.5, 6.57107 -6.57107, -0.5 -9.5, -7.57107 -6.57107, -10.5 0.5, -7.57107 7.57107, -0.5 10.5, 6.57107 7.57107, 9.5 0.5)) POLYGON EMPTY +Point middle of Right Square POLYGON ((10.5 0.5, 10.30785 -1.4509, 9.7388 -3.32683, 8.8147 -5.0557, 7.57107 -6.57107, 6.0557 -7.8147, 4.32683 -8.7388, 2.4509 -9.30785, 0.5 -9.5, -1.4509 -9.30785, -3.32683 -8.7388, -5.0557 -7.8147, -6.57107 -6.57107, -7.8147 -5.0557, -8.7388 -3.32683, -9.30785 -1.4509, -9.5 0.5, -9.30785 2.4509, -8.7388 4.32683, -7.8147 6.0557, -6.57107 7.57107, -5.0557 8.8147, -3.32683 9.7388, -1.4509 10.30785, 0.5 10.5, 2.4509 10.30785, 4.32683 9.7388, 6.0557 8.8147, 7.57107 7.57107, 8.8147 6.0557, 9.7388 4.32683, 10.30785 2.4509, 10.5 0.5)) POLYGON ((10.5 0.5, 7.57107 -6.57107, 0.5 -9.5, -6.57107 -6.57107, -9.5 0.5, -6.57107 7.57107, 0.5 10.5, 7.57107 7.57107, 10.5 0.5)) POLYGON EMPTY +Square (left) POLYGON ((-11 0, -11 1, -10.80785 2.9509, -10.2388 4.82683, -9.3147 6.5557, -8.07107 
8.07107, -6.5557 9.3147, -4.82683 10.2388, -2.9509 10.80785, -1 11, 0 11, 1.9509 10.80785, 3.82683 10.2388, 5.5557 9.3147, 7.07107 8.07107, 8.3147 6.5557, 9.2388 4.82683, 9.80785 2.9509, 10 1, 10 0, 9.80785 -1.9509, 9.2388 -3.82683, 8.3147 -5.5557, 7.07107 -7.07107, 5.5557 -8.3147, 3.82683 -9.2388, 1.9509 -9.80785, 0 -10, -1 -10, -2.9509 -9.80785, -4.82683 -9.2388, -6.5557 -8.3147, -8.07107 -7.07107, -9.3147 -5.5557, -10.2388 -3.82683, -10.80785 -1.9509, -11 0)) POLYGON ((-11 0, -11 1, -8.07107 8.07107, -1 11, 0 11, 7.07107 8.07107, 10 1, 10 0, 7.07107 -7.07107, 0 -10, -1 -10, -8.07107 -7.07107, -11 0)) POLYGON ((-11 0, -11 1, -10.2388 4.82683, -8.07107 8.07107, -4.82683 10.2388, -1 11, 0 11, 3.82683 10.2388, 7.07107 8.07107, 9.2388 4.82683, 10 1, 10 0, 9.2388 -3.82683, 7.07107 -7.07107, 3.82683 -9.2388, 0 -10, -1 -10, -4.82683 -9.2388, -8.07107 -7.07107, -10.2388 -3.82683, -11 0)) +Square (right) POLYGON ((-10 0, -10 1, -9.80785 2.9509, -9.2388 4.82683, -8.3147 6.5557, -7.07107 8.07107, -5.5557 9.3147, -3.82683 10.2388, -1.9509 10.80785, 0 11, 1 11, 2.9509 10.80785, 4.82683 10.2388, 6.5557 9.3147, 8.07107 8.07107, 9.3147 6.5557, 10.2388 4.82683, 10.80785 2.9509, 11 1, 11 0, 10.80785 -1.9509, 10.2388 -3.82683, 9.3147 -5.5557, 8.07107 -7.07107, 6.5557 -8.3147, 4.82683 -9.2388, 2.9509 -9.80785, 1 -10, 0 -10, -1.9509 -9.80785, -3.82683 -9.2388, -5.5557 -8.3147, -7.07107 -7.07107, -8.3147 -5.5557, -9.2388 -3.82683, -9.80785 -1.9509, -10 0)) POLYGON ((-10 0, -10 1, -7.07107 8.07107, 0 11, 1 11, 8.07107 8.07107, 11 1, 11 0, 8.07107 -7.07107, 1 -10, 0 -10, -7.07107 -7.07107, -10 0)) POLYGON ((-10 0, -10 1, -9.2388 4.82683, -7.07107 8.07107, -3.82683 10.2388, 0 11, 1 11, 4.82683 10.2388, 8.07107 8.07107, 10.2388 4.82683, 11 1, 11 0, 10.2388 -3.82683, 8.07107 -7.07107, 4.82683 -9.2388, 1 -10, 0 -10, -3.82683 -9.2388, -7.07107 -7.07107, -9.2388 -3.82683, -10 0)) +Square overlapping left and right square POLYGON ((-10.1 0, -10.1 1, -9.90785 2.9509, -9.3388 4.82683, -8.4147 
6.5557, -7.17107 8.07107, -5.6557 9.3147, -3.92683 10.2388, -2.0509 10.80785, -0.1 11, 1 11, 2.9509 10.80785, 4.82683 10.2388, 6.5557 9.3147, 8.07107 8.07107, 9.3147 6.5557, 10.2388 4.82683, 10.80785 2.9509, 11 1, 11 0, 10.80785 -1.9509, 10.2388 -3.82683, 9.3147 -5.5557, 8.07107 -7.07107, 6.5557 -8.3147, 4.82683 -9.2388, 2.9509 -9.80785, 1 -10, -0.1 -10, -2.0509 -9.80785, -3.92683 -9.2388, -5.6557 -8.3147, -7.17107 -7.07107, -8.4147 -5.5557, -9.3388 -3.82683, -9.90785 -1.9509, -10.1 0)) POLYGON ((-10.1 0, -10.1 1, -7.17107 8.07107, -0.1 11, 1 11, 8.07107 8.07107, 11 1, 11 0, 8.07107 -7.07107, 1 -10, -0.1 -10, -7.17107 -7.07107, -10.1 0)) POLYGON ((-10.1 0, -10.1 1, -9.3388 4.82683, -7.17107 8.07107, -3.92683 10.2388, -0.1 11, 1 11, 4.82683 10.2388, 8.07107 8.07107, 10.2388 4.82683, 11 1, 11 0, 10.2388 -3.82683, 8.07107 -7.07107, 4.82683 -9.2388, 1 -10, -0.1 -10, -3.92683 -9.2388, -7.17107 -7.07107, -9.3388 -3.82683, -10.1 0)) # Test raw string with ST_Buffer query I @@ -1505,16 +1538,16 @@ SELECT FROM geog_operators_test ORDER BY dsc ---- -Empty GeometryCollection GEOMETRYCOLLECTION EMPTY GEOMETRYCOLLECTION EMPTY -Empty LineString LINESTRING EMPTY LINESTRING EMPTY -Faraway point POINT (5 5) POINT (5 5) -Line going through left and right square LINESTRING (-0.5 0.5, -0.00000000000000009939611878359099 0.5000190382262164, 0.5 0.5) LINESTRING (-0.5 0.5, -0.25000000036247944 0.500014278647005, -0.00000000000000009939611878359099 0.5000190382262164, 0.2500000003624792 0.5000142786470051, 0.5 0.5) -NULL NULL NULL -Point middle of Left Square POINT (-0.5 0.5) POINT (-0.5 0.5) -Point middle of Right Square POINT (0.5 0.5) POINT (0.5 0.5) -Square (left) POLYGON ((-1 0, -0.5000000000000001 0, 0 0, 0 0.5, 0 1, -0.4999999999999998 1.0000380706528733, -1 1, -0.9999999999999998 0.5000000000000001, -1 0)) POLYGON ((-1 0, -0.7499999999999998 0, -0.5000000000000001 0, -0.2499999999999997 0, 0 0, 0 0.25, 0 0.5, 0 0.75, 0 1, -0.2499999985501929 1.0000285529443267, -0.4999999999999998 
1.0000380706528733, -0.7500000014498067 1.0000285529443265, -1 1, -1 0.7499999999999998, -0.9999999999999998 0.5000000000000001, -0.9999999999999998 0.25, -1 0)) -Square (right) POLYGON ((0 0, 0.5 0, 1 0, 1 0.4999999999999999, 1 1, 0.5 1.0000380706528733, 0 1, 0 0.5000000000000001, 0 0)) POLYGON ((0 0, 0.25 0, 0.5 0, 0.75 0, 1 0, 0.9999999999999998 0.25, 1 0.4999999999999999, 0.9999999999999998 0.7499999999999999, 1 1, 0.750000001449807 1.0000285529443267, 0.5 1.0000380706528733, 0.2499999985501931 1.0000285529443267, 0 1, 0 0.7499999999999998, 0 0.5000000000000001, 0 0.2499999999999997, 0 0)) -Square overlapping left and right square POLYGON ((-0.1 0, 0.44999999999999996 0, 1 0, 1 0.4999999999999999, 1 1, 0.44999999999999996 1.0000460657968335, -0.1 1, -0.1 0.5000000000000001, -0.1 0)) POLYGON ((-0.1 0, 0.17500000000000007 0, 0.44999999999999996 0, 0.7249999999999999 0, 1 0, 0.9999999999999998 0.25, 1 0.4999999999999999, 0.9999999999999998 0.7499999999999999, 1 1, 0.7250000019297163 1.0000345492812595, 0.44999999999999996 1.0000460657968335, 0.17499999807028374 1.0000345492812592, -0.1 1, -0.1 0.75, -0.1 0.5000000000000001, -0.10000000000000002 0.2499999999999997, -0.1 0)) +Empty GeometryCollection GEOMETRYCOLLECTION EMPTY GEOMETRYCOLLECTION EMPTY +Empty LineString LINESTRING EMPTY LINESTRING EMPTY +Faraway point POINT (5 5) POINT (5 5) +Line going through left and right square LINESTRING (-0.5 0.5, -0 0.500019038226216, 0.5 0.5) LINESTRING (-0.5 0.5, -0.250000000362479 0.500014278647005, -0 0.500019038226216, 0.250000000362479 0.500014278647005, 0.5 0.5) +NULL NULL NULL +Point middle of Left Square POINT (-0.5 0.5) POINT (-0.5 0.5) +Point middle of Right Square POINT (0.5 0.5) POINT (0.5 0.5) +Square (left) POLYGON ((-1 0, -0.5 0, 0 0, 0 0.5, 0 1, -0.5 1.000038070652873, -1 1, -1 0.5, -1 0)) POLYGON ((-1 0, -0.75 0, -0.5 0, -0.25 0, 0 0, 0 0.25, 0 0.5, 0 0.75, 0 1, -0.249999998550193 1.000028552944327, -0.5 1.000038070652873, -0.750000001449807 1.000028552944326, 
-1 1, -1 0.75, -1 0.5, -1 0.25, -1 0)) +Square (right) POLYGON ((0 0, 0.5 0, 1 0, 1 0.5, 1 1, 0.5 1.000038070652873, 0 1, 0 0.5, 0 0)) POLYGON ((0 0, 0.25 0, 0.5 0, 0.75 0, 1 0, 1 0.25, 1 0.5, 1 0.75, 1 1, 0.750000001449807 1.000028552944327, 0.5 1.000038070652873, 0.249999998550193 1.000028552944327, 0 1, 0 0.75, 0 0.5, 0 0.25, 0 0)) +Square overlapping left and right square POLYGON ((-0.1 0, 0.45 0, 1 0, 1 0.5, 1 1, 0.45 1.000046065796834, -0.1 1, -0.1 0.5, -0.1 0)) POLYGON ((-0.1 0, 0.175 0, 0.45 0, 0.725 0, 1 0, 1 0.25, 1 0.5, 1 0.75, 1 1, 0.725000001929716 1.000034549281259, 0.45 1.000046065796834, 0.174999998070284 1.000034549281259, -0.1 1, -0.1 0.75, -0.1 0.5, -0.1 0.25, -0.1 0)) query T SELECT ST_AsText(ST_Segmentize('MULTIPOINT (0 0, 1 1)'::geography, -1)) @@ -1551,43 +1584,6 @@ MULTIPOINT (0 0, 1 1) statement error st_segmentize\(\): maximum segment length must be positive SELECT ST_Segmentize('POLYGON ((0.0 0.0, 1.0 0.0, 1.0 1.0, 0.0 1.0, 0.0 0.0))'::geometry, -1) -subtest pg_extension - -statement ok -CREATE TABLE pg_extension_test ( - a geography(point, 4326), - b geometry(linestring, 3857), - c geometry, - d geography -) - -query TTTTIIT rowsort -SELECT * FROM pg_extension.geography_columns WHERE f_table_name = 'pg_extension_test' ----- -test public pg_extension_test a 2 4326 POINT -test public pg_extension_test d NULL 0 GEOMETRY - -query TTTTIIT rowsort -SELECT * FROM pg_extension.geometry_columns WHERE f_table_name = 'pg_extension_test' ----- -test public pg_extension_test b 2 3857 LINESTRING -test public pg_extension_test c 2 0 GEOMETRY - -query TTTTIIT rowsort -SELECT * FROM geography_columns WHERE f_table_name = 'pg_extension_test' ----- -test public pg_extension_test a 2 4326 POINT -test public pg_extension_test d NULL 0 GEOMETRY - -query TTTTIIT rowsort -SELECT * FROM geometry_columns WHERE f_table_name = 'pg_extension_test' ----- -test public pg_extension_test b 2 3857 LINESTRING -test public pg_extension_test c 2 0 GEOMETRY - -statement 
error not yet implemented -SELECT * FROM pg_extension.spatial_ref_sys ORDER BY srid ASC - subtest st_srid statement ok @@ -1818,36 +1814,36 @@ JOIN (VALUES (0.0), (0.2), (0.5), (0.51), (1.0)) b(fraction) ON (1=1) JOIN (VALUES (true), (false)) c(repeat) ON (1=1) ORDER BY a.dsc, b.fraction, c.repeat ---- -Empty LineString 0.0 false POINT EMPTY POINT EMPTY POINT EMPTY -Empty LineString 0.0 true POINT EMPTY POINT EMPTY POINT EMPTY -Empty LineString 0.2 false POINT EMPTY POINT EMPTY POINT EMPTY -Empty LineString 0.2 true POINT EMPTY POINT EMPTY POINT EMPTY -Empty LineString 0.5 false POINT EMPTY POINT EMPTY POINT EMPTY -Empty LineString 0.5 true POINT EMPTY POINT EMPTY POINT EMPTY -Empty LineString 0.51 false POINT EMPTY POINT EMPTY POINT EMPTY -Empty LineString 0.51 true POINT EMPTY POINT EMPTY POINT EMPTY -Empty LineString 1.0 false POINT EMPTY POINT EMPTY POINT EMPTY -Empty LineString 1.0 true POINT EMPTY POINT EMPTY POINT EMPTY -LineString anticlockwise covering all the quadrants 0.0 false POINT (1 -1) POINT (1 -1) POINT (1 -1) -LineString anticlockwise covering all the quadrants 0.0 true POINT (1 -1) POINT (1 -1) POINT (1 -1) -LineString anticlockwise covering all the quadrants 0.2 false POINT (1.6529822128134706 0.9589466384404113) MULTIPOINT (1.6529822128134706 0.9589466384404113, 1.032455532033675 2, -1.0324555320336777 2, -1.65298221281347 0.9589466384404097, -1 -1) POINT (1.6529822128134706 0.9589466384404113) -LineString anticlockwise covering all the quadrants 0.2 true POINT (1.6529822128134706 0.9589466384404113) MULTIPOINT (1.6529822128134706 0.9589466384404113, 1.032455532033675 2, -1.0324555320336777 2, -1.65298221281347 0.9589466384404097, -1 -1) MULTIPOINT (1.6529822128134706 0.9589466384404113, 1.032455532033675 2, -1.0324555320336777 2, -1.65298221281347 0.9589466384404097, -1 -1) -LineString anticlockwise covering all the quadrants 0.5 false POINT (-0.0000000000000004440892098500626 2) MULTIPOINT (-0.0000000000000004440892098500626 2, -1 -1) POINT 
(-0.0000000000000004440892098500626 2) -LineString anticlockwise covering all the quadrants 0.5 true POINT (-0.0000000000000004440892098500626 2) MULTIPOINT (-0.0000000000000004440892098500626 2, -1 -1) MULTIPOINT (-0.0000000000000004440892098500626 2, -1 -1) -LineString anticlockwise covering all the quadrants 0.51 false POINT (-0.10324555320336826 2) POINT (-0.10324555320336826 2) POINT (-0.10324555320336826 2) -LineString anticlockwise covering all the quadrants 0.51 true POINT (-0.10324555320336826 2) POINT (-0.10324555320336826 2) POINT (-0.10324555320336826 2) -LineString anticlockwise covering all the quadrants 1.0 false POINT (-1 -1) POINT (-1 -1) POINT (-1 -1) -LineString anticlockwise covering all the quadrants 1.0 true POINT (-1 -1) POINT (-1 -1) POINT (-1 -1) -LineString clockwise covering all the quadrants with SRID 4004 0.0 false SRID=4004;POINT (1 -1) SRID=4004;POINT (1 -1) SRID=4004;POINT (1 -1) -LineString clockwise covering all the quadrants with SRID 4004 0.0 true SRID=4004;POINT (1 -1) SRID=4004;POINT (1 -1) SRID=4004;POINT (1 -1) -LineString clockwise covering all the quadrants with SRID 4004 0.2 false SRID=4004;POINT (-0.8324555320336762 -1) SRID=4004;MULTIPOINT (-0.8324555320336762 -1, -1.5264911064067355 0.579473319220206, -1.6649110640673515 2, 0.16754446796632472 2, 2 2) SRID=4004;POINT (-0.8324555320336762 -1) -LineString clockwise covering all the quadrants with SRID 4004 0.2 true SRID=4004;POINT (-0.8324555320336762 -1) SRID=4004;MULTIPOINT (-0.8324555320336762 -1, -1.5264911064067355 0.579473319220206, -1.6649110640673515 2, 0.16754446796632472 2, 2 2) SRID=4004;MULTIPOINT (-0.8324555320336762 -1, -1.5264911064067355 0.579473319220206, -1.6649110640673515 2, 0.16754446796632472 2, 2 2) -LineString clockwise covering all the quadrants with SRID 4004 0.5 false SRID=4004;POINT (-1.816227766016838 1.448683298050514) SRID=4004;MULTIPOINT (-1.816227766016838 1.448683298050514, 2 2) SRID=4004;POINT (-1.816227766016838 1.448683298050514) 
-LineString clockwise covering all the quadrants with SRID 4004 0.5 true SRID=4004;POINT (-1.816227766016838 1.448683298050514) SRID=4004;MULTIPOINT (-1.816227766016838 1.448683298050514, 2 2) SRID=4004;MULTIPOINT (-1.816227766016838 1.448683298050514, 2 2) -LineString clockwise covering all the quadrants with SRID 4004 0.51 false SRID=4004;POINT (-1.845201431977848 1.5356042959335445) SRID=4004;POINT (-1.845201431977848 1.5356042959335445) SRID=4004;POINT (-1.845201431977848 1.5356042959335445) -LineString clockwise covering all the quadrants with SRID 4004 0.51 true SRID=4004;POINT (-1.845201431977848 1.5356042959335445) SRID=4004;POINT (-1.845201431977848 1.5356042959335445) SRID=4004;POINT (-1.845201431977848 1.5356042959335445) -LineString clockwise covering all the quadrants with SRID 4004 1.0 false SRID=4004;POINT (2 2) SRID=4004;POINT (2 2) SRID=4004;POINT (2 2) -LineString clockwise covering all the quadrants with SRID 4004 1.0 true SRID=4004;POINT (2 2) SRID=4004;POINT (2 2) SRID=4004;POINT (2 2) +Empty LineString 0.0 false POINT EMPTY POINT EMPTY POINT EMPTY +Empty LineString 0.0 true POINT EMPTY POINT EMPTY POINT EMPTY +Empty LineString 0.2 false POINT EMPTY POINT EMPTY POINT EMPTY +Empty LineString 0.2 true POINT EMPTY POINT EMPTY POINT EMPTY +Empty LineString 0.5 false POINT EMPTY POINT EMPTY POINT EMPTY +Empty LineString 0.5 true POINT EMPTY POINT EMPTY POINT EMPTY +Empty LineString 0.51 false POINT EMPTY POINT EMPTY POINT EMPTY +Empty LineString 0.51 true POINT EMPTY POINT EMPTY POINT EMPTY +Empty LineString 1.0 false POINT EMPTY POINT EMPTY POINT EMPTY +Empty LineString 1.0 true POINT EMPTY POINT EMPTY POINT EMPTY +LineString anticlockwise covering all the quadrants 0.0 false POINT (1 -1) POINT (1 -1) POINT (1 -1) +LineString anticlockwise covering all the quadrants 0.0 true POINT (1 -1) POINT (1 -1) POINT (1 -1) +LineString anticlockwise covering all the quadrants 0.2 false POINT (1.652982212813471 0.958946638440411) MULTIPOINT (1.652982212813471 
0.958946638440411, 1.032455532033675 2, -1.032455532033678 2, -1.65298221281347 0.95894663844041, -1 -1) POINT (1.652982212813471 0.958946638440411) +LineString anticlockwise covering all the quadrants 0.2 true POINT (1.652982212813471 0.958946638440411) MULTIPOINT (1.652982212813471 0.958946638440411, 1.032455532033675 2, -1.032455532033678 2, -1.65298221281347 0.95894663844041, -1 -1) MULTIPOINT (1.652982212813471 0.958946638440411, 1.032455532033675 2, -1.032455532033678 2, -1.65298221281347 0.95894663844041, -1 -1) +LineString anticlockwise covering all the quadrants 0.5 false POINT (-0 2) MULTIPOINT (-0 2, -1 -1) POINT (-0 2) +LineString anticlockwise covering all the quadrants 0.5 true POINT (-0 2) MULTIPOINT (-0 2, -1 -1) MULTIPOINT (-0 2, -1 -1) +LineString anticlockwise covering all the quadrants 0.51 false POINT (-0.103245553203368 2) POINT (-0.103245553203368 2) POINT (-0.103245553203368 2) +LineString anticlockwise covering all the quadrants 0.51 true POINT (-0.103245553203368 2) POINT (-0.103245553203368 2) POINT (-0.103245553203368 2) +LineString anticlockwise covering all the quadrants 1.0 false POINT (-1 -1) POINT (-1 -1) POINT (-1 -1) +LineString anticlockwise covering all the quadrants 1.0 true POINT (-1 -1) POINT (-1 -1) POINT (-1 -1) +LineString clockwise covering all the quadrants with SRID 4004 0.0 false SRID=4004;POINT (1 -1) SRID=4004;POINT (1 -1) SRID=4004;POINT (1 -1) +LineString clockwise covering all the quadrants with SRID 4004 0.0 true SRID=4004;POINT (1 -1) SRID=4004;POINT (1 -1) SRID=4004;POINT (1 -1) +LineString clockwise covering all the quadrants with SRID 4004 0.2 false SRID=4004;POINT (-0.832455532033676 -1) SRID=4004;MULTIPOINT (-0.832455532033676 -1, -1.526491106406735 0.579473319220206, -1.664911064067351 2, 0.167544467966325 2, 2 2) SRID=4004;POINT (-0.832455532033676 -1) +LineString clockwise covering all the quadrants with SRID 4004 0.2 true SRID=4004;POINT (-0.832455532033676 -1) SRID=4004;MULTIPOINT (-0.832455532033676 
-1, -1.526491106406735 0.579473319220206, -1.664911064067351 2, 0.167544467966325 2, 2 2) SRID=4004;MULTIPOINT (-0.832455532033676 -1, -1.526491106406735 0.579473319220206, -1.664911064067351 2, 0.167544467966325 2, 2 2) +LineString clockwise covering all the quadrants with SRID 4004 0.5 false SRID=4004;POINT (-1.816227766016838 1.448683298050514) SRID=4004;MULTIPOINT (-1.816227766016838 1.448683298050514, 2 2) SRID=4004;POINT (-1.816227766016838 1.448683298050514) +LineString clockwise covering all the quadrants with SRID 4004 0.5 true SRID=4004;POINT (-1.816227766016838 1.448683298050514) SRID=4004;MULTIPOINT (-1.816227766016838 1.448683298050514, 2 2) SRID=4004;MULTIPOINT (-1.816227766016838 1.448683298050514, 2 2) +LineString clockwise covering all the quadrants with SRID 4004 0.51 false SRID=4004;POINT (-1.845201431977848 1.535604295933545) SRID=4004;POINT (-1.845201431977848 1.535604295933545) SRID=4004;POINT (-1.845201431977848 1.535604295933545) +LineString clockwise covering all the quadrants with SRID 4004 0.51 true SRID=4004;POINT (-1.845201431977848 1.535604295933545) SRID=4004;POINT (-1.845201431977848 1.535604295933545) SRID=4004;POINT (-1.845201431977848 1.535604295933545) +LineString clockwise covering all the quadrants with SRID 4004 1.0 false SRID=4004;POINT (2 2) SRID=4004;POINT (2 2) SRID=4004;POINT (2 2) +LineString clockwise covering all the quadrants with SRID 4004 1.0 true SRID=4004;POINT (2 2) SRID=4004;POINT (2 2) SRID=4004;POINT (2 2) statement error st_lineinterpolatepoint\(\): fraction -1.000000 should be within \[0 1\] range SELECT ST_LineInterpolatePoint('LINESTRING (0 0, 1 1)'::geometry, -1) diff --git a/pkg/sql/logictest/testdata/logic_test/information_schema b/pkg/sql/logictest/testdata/logic_test/information_schema index cddabbbedda5..19ae73e049f9 100644 --- a/pkg/sql/logictest/testdata/logic_test/information_schema +++ b/pkg/sql/logictest/testdata/logic_test/information_schema @@ -53,43 +53,43 @@ DROP DATABASE information_schema 
CASCADE # Verify information_schema tables handle mutation statements correctly. -statement error user root does not have DROP privilege on relation tables +statement error tables is a virtual object and cannot be modified ALTER TABLE information_schema.tables RENAME TO information_schema.bad -statement error user root does not have CREATE privilege on relation tables +statement error tables is a virtual object and cannot be modified ALTER TABLE information_schema.tables RENAME COLUMN x TO y -statement error user root does not have CREATE privilege on relation tables +statement error tables is a virtual object and cannot be modified ALTER TABLE information_schema.tables ADD COLUMN x DECIMAL -statement error user root does not have CREATE privilege on relation tables +statement error tables is a virtual object and cannot be modified ALTER TABLE information_schema.tables DROP COLUMN x -statement error user root does not have CREATE privilege on relation tables +statement error tables is a virtual object and cannot be modified ALTER TABLE information_schema.tables ADD CONSTRAINT foo UNIQUE (b) -statement error user root does not have CREATE privilege on relation tables +statement error tables is a virtual object and cannot be modified ALTER TABLE information_schema.tables DROP CONSTRAINT bar -statement error user root does not have CREATE privilege on relation tables +statement error tables is a virtual object and cannot be modified ALTER TABLE information_schema.tables ALTER COLUMN x SET DEFAULT 'foo' -statement error user root does not have CREATE privilege on relation tables +statement error tables is a virtual object and cannot be modified ALTER TABLE information_schema.tables ALTER x DROP NOT NULL -statement error user root does not have CREATE privilege on relation tables +statement error tables is a virtual object and cannot be modified CREATE INDEX i on information_schema.tables (x) -statement error user root does not have DROP privilege on relation tables 
+statement error tables is a virtual object and cannot be modified DROP TABLE information_schema.tables -statement error user root does not have CREATE privilege on relation tables +statement error tables is a virtual object and cannot be modified DROP INDEX information_schema.tables@i -statement error user root does not have GRANT privilege on relation tables +statement error tables is a virtual object and cannot be modified GRANT CREATE ON information_schema.tables TO root -statement error user root does not have GRANT privilege on relation tables +statement error tables is a virtual object and cannot be modified REVOKE CREATE ON information_schema.tables FROM root @@ -104,7 +104,7 @@ INSERT INTO information_schema.tables VALUES ('abc') statement error user root does not have UPDATE privilege on relation tables UPDATE information_schema.tables SET a = 'abc' -statement error user root does not have DROP privilege on relation tables +statement error tables is a virtual object and cannot be modified TRUNCATE TABLE information_schema.tables diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog b/pkg/sql/logictest/testdata/logic_test/pg_catalog index 711b3c5f2fdd..24c11c57b9cd 100644 --- a/pkg/sql/logictest/testdata/logic_test/pg_catalog +++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog @@ -2478,3 +2478,59 @@ query B SELECT rolcanlogin FROM pg_roles WHERE rolname = 'role_test_nologin'; ---- false + +subtest mutations + +statement error pg_tables is a system catalog +ALTER TABLE pg_catalog.pg_tables RENAME TO pg_catalog.bad + +statement error pg_tables is a system catalog +ALTER TABLE pg_catalog.pg_tables RENAME COLUMN x TO y + +statement error pg_tables is a system catalog +ALTER TABLE pg_catalog.pg_tables ADD COLUMN x DECIMAL + +statement error pg_tables is a system catalog +ALTER TABLE pg_catalog.pg_tables DROP COLUMN x + +statement error pg_tables is a system catalog +ALTER TABLE pg_catalog.pg_tables ADD CONSTRAINT foo UNIQUE (b) + +statement error 
pg_tables is a system catalog +ALTER TABLE pg_catalog.pg_tables DROP CONSTRAINT bar + +statement error pg_tables is a system catalog +ALTER TABLE pg_catalog.pg_tables ALTER COLUMN x SET DEFAULT 'foo' + +statement error pg_tables is a system catalog +ALTER TABLE pg_catalog.pg_tables ALTER x DROP NOT NULL + +statement error pg_tables is a system catalog +CREATE INDEX i on pg_catalog.pg_tables (x) + +statement error pg_tables is a system catalog +DROP TABLE pg_catalog.pg_tables + +statement error pg_tables is a system catalog +DROP INDEX pg_catalog.pg_tables@i + +statement error pg_tables is a system catalog +GRANT CREATE ON pg_catalog.pg_tables TO root + +statement error pg_tables is a system catalog +REVOKE CREATE ON pg_catalog.pg_tables FROM root + +# Verify pg_catalog tables handles read-only property correctly. + +query error user root does not have DELETE privilege on relation pg_tables +DELETE FROM pg_catalog.pg_tables + +query error user root does not have INSERT privilege on relation pg_tables +INSERT INTO pg_catalog.pg_tables VALUES ('abc') + +statement error user root does not have UPDATE privilege on relation pg_tables +UPDATE pg_catalog.pg_tables SET a = 'abc' + +statement error pg_tables is a system catalog +TRUNCATE TABLE pg_catalog.pg_tables + diff --git a/pkg/sql/logictest/testdata/logic_test/pg_extension b/pkg/sql/logictest/testdata/logic_test/pg_extension new file mode 100644 index 000000000000..b77d385a4b13 --- /dev/null +++ b/pkg/sql/logictest/testdata/logic_test/pg_extension @@ -0,0 +1,43 @@ +statement ok +CREATE TABLE pg_extension_test ( + a geography(point, 4326), + b geometry(linestring, 3857), + c geometry, + d geography +) + +query TTTTIIT rowsort +SELECT * FROM pg_extension.geography_columns WHERE f_table_name = 'pg_extension_test' +---- +test public pg_extension_test a 2 4326 POINT +test public pg_extension_test d NULL 0 GEOMETRY + +query TTTTIIT rowsort +SELECT * FROM pg_extension.geometry_columns WHERE f_table_name = 'pg_extension_test' 
+---- +test public pg_extension_test b 2 3857 LINESTRING +test public pg_extension_test c 2 0 GEOMETRY + +query TTTTIIT rowsort +SELECT * FROM geography_columns WHERE f_table_name = 'pg_extension_test' +---- +test public pg_extension_test a 2 4326 POINT +test public pg_extension_test d NULL 0 GEOMETRY + +query TTTTIIT rowsort +SELECT * FROM geometry_columns WHERE f_table_name = 'pg_extension_test' +---- +test public pg_extension_test b 2 3857 LINESTRING +test public pg_extension_test c 2 0 GEOMETRY + +query ITITT +SELECT * FROM pg_extension.spatial_ref_sys WHERE srid IN (3857, 4326) ORDER BY srid ASC +---- +3857 EPSG 3857 PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],AUTHORITY["EPSG","3857"]] +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +4326 EPSG 4326 GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]] +proj=longlat +datum=WGS84 +no_defs + +query ITITT +SELECT * FROM spatial_ref_sys WHERE srid IN (3857, 4326) ORDER BY srid ASC +---- +3857 EPSG 3857 PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 
84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],AUTHORITY["EPSG","3857"]] +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +4326 EPSG 4326 GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]] +proj=longlat +datum=WGS84 +no_defs diff --git a/pkg/sql/logictest/testdata/logic_test/prepare b/pkg/sql/logictest/testdata/logic_test/prepare index 1eef588e572a..b749c3602a06 100644 --- a/pkg/sql/logictest/testdata/logic_test/prepare +++ b/pkg/sql/logictest/testdata/logic_test/prepare @@ -1140,6 +1140,7 @@ EXECUTE e ---- select ├── columns: k:1 str:2 + ├── immutable ├── stats: [rows=333.333333] ├── cost: 1050.03 ├── key: (1) @@ -1153,7 +1154,7 @@ select │ ├── fd: (1)-->(2) │ └── prune: (1,2) └── filters - └── (k:1 % 2) = 1 [outer=(1)] + └── (k:1 % 2) = 1 [outer=(1), immutable] # Only root may use PREPARE AS OPT PLAN. 
diff --git a/pkg/sql/logictest/testdata/logic_test/views b/pkg/sql/logictest/testdata/logic_test/views index 11a44d60b442..b846703e7087 100644 --- a/pkg/sql/logictest/testdata/logic_test/views +++ b/pkg/sql/logictest/testdata/logic_test/views @@ -688,3 +688,33 @@ statement ok CREATE OR REPLACE VIEW tview AS SELECT x AS x, x+1 AS x1, x+2 AS x2, x+3 AS x3 FROM t2 user root + +# Ensure a view that contains a table that is referenced multiple times with +# different column sets depends on the correct columns. +# Depended on columns should not be droppable. + +# Only column a should be depended on in this case. +statement ok +DROP TABLE ab CASCADE; +CREATE TABLE ab (a INT, b INT); +CREATE VIEW vab (x) AS SELECT ab.a FROM ab, ab AS ab2 + +statement ok +ALTER TABLE ab DROP COLUMN b + +statement error pq: cannot drop column "a" because view "vab" depends on it +ALTER TABLE ab DROP COLUMN a + +statement ok +CREATE TABLE abc (a INT, b INT, c INT); +CREATE VIEW vabc AS SELECT abc.a, abc2.b, abc3.c FROM abc, abc AS abc2, abc AS abc3 + +# All three columns a,b,c should not be droppable. 
+statement error pq: cannot drop column "a" because view "vabc" depends on it +ALTER TABLE abc DROP COLUMN a + +statement error pq: cannot drop column "b" because view "vabc" depends on it +ALTER TABLE abc DROP COLUMN b + +statement error pq: cannot drop column "c" because view "vabc" depends on it +ALTER TABLE abc DROP COLUMN c diff --git a/pkg/sql/namespace_test.go b/pkg/sql/namespace_test.go index 95ce1a366dd7..384bd6d00f56 100644 --- a/pkg/sql/namespace_test.go +++ b/pkg/sql/namespace_test.go @@ -15,7 +15,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -128,7 +127,7 @@ func TestNamespaceTableSemantics(t *testing.T) { } mKey := sqlbase.MakeDescMetadataKey(codec, sqlbase.ID(idCounter)) // Fill the dummy descriptor with garbage. - desc := sql.InitTableDescriptor( + desc := sqlbase.InitTableDescriptor( sqlbase.ID(idCounter), dbID, keys.PublicSchemaID, @@ -140,7 +139,7 @@ func TestNamespaceTableSemantics(t *testing.T) { if err := desc.AllocateIDs(); err != nil { t.Fatal(err) } - if err := kvDB.Put(ctx, mKey, sqlbase.WrapDescriptor(&desc)); err != nil { + if err := kvDB.Put(ctx, mKey, desc.DescriptorProto()); err != nil { t.Fatal(err) } diff --git a/pkg/sql/old_foreign_key_desc_test.go b/pkg/sql/old_foreign_key_desc_test.go index fcaa2aedfdbf..146318ced5ad 100644 --- a/pkg/sql/old_foreign_key_desc_test.go +++ b/pkg/sql/old_foreign_key_desc_test.go @@ -108,7 +108,7 @@ CREATE INDEX ON t.t1 (x); } err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() - newDesc := downgradeForeignKey(desc) + newDesc := sqlbase.NewImmutableTableDescriptor(*downgradeForeignKey(desc)) if err := catalogkv.WriteDescToBatch(ctx, false, s.ClusterSettings(), b, keys.SystemSQLCodec, desc.ID, newDesc); err != nil { return err } diff 
--git a/pkg/sql/opt/exec/execbuilder/testdata/explain b/pkg/sql/opt/exec/execbuilder/testdata/explain index 97aea455bc44..2f8f2334d26d 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/explain +++ b/pkg/sql/opt/exec/execbuilder/testdata/explain @@ -987,6 +987,7 @@ EXPLAIN (OPT, VERBOSE) SELECT * FROM tc WHERE a + 2 * b > 1 ORDER BY a*b ---- sort ├── columns: a:1 b:2 [hidden: column4:4] + ├── immutable ├── stats: [rows=333.333333] ├── cost: 1179.25548 ├── fd: (1,2)-->(4) @@ -995,6 +996,7 @@ sort ├── interesting orderings: (+1) └── project ├── columns: column4:4 a:1 b:2 + ├── immutable ├── stats: [rows=333.333333] ├── cost: 1116.70667 ├── fd: (1,2)-->(4) @@ -1002,6 +1004,7 @@ sort ├── interesting orderings: (+1) ├── select │ ├── columns: a:1 b:2 + │ ├── immutable │ ├── stats: [rows=333.333333] │ ├── cost: 1110.03 │ ├── interesting orderings: (+1) @@ -1012,15 +1015,16 @@ sort │ │ ├── prune: (1,2) │ │ └── interesting orderings: (+1) │ └── filters - │ └── (a:1 + (b:2 * 2)) > 1 [outer=(1,2)] + │ └── (a:1 + (b:2 * 2)) > 1 [outer=(1,2), immutable] └── projections - └── a:1 * b:2 [as=column4:4, outer=(1,2)] + └── a:1 * b:2 [as=column4:4, outer=(1,2), immutable] query T EXPLAIN (OPT, TYPES) SELECT * FROM tc WHERE a + 2 * b > 1 ORDER BY a*b ---- sort ├── columns: a:1(int) b:2(int) [hidden: column4:4(int)] + ├── immutable ├── stats: [rows=333.333333] ├── cost: 1179.25548 ├── fd: (1,2)-->(4) @@ -1029,6 +1033,7 @@ sort ├── interesting orderings: (+1) └── project ├── columns: column4:4(int) a:1(int) b:2(int) + ├── immutable ├── stats: [rows=333.333333] ├── cost: 1116.70667 ├── fd: (1,2)-->(4) @@ -1036,6 +1041,7 @@ sort ├── interesting orderings: (+1) ├── select │ ├── columns: a:1(int) b:2(int) + │ ├── immutable │ ├── stats: [rows=333.333333] │ ├── cost: 1110.03 │ ├── interesting orderings: (+1) @@ -1046,7 +1052,7 @@ sort │ │ ├── prune: (1,2) │ │ └── interesting orderings: (+1) │ └── filters - │ └── gt [type=bool, outer=(1,2)] + │ └── gt [type=bool, outer=(1,2), immutable] │ ├── 
plus [type=int] │ │ ├── variable: a:1 [type=int] │ │ └── mult [type=int] @@ -1054,7 +1060,7 @@ sort │ │ └── const: 2 [type=int] │ └── const: 1 [type=int] └── projections - └── mult [as=column4:4, type=int, outer=(1,2)] + └── mult [as=column4:4, type=int, outer=(1,2), immutable] ├── variable: a:1 [type=int] └── variable: b:2 [type=int] diff --git a/pkg/sql/opt/memo/expr.go b/pkg/sql/opt/memo/expr.go index 88b98efbd42d..c2b14711cbd6 100644 --- a/pkg/sql/opt/memo/expr.go +++ b/pkg/sql/opt/memo/expr.go @@ -631,7 +631,7 @@ func (prj *ProjectExpr) initUnexportedFields(mem *Memo) { // // We only add the FD if composite types are not involved. // - // TODO(radu): add a whitelist of expressions/operators that are ok, like + // TODO(radu): add a allowlist of expressions/operators that are ok, like // arithmetic. composite := false for i, ok := from.Next(0); ok; i, ok = from.Next(i + 1) { diff --git a/pkg/sql/opt/memo/expr_format.go b/pkg/sql/opt/memo/expr_format.go index 9742f3a12cca..3510f87ffd13 100644 --- a/pkg/sql/opt/memo/expr_format.go +++ b/pkg/sql/opt/memo/expr_format.go @@ -566,8 +566,15 @@ func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { if dep.SpecificIndex { fmt.Fprintf(f.Buffer, "@%s", dep.DataSource.(cat.Table).Index(dep.Index).Name()) } - if !dep.ColumnOrdinals.Empty() { - fmt.Fprintf(f.Buffer, " [columns: %s]", dep.ColumnOrdinals) + colNames, isTable := dep.GetColumnNames() + if len(colNames) > 0 { + fmt.Fprintf(f.Buffer, " [columns:") + for _, colName := range colNames { + fmt.Fprintf(f.Buffer, " %s", colName) + } + fmt.Fprintf(f.Buffer, "]") + } else if isTable { + fmt.Fprintf(f.Buffer, " [no columns]") } n.Child(f.Buffer.String()) } diff --git a/pkg/sql/opt/memo/logical_props_builder.go b/pkg/sql/opt/memo/logical_props_builder.go index 04c4de1855fe..2f22176d11d3 100644 --- a/pkg/sql/opt/memo/logical_props_builder.go +++ b/pkg/sql/opt/memo/logical_props_builder.go @@ -1372,6 +1372,9 @@ func BuildSharedProps(e opt.Expr, shared 
*props.Shared) { case *DivExpr: // Division by zero error is possible, unless the right-hand side is a // non-zero constant. + // + // TODO(radu): this case should be removed (Div should be covered by the + // binary operator logic below). var nonZero bool if c, ok := t.Right.(*ConstExpr); ok { switch v := c.Value.(type) { @@ -1413,7 +1416,34 @@ func BuildSharedProps(e opt.Expr, shared *props.Shared) { shared.VolatilitySet.Add(volatility) default: - if opt.IsMutationOp(e) { + if opt.IsUnaryOp(e) { + inputType := e.Child(0).(opt.ScalarExpr).DataType() + o, ok := FindUnaryOverload(e.Op(), inputType) + if !ok { + panic(errors.AssertionFailedf("unary overload not found (%s, %s)", e.Op(), inputType)) + } + shared.VolatilitySet.Add(o.Volatility) + } else if opt.IsComparisonOp(e) { + leftType := e.Child(0).(opt.ScalarExpr).DataType() + rightType := e.Child(1).(opt.ScalarExpr).DataType() + o, _, _, ok := FindComparisonOverload(e.Op(), leftType, rightType) + if !ok { + panic(errors.AssertionFailedf( + "comparison overload not found (%s, %s, %s)", e.Op(), leftType, rightType, + )) + } + shared.VolatilitySet.Add(o.Volatility) + } else if opt.IsBinaryOp(e) { + leftType := e.Child(0).(opt.ScalarExpr).DataType() + rightType := e.Child(1).(opt.ScalarExpr).DataType() + o, ok := FindBinaryOverload(e.Op(), leftType, rightType) + if !ok { + panic(errors.AssertionFailedf( + "binary overload not found (%s, %s, %s)", e.Op(), leftType, rightType, + )) + } + shared.VolatilitySet.Add(o.Volatility) + } else if opt.IsMutationOp(e) { shared.CanHaveSideEffects = true shared.CanMutate = true shared.VolatilitySet.AddVolatile() diff --git a/pkg/sql/opt/memo/testdata/format b/pkg/sql/opt/memo/testdata/format index db05c0fc99dc..4033c79ee52a 100644 --- a/pkg/sql/opt/memo/testdata/format +++ b/pkg/sql/opt/memo/testdata/format @@ -7,6 +7,7 @@ SELECT a + 1, min(b) FROM t WHERE k + a > b GROUP BY a ORDER BY a ---- sort ├── columns: "?column?":5(int) min:4(int!null) [hidden: t.public.t.a:1(int)] + ├── 
immutable ├── stats: [rows=98.1771622] ├── cost: 1097.87224 ├── key: (1) @@ -15,6 +16,7 @@ sort ├── prune: (1,4,5) └── project ├── columns: "?column?":5(int) t.public.t.a:1(int) min:4(int!null) + ├── immutable ├── stats: [rows=98.1771622] ├── cost: 1082.90531 ├── key: (1) @@ -23,6 +25,7 @@ sort ├── group-by │ ├── columns: t.public.t.a:1(int) min:4(int!null) │ ├── grouping columns: t.public.t.a:1(int) + │ ├── immutable │ ├── stats: [rows=98.1771622, distinct(1)=98.1771622, null(1)=1] │ ├── cost: 1080.93177 │ ├── key: (1) @@ -30,6 +33,7 @@ sort │ ├── prune: (4) │ ├── select │ │ ├── columns: t.public.t.a:1(int) t.public.t.b:2(int!null) t.public.t.k:3(int!null) + │ │ ├── immutable │ │ ├── stats: [rows=330, distinct(1)=98.1771622, null(1)=3.3, distinct(2)=100, null(2)=0] │ │ ├── cost: 1070.03 │ │ ├── key: (3) @@ -44,7 +48,7 @@ sort │ │ │ ├── prune: (1-3) │ │ │ └── interesting orderings: (+3) │ │ └── filters - │ │ └── lt [type=bool, outer=(1-3), constraints=(/2: (/NULL - ])] + │ │ └── lt [type=bool, outer=(1-3), immutable, constraints=(/2: (/NULL - ])] │ │ ├── variable: t.public.t.b:2 [type=int] │ │ └── plus [type=int] │ │ ├── variable: t.public.t.k:3 [type=int] @@ -53,7 +57,7 @@ sort │ └── min [as=min:4, type=int, outer=(2)] │ └── variable: t.public.t.b:2 [type=int] └── projections - └── plus [as="?column?":5, type=int, outer=(1)] + └── plus [as="?column?":5, type=int, outer=(1), immutable] ├── variable: t.public.t.a:1 [type=int] └── const: 1 [type=int] @@ -101,23 +105,27 @@ SELECT a + 1, min(b) FROM t WHERE k + a > b GROUP BY a ORDER BY a ---- sort ├── columns: "?column?":5(int) min:4(int!null) [hidden: a:1(int)] + ├── immutable ├── key: (1) ├── fd: (1)-->(4,5) ├── ordering: +1 ├── prune: (1,4,5) └── project ├── columns: "?column?":5(int) a:1(int) min:4(int!null) + ├── immutable ├── key: (1) ├── fd: (1)-->(4,5) ├── prune: (1,4,5) ├── group-by │ ├── columns: a:1(int) min:4(int!null) │ ├── grouping columns: a:1(int) + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(4) │ 
├── prune: (4) │ ├── select │ │ ├── columns: a:1(int) b:2(int!null) k:3(int!null) + │ │ ├── immutable │ │ ├── key: (3) │ │ ├── fd: (3)-->(1,2) │ │ ├── interesting orderings: (+3) @@ -128,35 +136,39 @@ sort │ │ │ ├── prune: (1-3) │ │ │ └── interesting orderings: (+3) │ │ └── filters - │ │ └── b:2 < (k:3 + a:1) [type=bool, outer=(1-3), constraints=(/2: (/NULL - ])] + │ │ └── b:2 < (k:3 + a:1) [type=bool, outer=(1-3), immutable, constraints=(/2: (/NULL - ])] │ └── aggregations │ └── min [as=min:4, type=int, outer=(2)] │ └── b:2 [type=int] └── projections - └── a:1 + 1 [as="?column?":5, type=int, outer=(1)] + └── a:1 + 1 [as="?column?":5, type=int, outer=(1), immutable] opt format=(hide-stats,hide-cost,hide-qual,hide-scalars,hide-types) SELECT a + 1, min(b) FROM t WHERE k + a > b GROUP BY a ORDER BY a ---- sort ├── columns: "?column?":5 min:4!null [hidden: a:1] + ├── immutable ├── key: (1) ├── fd: (1)-->(4,5) ├── ordering: +1 ├── prune: (1,4,5) └── project ├── columns: "?column?":5 a:1 min:4!null + ├── immutable ├── key: (1) ├── fd: (1)-->(4,5) ├── prune: (1,4,5) ├── group-by │ ├── columns: a:1 min:4!null │ ├── grouping columns: a:1 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(4) │ ├── prune: (4) │ ├── select │ │ ├── columns: a:1 b:2!null k:3!null + │ │ ├── immutable │ │ ├── key: (3) │ │ ├── fd: (3)-->(1,2) │ │ ├── interesting orderings: (+3) @@ -167,35 +179,39 @@ sort │ │ │ ├── prune: (1-3) │ │ │ └── interesting orderings: (+3) │ │ └── filters - │ │ └── b:2 < (k:3 + a:1) [outer=(1-3), constraints=(/2: (/NULL - ])] + │ │ └── b:2 < (k:3 + a:1) [outer=(1-3), immutable, constraints=(/2: (/NULL - ])] │ └── aggregations │ └── min [as=min:4, outer=(2)] │ └── b:2 └── projections - └── a:1 + 1 [as="?column?":5, outer=(1)] + └── a:1 + 1 [as="?column?":5, outer=(1), immutable] opt format=(hide-stats,hide-cost,hide-qual,hide-scalars,hide-notnull) SELECT a + 1, min(b) FROM t WHERE k + a > b GROUP BY a ORDER BY a ---- sort ├── columns: "?column?":5(int) min:4(int) [hidden: 
a:1(int)] + ├── immutable ├── key: (1) ├── fd: (1)-->(4,5) ├── ordering: +1 ├── prune: (1,4,5) └── project ├── columns: "?column?":5(int) a:1(int) min:4(int) + ├── immutable ├── key: (1) ├── fd: (1)-->(4,5) ├── prune: (1,4,5) ├── group-by │ ├── columns: a:1(int) min:4(int) │ ├── grouping columns: a:1(int) + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(4) │ ├── prune: (4) │ ├── select │ │ ├── columns: a:1(int) b:2(int) k:3(int) + │ │ ├── immutable │ │ ├── key: (3) │ │ ├── fd: (3)-->(1,2) │ │ ├── interesting orderings: (+3) @@ -206,35 +222,39 @@ sort │ │ │ ├── prune: (1-3) │ │ │ └── interesting orderings: (+3) │ │ └── filters - │ │ └── b:2 < (k:3 + a:1) [type=bool, outer=(1-3), constraints=(/2: (/NULL - ])] + │ │ └── b:2 < (k:3 + a:1) [type=bool, outer=(1-3), immutable, constraints=(/2: (/NULL - ])] │ └── aggregations │ └── min [as=min:4, type=int, outer=(2)] │ └── b:2 [type=int] └── projections - └── a:1 + 1 [as="?column?":5, type=int, outer=(1)] + └── a:1 + 1 [as="?column?":5, type=int, outer=(1), immutable] opt format=(hide-stats,hide-cost,hide-qual,hide-scalars,hide-types,hide-notnull) SELECT a + 1, min(b) FROM t WHERE k + a > b GROUP BY a ORDER BY a ---- sort ├── columns: "?column?":5 min:4 [hidden: a:1] + ├── immutable ├── key: (1) ├── fd: (1)-->(4,5) ├── ordering: +1 ├── prune: (1,4,5) └── project ├── columns: "?column?":5 a:1 min:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(4,5) ├── prune: (1,4,5) ├── group-by │ ├── columns: a:1 min:4 │ ├── grouping columns: a:1 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(4) │ ├── prune: (4) │ ├── select │ │ ├── columns: a:1 b:2 k:3 + │ │ ├── immutable │ │ ├── key: (3) │ │ ├── fd: (3)-->(1,2) │ │ ├── interesting orderings: (+3) @@ -245,12 +265,12 @@ sort │ │ │ ├── prune: (1-3) │ │ │ └── interesting orderings: (+3) │ │ └── filters - │ │ └── b:2 < (k:3 + a:1) [outer=(1-3), constraints=(/2: (/NULL - ])] + │ │ └── b:2 < (k:3 + a:1) [outer=(1-3), immutable, constraints=(/2: (/NULL - ])] │ └── aggregations │ └── min 
[as=min:4, outer=(2)] │ └── b:2 └── projections - └── a:1 + 1 [as="?column?":5, outer=(1)] + └── a:1 + 1 [as="?column?":5, outer=(1), immutable] opt format=(hide-miscprops,hide-physprops,hide-columns) SELECT a + 1, min(b) FROM t WHERE k + a > b GROUP BY a ORDER BY a diff --git a/pkg/sql/opt/memo/testdata/logprops/constraints b/pkg/sql/opt/memo/testdata/logprops/constraints index fe61562ef935..9688e792b40a 100644 --- a/pkg/sql/opt/memo/testdata/logprops/constraints +++ b/pkg/sql/opt/memo/testdata/logprops/constraints @@ -167,6 +167,7 @@ SELECT * FROM a WHERE x > 1 AND x < 5 AND x + y = 5 ---- select ├── columns: x:1(int!null) y:2(int) + ├── immutable ├── scan a │ ├── columns: x:1(int) y:2(int) │ └── prune: (1,2) @@ -179,7 +180,7 @@ select │ └── lt [type=bool] │ ├── variable: x:1 [type=int] │ └── const: 5 [type=int] - └── eq [type=bool, outer=(1,2)] + └── eq [type=bool, outer=(1,2), immutable] ├── plus [type=int] │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] @@ -190,6 +191,7 @@ SELECT * FROM a WHERE x > 1 AND x + y >= 5 AND x + y <= 7 ---- select ├── columns: x:1(int!null) y:2(int) + ├── immutable ├── scan a │ ├── columns: x:1(int) y:2(int) │ └── prune: (1,2) @@ -197,12 +199,12 @@ select ├── gt [type=bool, outer=(1), constraints=(/1: [/2 - ]; tight)] │ ├── variable: x:1 [type=int] │ └── const: 1 [type=int] - ├── ge [type=bool, outer=(1,2)] + ├── ge [type=bool, outer=(1,2), immutable] │ ├── plus [type=int] │ │ ├── variable: x:1 [type=int] │ │ └── variable: y:2 [type=int] │ └── const: 5 [type=int] - └── le [type=bool, outer=(1,2)] + └── le [type=bool, outer=(1,2), immutable] ├── plus [type=int] │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] @@ -357,11 +359,12 @@ SELECT * FROM a WHERE (x, y) > (1, 2) ---- select ├── columns: x:1(int!null) y:2(int) + ├── immutable ├── scan a │ ├── columns: x:1(int) y:2(int) │ └── prune: (1,2) └── filters - └── gt [type=bool, outer=(1,2), constraints=(/1/2: [/1/3 - ]; tight)] + └── gt [type=bool, 
outer=(1,2), immutable, constraints=(/1/2: [/1/3 - ]; tight)] ├── tuple [type=tuple{int, int}] │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] @@ -374,11 +377,12 @@ SELECT * FROM a WHERE (x, y) >= (1, 2) ---- select ├── columns: x:1(int!null) y:2(int) + ├── immutable ├── scan a │ ├── columns: x:1(int) y:2(int) │ └── prune: (1,2) └── filters - └── ge [type=bool, outer=(1,2), constraints=(/1/2: [/1/2 - ]; tight)] + └── ge [type=bool, outer=(1,2), immutable, constraints=(/1/2: [/1/2 - ]; tight)] ├── tuple [type=tuple{int, int}] │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] @@ -391,11 +395,12 @@ SELECT * FROM a WHERE (x, y) < (1, 2) ---- select ├── columns: x:1(int!null) y:2(int) + ├── immutable ├── scan a │ ├── columns: x:1(int) y:2(int) │ └── prune: (1,2) └── filters - └── lt [type=bool, outer=(1,2), constraints=(/1/2: (/NULL - /1/1]; tight)] + └── lt [type=bool, outer=(1,2), immutable, constraints=(/1/2: (/NULL - /1/1]; tight)] ├── tuple [type=tuple{int, int}] │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] @@ -408,11 +413,12 @@ SELECT * FROM a WHERE (x, y) <= (1, 2) ---- select ├── columns: x:1(int!null) y:2(int) + ├── immutable ├── scan a │ ├── columns: x:1(int) y:2(int) │ └── prune: (1,2) └── filters - └── le [type=bool, outer=(1,2), constraints=(/1/2: (/NULL - /1/2]; tight)] + └── le [type=bool, outer=(1,2), immutable, constraints=(/1/2: (/NULL - /1/2]; tight)] ├── tuple [type=tuple{int, int}] │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] @@ -426,11 +432,12 @@ SELECT * FROM a WHERE (x, y) >= (1, 2.5) ---- select ├── columns: x:1(int) y:2(int) + ├── immutable ├── scan a │ ├── columns: x:1(int) y:2(int) │ └── prune: (1,2) └── filters - └── ge [type=bool, outer=(1,2)] + └── ge [type=bool, outer=(1,2), immutable] ├── tuple [type=tuple{int, int}] │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] @@ -444,11 +451,12 @@ SELECT * FROM a WHERE (x, y) >= (1, NULL) ---- select ├── columns: x:1(int) 
y:2(int) + ├── immutable ├── scan a │ ├── columns: x:1(int) y:2(int) │ └── prune: (1,2) └── filters - └── ge [type=bool, outer=(1,2)] + └── ge [type=bool, outer=(1,2), immutable] ├── tuple [type=tuple{int, int}] │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] @@ -463,12 +471,13 @@ SELECT * FROM a WHERE (x, 1) >= (1, 2) ---- select ├── columns: x:1(int) y:2(int) + ├── immutable ├── prune: (2) ├── scan a │ ├── columns: x:1(int) y:2(int) │ └── prune: (1,2) └── filters - └── ge [type=bool, outer=(1)] + └── ge [type=bool, outer=(1), immutable] ├── tuple [type=tuple{int, int}] │ ├── variable: x:1 [type=int] │ └── const: 1 [type=int] @@ -640,6 +649,7 @@ SELECT * FROM (SELECT (x, y) AS col FROM a) WHERE col > (1, 2) ---- select ├── columns: col:4(tuple{int, int}!null) + ├── immutable ├── project │ ├── columns: col:4(tuple{int, int}) │ ├── prune: (4) @@ -651,7 +661,7 @@ select │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] └── filters - └── gt [type=bool, outer=(4), constraints=(/4: [/(1, 3) - ]; tight)] + └── gt [type=bool, outer=(4), immutable, constraints=(/4: [/(1, 3) - ]; tight)] ├── variable: col:4 [type=tuple{int, int}] └── tuple [type=tuple{int, int}] ├── const: 1 [type=int] @@ -812,6 +822,7 @@ SELECT * FROM c WHERE (v, u + v) IN ((1, 2), (3, 50), (5, 100)) ---- select ├── columns: k:1(int!null) u:2(int) v:3(int!null) + ├── immutable ├── key: (1) ├── fd: (1)-->(2,3) ├── prune: (1) @@ -827,7 +838,7 @@ select │ ├── prune: (1-3) │ └── interesting orderings: (+1) (+3,+2,+1) └── filters - └── in [type=bool, outer=(2,3), constraints=(/3: [/1 - /1] [/3 - /3] [/5 - /5])] + └── in [type=bool, outer=(2,3), immutable, constraints=(/3: [/1 - /1] [/3 - /3] [/5 - /5])] ├── tuple [type=tuple{int, int}] │ ├── variable: v:3 [type=int] │ └── plus [type=int] @@ -1251,11 +1262,12 @@ SELECT * FROM a WHERE (x, y) < (1, 2) OR (x, y) > (3, 4) ---- select ├── columns: x:1(int!null) y:2(int) + ├── immutable ├── scan a │ ├── columns: x:1(int) y:2(int) │ └── prune: 
(1,2) └── filters - └── or [type=bool, outer=(1,2), constraints=(/1/2: (/NULL - /1/1] [/3/5 - ]; tight)] + └── or [type=bool, outer=(1,2), immutable, constraints=(/1/2: (/NULL - /1/1] [/3/5 - ]; tight)] ├── lt [type=bool] │ ├── tuple [type=tuple{int, int}] │ │ ├── variable: x:1 [type=int] diff --git a/pkg/sql/opt/memo/testdata/logprops/index-join b/pkg/sql/opt/memo/testdata/logprops/index-join index 0ee2af826526..d2953cda92e8 100644 --- a/pkg/sql/opt/memo/testdata/logprops/index-join +++ b/pkg/sql/opt/memo/testdata/logprops/index-join @@ -27,6 +27,7 @@ SELECT * FROM a WHERE s = 'foo' AND x + y = 10 ---- select ├── columns: x:1(int!null) y:2(int) s:3(string!null) d:4(decimal!null) + ├── immutable ├── key: (1) ├── fd: ()-->(3), (1)-->(2,4), (4)-->(1,2), (2,3)~~>(1,4) ├── prune: (4) @@ -44,7 +45,7 @@ select │ ├── prune: (1,3,4) │ └── interesting orderings: (+1) (-3,+4,+1) └── filters - └── eq [type=bool, outer=(1,2)] + └── eq [type=bool, outer=(1,2), immutable] ├── plus [type=int] │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] @@ -55,9 +56,11 @@ SELECT y FROM a WHERE s = 'foo' AND x + y = 10 ---- project ├── columns: y:2(int) + ├── immutable ├── prune: (2) └── select ├── columns: x:1(int!null) y:2(int) s:3(string!null) + ├── immutable ├── key: (1) ├── fd: ()-->(3), (1)-->(2), (2,3)~~>(1) ├── interesting orderings: (+1) (-3) @@ -74,7 +77,7 @@ project │ ├── prune: (1,3) │ └── interesting orderings: (+1) (-3) └── filters - └── eq [type=bool, outer=(1,2)] + └── eq [type=bool, outer=(1,2), immutable] ├── plus [type=int] │ ├── variable: x:1 [type=int] │ └── variable: y:2 [type=int] diff --git a/pkg/sql/opt/memo/testdata/logprops/insert b/pkg/sql/opt/memo/testdata/logprops/insert index da2b0c325da0..7db1dc52f012 100644 --- a/pkg/sql/opt/memo/testdata/logprops/insert +++ b/pkg/sql/opt/memo/testdata/logprops/insert @@ -71,7 +71,7 @@ insert abcde │ └── cast: INT8 [as=column12:12, type=int, immutable] │ └── null [type=unknown] └── projections - └── plus 
[as=column13:13, type=int, outer=(8,10)] + └── plus [as=column13:13, type=int, outer=(8,10), immutable] ├── plus [type=int] │ ├── variable: y:8 [type=int] │ └── variable: column10:10 [type=int] @@ -139,7 +139,7 @@ project │ └── cast: INT8 [as=column12:12, type=int, immutable] │ └── null [type=unknown] └── projections - └── plus [as=column13:13, type=int, outer=(8,10)] + └── plus [as=column13:13, type=int, outer=(8,10), immutable] ├── plus [type=int] │ ├── variable: y:8 [type=int] │ └── variable: column10:10 [type=int] @@ -190,7 +190,7 @@ project │ └── cast: INT8 [as=column12:12, type=int, immutable] │ └── null [type=unknown] └── projections - └── plus [as=column13:13, type=int, outer=(8,10)] + └── plus [as=column13:13, type=int, outer=(8,10), immutable] ├── plus [type=int] │ ├── variable: y:8 [type=int] │ └── variable: column10:10 [type=int] @@ -242,7 +242,7 @@ insert abcde │ └── cast: INT8 [as=column11:11, type=int, immutable] │ └── null [type=unknown] └── projections - └── plus [as=column12:12, type=int, outer=(8,9)] + └── plus [as=column12:12, type=int, outer=(8,9), immutable] ├── plus [type=int] │ ├── variable: column2:8 [type=int] │ └── variable: column9:9 [type=int] @@ -310,7 +310,7 @@ project │ └── cast: INT8 [as=column13:13, type=int, immutable] │ └── null [type=unknown] └── projections - └── plus [as=column14:14, type=int, outer=(10,11)] + └── plus [as=column14:14, type=int, outer=(10,11), immutable] ├── plus [type=int] │ ├── variable: int8:10 [type=int] │ └── variable: column11:11 [type=int] diff --git a/pkg/sql/opt/memo/testdata/logprops/join b/pkg/sql/opt/memo/testdata/logprops/join index f954aeb3caac..da58abe2557b 100644 --- a/pkg/sql/opt/memo/testdata/logprops/join +++ b/pkg/sql/opt/memo/testdata/logprops/join @@ -1989,6 +1989,7 @@ SELECT * FROM mn LEFT JOIN xysd ON y = (n * 2) ---- project ├── columns: m:1(int!null) n:2(int) x:3(int) y:4(int) s:5(string) d:6(decimal) + ├── immutable ├── key: (1,3) ├── fd: (1)-->(2), (2)~~>(1), (3)-->(4-6), 
(5,6)~~>(3,4) ├── prune: (1-6) @@ -1997,6 +1998,7 @@ project └── left-join (hash) ├── columns: m:1(int!null) n:2(int) x:3(int) y:4(int) s:5(string) d:6(decimal) column7:7(int) ├── multiplicity: left-rows(one-or-more), right-rows(zero-or-more) + ├── immutable ├── key: (1,3) ├── fd: (1)-->(2), (2)~~>(1), (2)-->(7), (3)-->(4-6), (5,6)~~>(3,4) ├── prune: (1-3,5,6) @@ -2004,6 +2006,7 @@ project ├── interesting orderings: (+1) (+2,+1) (+3) (-5,+6,+3) ├── project │ ├── columns: column7:7(int) m:1(int!null) n:2(int) + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2), (2)~~>(1), (2)-->(7) │ ├── prune: (1,2,7) @@ -2017,7 +2020,7 @@ project │ │ ├── interesting orderings: (+1) (+2,+1) │ │ └── unfiltered-cols: (1,2) │ └── projections - │ └── mult [as=column7:7, type=int, outer=(2)] + │ └── mult [as=column7:7, type=int, outer=(2), immutable] │ ├── variable: n:2 [type=int] │ └── const: 2 [type=int] ├── scan xysd diff --git a/pkg/sql/opt/memo/testdata/logprops/project b/pkg/sql/opt/memo/testdata/logprops/project index 2ab2947a7c7c..2f2b2b08ff2a 100644 --- a/pkg/sql/opt/memo/testdata/logprops/project +++ b/pkg/sql/opt/memo/testdata/logprops/project @@ -15,6 +15,7 @@ SELECT y, x+1 AS a, 1 AS b, x FROM xysd ---- project ├── columns: y:2(int) a:5(int!null) b:6(int!null) x:1(int!null) + ├── immutable ├── key: (1) ├── fd: ()-->(6), (1)-->(2,5) ├── prune: (1,2,5,6) @@ -26,7 +27,7 @@ project │ ├── prune: (1-4) │ └── interesting orderings: (+1) (-3,+4,+1) └── projections - ├── plus [as=a:5, type=int, outer=(1)] + ├── plus [as=a:5, type=int, outer=(1), immutable] │ ├── variable: x:1 [type=int] │ └── const: 1 [type=int] └── const: 1 [as=b:6, type=int] diff --git a/pkg/sql/opt/memo/testdata/logprops/scalar b/pkg/sql/opt/memo/testdata/logprops/scalar index 6d8356b27778..b5999b24d55c 100644 --- a/pkg/sql/opt/memo/testdata/logprops/scalar +++ b/pkg/sql/opt/memo/testdata/logprops/scalar @@ -67,7 +67,7 @@ project │ ├── function: length [type=int] │ │ └── const: 'foo' [type=string] │ └── 
variable: y:2 [type=int] - └── mult [as=b:7, type=int, outer=(1,5)] + └── mult [as=b:7, type=int, outer=(1,5), immutable] ├── variable: rowid:5 [type=int] └── variable: x:1 [type=int] diff --git a/pkg/sql/opt/memo/testdata/logprops/set b/pkg/sql/opt/memo/testdata/logprops/set index 0b85ab23a170..18f8a6464a32 100644 --- a/pkg/sql/opt/memo/testdata/logprops/set +++ b/pkg/sql/opt/memo/testdata/logprops/set @@ -102,6 +102,7 @@ SELECT * FROM xy WHERE (SELECT x, u FROM uv UNION SELECT y, v FROM uv) = (1, 2) ---- select ├── columns: x:1(int!null) y:2(int) + ├── immutable ├── key: (1) ├── fd: (1)-->(2) ├── interesting orderings: (+1) @@ -112,7 +113,7 @@ select │ ├── prune: (1,2) │ └── interesting orderings: (+1) └── filters - └── eq [type=bool, outer=(1,2), correlated-subquery] + └── eq [type=bool, outer=(1,2), immutable, correlated-subquery] ├── subquery [type=tuple{int, int}] │ └── max1-row │ ├── columns: column13:13(tuple{int, int}) diff --git a/pkg/sql/opt/memo/testdata/logprops/update b/pkg/sql/opt/memo/testdata/logprops/update index 91f726ef6902..3b755f9ebbfb 100644 --- a/pkg/sql/opt/memo/testdata/logprops/update +++ b/pkg/sql/opt/memo/testdata/logprops/update @@ -31,6 +31,7 @@ update abcde ├── volatile, side-effects, mutations └── project ├── columns: column15:15(int!null) a:7(int!null) b:8(int) c:9(int!null) d:10(int) rowid:11(int!null) e:12(int) b_new:13(int!null) column14:14(int!null) + ├── immutable ├── key: (11) ├── fd: ()-->(7,13,14), (11)-->(8-10,12), (9)-->(15) ├── prune: (7-15) @@ -75,7 +76,7 @@ update abcde │ └── projections │ └── const: 0 [as=column14:14, type=int] └── projections - └── plus [as=column15:15, type=int, outer=(9,13)] + └── plus [as=column15:15, type=int, outer=(9,13), immutable] ├── plus [type=int] │ ├── variable: b_new:13 [type=int] │ └── variable: c:9 [type=int] @@ -102,6 +103,7 @@ project ├── fd: ()-->(1,2), (5)-->(3,4), (3)-->(4) └── project ├── columns: column15:15(int!null) a:7(int!null) b:8(int) c:9(int!null) d:10(int) 
rowid:11(int!null) e:12(int) b_new:13(int!null) column14:14(int!null) + ├── immutable ├── key: (11) ├── fd: ()-->(7,13,14), (11)-->(8-10,12), (9)-->(15) ├── prune: (7-15) @@ -146,7 +148,7 @@ project │ └── projections │ └── const: 0 [as=column14:14, type=int] └── projections - └── plus [as=column15:15, type=int, outer=(9,13)] + └── plus [as=column15:15, type=int, outer=(9,13), immutable] ├── plus [type=int] │ ├── variable: b_new:13 [type=int] │ └── variable: c:9 [type=int] @@ -177,6 +179,7 @@ project └── project ├── columns: column15:15(int!null) a:7(int!null) b:8(int) c:9(int!null) d:10(int) rowid:11(int!null) e:12(int) b_new:13(int!null) column14:14(int!null) ├── cardinality: [0 - 1] + ├── immutable ├── key: () ├── fd: ()-->(7-15) ├── prune: (7-15) @@ -224,7 +227,7 @@ project │ └── projections │ └── const: 0 [as=column14:14, type=int] └── projections - └── plus [as=column15:15, type=int, outer=(9,13)] + └── plus [as=column15:15, type=int, outer=(9,13), immutable] ├── plus [type=int] │ ├── variable: b_new:13 [type=int] │ └── variable: c:9 [type=int] @@ -251,6 +254,7 @@ project ├── fd: ()-->(1), (2)==(3), (3)==(2), (5)-->(2-4), (2)-->(4) └── project ├── columns: column15:15(int!null) a:7(int!null) b:8(int!null) c:9(int!null) d:10(int) rowid:11(int!null) e:12(int) a_new:13(int!null) column14:14(int!null) + ├── immutable ├── key: (11) ├── fd: ()-->(13,14), (11)-->(7-10,12), (8)==(9), (9)==(8), (8,9)-->(15) ├── prune: (7-15) @@ -295,7 +299,7 @@ project │ └── projections │ └── const: 0 [as=column14:14, type=int] └── projections - └── plus [as=column15:15, type=int, outer=(8,9)] + └── plus [as=column15:15, type=int, outer=(8,9), immutable] ├── plus [type=int] │ ├── variable: b:8 [type=int] │ └── variable: c:9 [type=int] diff --git a/pkg/sql/opt/memo/testdata/logprops/upsert b/pkg/sql/opt/memo/testdata/logprops/upsert index 87f6040a1434..63a2fc45bbb4 100644 --- a/pkg/sql/opt/memo/testdata/logprops/upsert +++ b/pkg/sql/opt/memo/testdata/logprops/upsert @@ -129,7 +129,7 @@ 
project │ │ │ │ │ │ └── projections │ │ │ │ │ │ └── function: unique_rowid [as=column8:8, type=int, volatile, side-effects] │ │ │ │ │ └── projections - │ │ │ │ │ └── plus [as=column9:9, type=int, outer=(6)] + │ │ │ │ │ └── plus [as=column9:9, type=int, outer=(6), immutable] │ │ │ │ │ ├── variable: y:6 [type=int] │ │ │ │ │ └── const: 1 [type=int] │ │ │ │ └── aggregations @@ -158,11 +158,11 @@ project │ │ │ └── variable: c:12 [type=int] │ │ └── projections │ │ ├── const: 1 [as=a_new:14, type=int] - │ │ └── plus [as=b_new:15, type=int, outer=(6,12)] + │ │ └── plus [as=b_new:15, type=int, outer=(6,12), immutable] │ │ ├── variable: y:6 [type=int] │ │ └── variable: c:12 [type=int] │ └── projections - │ └── plus [as=column16:16, type=int, outer=(15)] + │ └── plus [as=column16:16, type=int, outer=(15), immutable] │ ├── variable: b_new:15 [type=int] │ └── const: 1 [type=int] └── projections @@ -340,7 +340,7 @@ project │ │ │ │ │ │ │ │ │ │ └── projections │ │ │ │ │ │ │ │ │ │ └── function: unique_rowid [as=column8:8, type=int, volatile, side-effects] │ │ │ │ │ │ │ │ │ └── projections - │ │ │ │ │ │ │ │ │ └── plus [as=column9:9, type=int, outer=(6)] + │ │ │ │ │ │ │ │ │ └── plus [as=column9:9, type=int, outer=(6), immutable] │ │ │ │ │ │ │ │ │ ├── variable: y:6 [type=int] │ │ │ │ │ │ │ │ │ └── const: 1 [type=int] │ │ │ │ │ │ │ │ ├── scan abc @@ -514,7 +514,7 @@ project │ │ │ │ │ │ ├── const: 10 [as=column6:6, type=int] │ │ │ │ │ │ └── function: unique_rowid [as=column7:7, type=int, volatile, side-effects] │ │ │ │ │ └── projections - │ │ │ │ │ └── plus [as=column8:8, type=int, outer=(6)] + │ │ │ │ │ └── plus [as=column8:8, type=int, outer=(6), immutable] │ │ │ │ │ ├── variable: column6:6 [type=int] │ │ │ │ │ └── const: 1 [type=int] │ │ │ │ └── aggregations @@ -541,7 +541,7 @@ project │ │ │ ├── variable: column7:7 [type=int] │ │ │ └── variable: rowid:12 [type=int] │ │ └── projections - │ │ └── plus [as=column13:13, type=int, outer=(10)] + │ │ └── plus [as=column13:13, type=int, 
outer=(10), immutable] │ │ ├── variable: b:10 [type=int] │ │ └── const: 1 [type=int] │ └── projections @@ -570,7 +570,7 @@ project │ │ └── variable: column7:7 [type=int] │ └── variable: rowid:12 [type=int] └── projections - └── plus [as="?column?":17, type=int, outer=(2,3)] + └── plus [as="?column?":17, type=int, outer=(2,3), immutable] ├── variable: b:2 [type=int] └── variable: c:3 [type=int] @@ -672,7 +672,7 @@ upsert abc │ │ │ │ │ │ ├── const: 10 [as=column8:8, type=int] │ │ │ │ │ │ └── function: unique_rowid [as=column9:9, type=int, volatile, side-effects] │ │ │ │ │ └── projections - │ │ │ │ │ └── plus [as=column10:10, type=int, outer=(8)] + │ │ │ │ │ └── plus [as=column10:10, type=int, outer=(8), immutable] │ │ │ │ │ ├── variable: column8:8 [type=int] │ │ │ │ │ └── const: 1 [type=int] │ │ │ │ └── aggregations @@ -701,7 +701,7 @@ upsert abc │ │ └── projections │ │ └── const: 2 [as=b_new:15, type=int] │ └── projections - │ └── plus [as=column16:16, type=int, outer=(15)] + │ └── plus [as=column16:16, type=int, outer=(15), immutable] │ ├── variable: b_new:15 [type=int] │ └── const: 1 [type=int] └── projections diff --git a/pkg/sql/opt/memo/testdata/logprops/values b/pkg/sql/opt/memo/testdata/logprops/values index f4e4cd115e05..ebf8a5475cc3 100644 --- a/pkg/sql/opt/memo/testdata/logprops/values +++ b/pkg/sql/opt/memo/testdata/logprops/values @@ -64,6 +64,7 @@ SELECT (VALUES (x), (y+1)) FROM xy ---- project ├── columns: column1:4(int) + ├── immutable ├── prune: (4) ├── scan xy │ ├── columns: x:1(int!null) y:2(int) @@ -72,18 +73,20 @@ project │ ├── prune: (1,2) │ └── interesting orderings: (+1) └── projections - └── subquery [as=column1:4, type=int, outer=(1,2), correlated-subquery] + └── subquery [as=column1:4, type=int, outer=(1,2), immutable, correlated-subquery] └── max1-row ├── columns: column1:3(int) ├── error: "more than one row returned by a subquery used as an expression" ├── outer: (1,2) ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(3) 
└── values ├── columns: column1:3(int) ├── outer: (1,2) ├── cardinality: [2 - 2] + ├── immutable ├── prune: (3) ├── tuple [type=tuple{int}] │ └── variable: x:1 [type=int] diff --git a/pkg/sql/opt/memo/testdata/logprops/window b/pkg/sql/opt/memo/testdata/logprops/window index 7fc1ca157c55..b67e0bb8ed3c 100644 --- a/pkg/sql/opt/memo/testdata/logprops/window +++ b/pkg/sql/opt/memo/testdata/logprops/window @@ -90,6 +90,7 @@ SELECT k, (SELECT rank() OVER () + x FROM (SELECT k AS x)) FROM kv ---- project ├── columns: k:1(int!null) "?column?":11(int) + ├── immutable ├── key: (1) ├── fd: (1)-->(11) ├── prune: (1,11) @@ -101,18 +102,20 @@ project │ ├── prune: (1-7) │ └── interesting orderings: (+1) └── projections - └── subquery [as="?column?":11, type=int, outer=(1), correlated-subquery] + └── subquery [as="?column?":11, type=int, outer=(1), immutable, correlated-subquery] └── max1-row ├── columns: "?column?":10(int) ├── error: "more than one row returned by a subquery used as an expression" ├── outer: (1) ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(10) └── project ├── columns: "?column?":10(int) ├── outer: (1) ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(10) ├── prune: (10) @@ -139,7 +142,7 @@ project │ └── windows │ └── rank [as=rank:9, type=int] └── projections - └── plus [as="?column?":10, type=int, outer=(8,9)] + └── plus [as="?column?":10, type=int, outer=(8,9), immutable] ├── variable: rank:9 [type=int] └── variable: x:8 [type=int] diff --git a/pkg/sql/opt/memo/testdata/memo b/pkg/sql/opt/memo/testdata/memo index 8cf88bf5211c..6be4aa8bcb78 100644 --- a/pkg/sql/opt/memo/testdata/memo +++ b/pkg/sql/opt/memo/testdata/memo @@ -58,7 +58,7 @@ limit │ │ │ └── variable: a.x:1 [type=int] │ │ └── variable: b.x:3 [type=string] │ └── projections - │ └── plus [as=c:5, type=int, outer=(2)] + │ └── plus [as=c:5, type=int, outer=(2), immutable] │ ├── variable: y:2 [type=int] │ └── const: 1 [type=int] └── const: 10 [type=int] @@ -117,7 
+117,7 @@ project │ │ └── filters (true) │ └── const: 10 [type=int] └── projections - └── plus [as=c:6, type=int, outer=(2)] + └── plus [as=c:6, type=int, outer=(2), immutable] ├── variable: y:2 [type=int] └── const: 1 [type=int] diff --git a/pkg/sql/opt/memo/testdata/stats/groupby b/pkg/sql/opt/memo/testdata/stats/groupby index 2062dd835eb7..d65bcae3ec7d 100644 --- a/pkg/sql/opt/memo/testdata/stats/groupby +++ b/pkg/sql/opt/memo/testdata/stats/groupby @@ -186,6 +186,7 @@ SELECT sum(x), s FROM a GROUP BY s HAVING sum(x) = 5 ---- select ├── columns: sum:5(decimal!null) s:4(string) + ├── immutable ├── stats: [rows=1, distinct(5)=1, null(5)=0] ├── key: (4) ├── fd: ()-->(5) @@ -209,7 +210,7 @@ select │ └── sum [as=sum:5, type=decimal, outer=(1)] │ └── x:1 [type=int] └── filters - └── sum:5 = 5 [type=bool, outer=(5), constraints=(/5: [/5 - /5]; tight), fd=()-->(5)] + └── sum:5 = 5 [type=bool, outer=(5), immutable, constraints=(/5: [/5 - /5]; tight), fd=()-->(5)] # Scalar GroupBy. build @@ -431,6 +432,7 @@ SELECT sum(x), s FROM a GROUP BY s HAVING sum(x) = 5 ---- select ├── columns: sum:5(decimal!null) s:4(string) + ├── immutable ├── stats: [rows=1, distinct(5)=1, null(5)=0] ├── key: (4) ├── fd: ()-->(5) @@ -454,7 +456,7 @@ select │ └── sum [as=sum:5, type=decimal, outer=(1)] │ └── x:1 [type=int] └── filters - └── sum:5 = 5 [type=bool, outer=(5), constraints=(/5: [/5 - /5]; tight), fd=()-->(5)] + └── sum:5 = 5 [type=bool, outer=(5), immutable, constraints=(/5: [/5 - /5]; tight), fd=()-->(5)] # Regression test for #36442. 
norm diff --git a/pkg/sql/opt/memo/testdata/stats/index-join b/pkg/sql/opt/memo/testdata/stats/index-join index e4684d34e85d..30baf15b0bfe 100644 --- a/pkg/sql/opt/memo/testdata/stats/index-join +++ b/pkg/sql/opt/memo/testdata/stats/index-join @@ -39,15 +39,18 @@ SELECT count(*) FROM (SELECT * FROM a WHERE s = 'foo' AND x + y = 10) GROUP BY s ---- project ├── columns: count:5(int!null) + ├── immutable ├── stats: [rows=49.2384513] └── group-by ├── columns: y:2(int) count_rows:5(int!null) ├── grouping columns: y:2(int) + ├── immutable ├── stats: [rows=49.2384513, distinct(2)=49.2384513, null(2)=0] ├── key: (2) ├── fd: (2)-->(5) ├── select │ ├── columns: x:1(int!null) y:2(int) s:3(string!null) + │ ├── immutable │ ├── stats: [rows=66.6666667, distinct(2)=49.2384513, null(2)=0, distinct(3)=1, null(3)=0] │ ├── key: (1) │ ├── fd: ()-->(3), (1)-->(2) @@ -63,7 +66,7 @@ project │ │ ├── key: (1) │ │ └── fd: ()-->(3) │ └── filters - │ └── (x:1 + y:2) = 10 [type=bool, outer=(1,2)] + │ └── (x:1 + y:2) = 10 [type=bool, outer=(1,2), immutable] └── aggregations └── count-rows [as=count_rows:5, type=int] @@ -72,6 +75,7 @@ SELECT * FROM a WHERE s = 'foo' AND x + y = 10 ---- select ├── columns: x:1(int!null) y:2(int) s:3(string!null) d:4(decimal!null) + ├── immutable ├── stats: [rows=66.6666667, distinct(1)=66.6666667, null(1)=0, distinct(2)=49.2384513, null(2)=0, distinct(3)=1, null(3)=0, distinct(4)=57.5057212, null(4)=0, distinct(1-3)=66.6666667, null(1-3)=0] ├── key: (1) ├── fd: ()-->(3), (1)-->(2,4), (4)-->(1,2) @@ -87,7 +91,7 @@ select │ ├── key: (1) │ └── fd: ()-->(3), (1)-->(4), (4)-->(1) └── filters - └── (x:1 + y:2) = 10 [type=bool, outer=(1,2)] + └── (x:1 + y:2) = 10 [type=bool, outer=(1,2), immutable] opt colstat=1 colstat=2 colstat=3 colstat=(1,2,3) SELECT * FROM a WHERE s = 'foo' @@ -180,6 +184,7 @@ SELECT * FROM a WHERE s = 'foo' AND x + y = 10 ---- select ├── columns: x:1(int!null) y:2(int) s:3(string!null) d:4(decimal!null) + ├── immutable ├── stats: [rows=33.3333333, 
distinct(1)=33.3333333, null(1)=0, distinct(2)=28.5927601, null(2)=16.6666667, distinct(3)=1, null(3)=0, distinct(4)=30.9412676, null(4)=0, distinct(1-3)=33.3333333, null(1-3)=0] ├── key: (1) ├── fd: ()-->(3), (1)-->(2,4), (4)-->(1,2) @@ -195,7 +200,7 @@ select │ ├── key: (1) │ └── fd: ()-->(3), (1)-->(4), (4)-->(1) └── filters - └── (x:1 + y:2) = 10 [type=bool, outer=(1,2)] + └── (x:1 + y:2) = 10 [type=bool, outer=(1,2), immutable] opt colstat=1 colstat=2 colstat=3 colstat=(1,2,3) SELECT * FROM a WHERE s = 'foo' diff --git a/pkg/sql/opt/memo/testdata/stats/join b/pkg/sql/opt/memo/testdata/stats/join index 79ef7c32c134..6342cc9f89ea 100644 --- a/pkg/sql/opt/memo/testdata/stats/join +++ b/pkg/sql/opt/memo/testdata/stats/join @@ -382,6 +382,7 @@ SELECT * FROM xysd JOIN uv ON x=u AND y+v=5 AND y > 0 AND y < 300 inner-join (hash) ├── columns: x:1(int!null) y:2(int!null) s:3(string) d:4(decimal!null) u:5(int!null) v:6(int!null) ├── multiplicity: left-rows(zero-or-more), right-rows(zero-or-one) + ├── immutable ├── stats: [rows=3333.33333, distinct(1)=500, null(1)=0, distinct(5)=500, null(5)=0] ├── fd: (1)-->(2-4), (3,4)~~>(1,2), (1)==(5), (5)==(1) ├── select @@ -401,7 +402,7 @@ inner-join (hash) │ └── stats: [rows=10000, distinct(5)=500, null(5)=0, distinct(6)=100, null(6)=0] └── filters ├── x:1 = u:5 [type=bool, outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] - └── (y:2 + v:6) = 5 [type=bool, outer=(2,6)] + └── (y:2 + v:6) = 5 [type=bool, outer=(2,6), immutable] # Force column statistics calculation for semi-join. 
norm @@ -411,15 +412,18 @@ GROUP BY y ---- project ├── columns: count:8(int!null) + ├── immutable ├── stats: [rows=138.170075] └── group-by ├── columns: y:2(int) count_rows:8(int!null) ├── grouping columns: y:2(int) + ├── immutable ├── stats: [rows=138.170075, distinct(2)=138.170075, null(2)=0] ├── key: (2) ├── fd: (2)-->(8) ├── semi-join (hash) │ ├── columns: x:1(int!null) y:2(int) + │ ├── immutable │ ├── stats: [rows=166.666667, distinct(1)=166.666667, null(1)=0, distinct(2)=138.170075, null(2)=0] │ ├── key: (1) │ ├── fd: (1)-->(2) @@ -433,7 +437,7 @@ project │ │ └── stats: [rows=10000, distinct(5)=500, null(5)=0, distinct(6)=100, null(6)=0] │ └── filters │ ├── x:1 = u:5 [type=bool, outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] - │ └── (y:2 + v:6) = 5 [type=bool, outer=(2,6)] + │ └── (y:2 + v:6) = 5 [type=bool, outer=(2,6), immutable] └── aggregations └── count-rows [as=count_rows:8, type=int] @@ -445,15 +449,18 @@ GROUP BY y ---- project ├── columns: count:8(int!null) + ├── immutable ├── stats: [rows=400] └── group-by ├── columns: y:2(int) count_rows:8(int!null) ├── grouping columns: y:2(int) + ├── immutable ├── stats: [rows=400, distinct(2)=400, null(2)=0] ├── key: (2) ├── fd: (2)-->(8) ├── anti-join (hash) │ ├── columns: x:1(int!null) y:2(int) + │ ├── immutable │ ├── stats: [rows=4833.33333, distinct(2)=400, null(2)=0] │ ├── key: (1) │ ├── fd: (1)-->(2) @@ -467,7 +474,7 @@ project │ │ └── stats: [rows=10000, distinct(5)=500, null(5)=0, distinct(6)=100, null(6)=0] │ └── filters │ ├── x:1 = u:5 [type=bool, outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] - │ └── (y:2 + v:6) = 5 [type=bool, outer=(2,6)] + │ └── (y:2 + v:6) = 5 [type=bool, outer=(2,6), immutable] └── aggregations └── count-rows [as=count_rows:8, type=int] @@ -479,16 +486,19 @@ GROUP BY y ---- project ├── columns: count:8(int!null) + ├── immutable ├── stats: [rows=400] └── group-by ├── columns: y:2(int) count_rows:8(int!null) ├── 
grouping columns: y:2(int) + ├── immutable ├── stats: [rows=400, distinct(2)=400, null(2)=0] ├── key: (2) ├── fd: (2)-->(8) ├── left-join (hash) │ ├── columns: x:1(int!null) y:2(int) u:5(int) v:6(int) │ ├── multiplicity: left-rows(one-or-more), right-rows(zero-or-one) + │ ├── immutable │ ├── stats: [rows=5000, distinct(2)=400, null(2)=0, distinct(5)=500, null(5)=1666.66667] │ ├── fd: (1)-->(2) │ ├── scan xysd @@ -501,7 +511,7 @@ project │ │ └── stats: [rows=10000, distinct(5)=500, null(5)=0] │ └── filters │ ├── x:1 = u:5 [type=bool, outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] - │ └── (y:2 + v:6) = 5 [type=bool, outer=(2,6)] + │ └── (y:2 + v:6) = 5 [type=bool, outer=(2,6), immutable] └── aggregations └── count-rows [as=count_rows:8, type=int] @@ -513,16 +523,19 @@ GROUP BY y ---- project ├── columns: count:8(int!null) + ├── immutable ├── stats: [rows=399.903879] └── group-by ├── columns: y:2(int) count_rows:8(int!null) ├── grouping columns: y:2(int) + ├── immutable ├── stats: [rows=399.903879, distinct(2)=399.903879, null(2)=1] ├── key: (2) ├── fd: (2)-->(8) ├── left-join (hash) │ ├── columns: x:1(int) y:2(int) u:5(int) v:6(int!null) │ ├── multiplicity: left-rows(exactly-one), right-rows(zero-or-more) + │ ├── immutable │ ├── stats: [rows=10000, distinct(1)=500, null(1)=6666.66667, distinct(2)=399.903879, null(2)=6666.66667] │ ├── fd: (1)-->(2) │ ├── scan uv @@ -535,7 +548,7 @@ project │ │ └── fd: (1)-->(2) │ └── filters │ ├── x:1 = u:5 [type=bool, outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] - │ └── (y:2 + v:6) = 5 [type=bool, outer=(2,6)] + │ └── (y:2 + v:6) = 5 [type=bool, outer=(2,6), immutable] └── aggregations └── count-rows [as=count_rows:8, type=int] @@ -547,16 +560,19 @@ GROUP BY y ---- project ├── columns: count:8(int!null) + ├── immutable ├── stats: [rows=400] └── group-by ├── columns: y:2(int) count_rows:8(int!null) ├── grouping columns: y:2(int) + ├── immutable ├── stats: 
[rows=400, distinct(2)=400, null(2)=1] ├── key: (2) ├── fd: (2)-->(8) ├── full-join (hash) │ ├── columns: x:1(int) y:2(int) u:5(int) v:6(int) │ ├── multiplicity: left-rows(one-or-more), right-rows(exactly-one) + │ ├── immutable │ ├── stats: [rows=11666.6667, distinct(2)=400, null(2)=6666.66667] │ ├── fd: (1)-->(2) │ ├── scan xysd @@ -569,7 +585,7 @@ project │ │ └── stats: [rows=10000, distinct(5)=500, null(5)=0] │ └── filters │ ├── x:1 = u:5 [type=bool, outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] - │ └── (y:2 + v:6) = 5 [type=bool, outer=(2,6)] + │ └── (y:2 + v:6) = 5 [type=bool, outer=(2,6), immutable] └── aggregations └── count-rows [as=count_rows:8, type=int] diff --git a/pkg/sql/opt/memo/testdata/stats/project b/pkg/sql/opt/memo/testdata/stats/project index 86009e9264d5..335e42083e69 100644 --- a/pkg/sql/opt/memo/testdata/stats/project +++ b/pkg/sql/opt/memo/testdata/stats/project @@ -135,9 +135,11 @@ SELECT * FROM (SELECT y + 3 AS v FROM a) WHERE v >= 1 AND v <= 100 ---- select ├── columns: v:5(int!null) + ├── immutable ├── stats: [rows=1000, distinct(5)=100, null(5)=0] ├── project │ ├── columns: v:5(int) + │ ├── immutable │ ├── stats: [rows=2000, distinct(5)=200, null(5)=0] │ ├── scan a │ │ ├── columns: x:1(int!null) y:2(int) s:3(string) d:4(decimal!null) @@ -145,7 +147,7 @@ select │ │ ├── key: (1) │ │ └── fd: (1)-->(2-4), (3,4)~~>(1,2) │ └── projections - │ └── y:2 + 3 [as=v:5, type=int, outer=(2)] + │ └── y:2 + 3 [as=v:5, type=int, outer=(2), immutable] └── filters └── (v:5 >= 1) AND (v:5 <= 100) [type=bool, outer=(5), constraints=(/5: [/1 - /100]; tight)] diff --git a/pkg/sql/opt/memo/testdata/stats/scan b/pkg/sql/opt/memo/testdata/stats/scan index 3082e5853bf4..9a198543ce12 100644 --- a/pkg/sql/opt/memo/testdata/stats/scan +++ b/pkg/sql/opt/memo/testdata/stats/scan @@ -202,6 +202,7 @@ SELECT * FROM a WHERE ((s >= 'bar' AND s <= 'foo') OR (s >= 'foobar')) AND d > 5 ---- select ├── columns: x:1(int!null) y:2(int) 
s:3(string!null) d:4(decimal!null) b:5(bool) + ├── immutable ├── stats: [rows=650, distinct(3)=1, null(3)=0, distinct(4)=650, null(4)=0, distinct(3,4)=650, null(3,4)=0] ├── key: (1) ├── fd: (1)-->(2-5), (3,4)-->(1,2,5) @@ -212,13 +213,14 @@ select │ └── fd: (1)-->(2-5), (3,4)~~>(1,2,5) └── filters ├── ((s:3 >= 'bar') AND (s:3 <= 'foo')) OR (s:3 >= 'foobar') [type=bool, outer=(3), constraints=(/3: [/'bar' - /'foo'] [/'foobar' - ]; tight)] - └── d:4 > 5.0 [type=bool, outer=(4), constraints=(/4: (/5.0 - ]; tight)] + └── d:4 > 5.0 [type=bool, outer=(4), immutable, constraints=(/4: (/5.0 - ]; tight)] opt SELECT * FROM a WHERE ((s >= 'bar' AND s <= 'foo') OR (s >= 'foobar')) AND d <= 5.0 AND s IS NOT NULL ---- select ├── columns: x:1(int!null) y:2(int) s:3(string!null) d:4(decimal!null) b:5(bool) + ├── immutable ├── stats: [rows=650, distinct(3)=1, null(3)=0, distinct(4)=650, null(4)=0, distinct(3,4)=650, null(3,4)=0] ├── key: (1) ├── fd: (1)-->(2-5), (3,4)-->(1,2,5) @@ -229,7 +231,7 @@ select │ └── fd: (1)-->(2-5), (3,4)~~>(1,2,5) └── filters ├── (((s:3 >= 'bar') AND (s:3 <= 'foo')) OR (s:3 >= 'foobar')) AND (s:3 IS NOT NULL) [type=bool, outer=(3), constraints=(/3: [/'bar' - /'foo'] [/'foobar' - ]; tight)] - └── d:4 <= 5.0 [type=bool, outer=(4), constraints=(/4: (/NULL - /5.0]; tight)] + └── d:4 <= 5.0 [type=bool, outer=(4), immutable, constraints=(/4: (/NULL - /5.0]; tight)] # Bump up null counts. 
@@ -359,11 +361,13 @@ SELECT * FROM a WHERE ((s >= 'bar' AND s <= 'foo') OR (s >= 'foobar')) AND d <= ---- index-join a ├── columns: x:1(int!null) y:2(int) s:3(string!null) d:4(decimal!null) b:5(bool) + ├── immutable ├── stats: [rows=333.333333, distinct(3)=1, null(3)=0, distinct(4)=100, null(4)=0, distinct(3,4)=100, null(3,4)=0] ├── key: (1) ├── fd: (1)-->(2-5), (3,4)-->(1,2,5) └── select ├── columns: x:1(int!null) s:3(string!null) d:4(decimal!null) + ├── immutable ├── stats: [rows=333.333333, distinct(4)=98.265847, null(4)=0] ├── key: (1) ├── fd: (1)-->(3,4), (3,4)-->(1) @@ -376,7 +380,7 @@ index-join a │ ├── key: (1) │ └── fd: (1)-->(3,4), (3,4)-->(1) └── filters - └── d:4 <= 5.0 [type=bool, outer=(4), constraints=(/4: (/NULL - /5.0]; tight)] + └── d:4 <= 5.0 [type=bool, outer=(4), immutable, constraints=(/4: (/NULL - /5.0]; tight)] exec-ddl CREATE TABLE abcde ( @@ -720,6 +724,7 @@ SELECT * FROM hist WHERE c = 20 OR (c < 10) ---- index-join hist ├── columns: a:1(int) b:2(date) c:3(decimal!null) d:4(float) e:5(timestamp) f:6(timestamptz) g:7(string) + ├── immutable ├── stats: [rows=110, distinct(3)=10, null(3)=0] │ histogram(3)= 0 0 90 0 0 20 │ <--- 0 ---- 10 --- 20 @@ -739,6 +744,7 @@ SELECT * FROM hist WHERE c = 20 OR (c <= 10) ---- index-join hist ├── columns: a:1(int) b:2(date) c:3(decimal!null) d:4(float) e:5(timestamp) f:6(timestamptz) g:7(string) + ├── immutable ├── stats: [rows=120, distinct(3)=11, null(3)=0] │ histogram(3)= 0 0 90 10 0 20 │ <--- 0 ---- 10 --- 20 diff --git a/pkg/sql/opt/memo/testdata/stats/select b/pkg/sql/opt/memo/testdata/stats/select index 89c3e6f07eee..dec8eae7aee6 100644 --- a/pkg/sql/opt/memo/testdata/stats/select +++ b/pkg/sql/opt/memo/testdata/stats/select @@ -96,6 +96,7 @@ SELECT * FROM a WHERE x + y < 10 ---- select ├── columns: x:1(int!null) y:2(int) + ├── immutable ├── stats: [rows=1333.33333] ├── key: (1) ├── fd: (1)-->(2) @@ -105,7 +106,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(2) └── filters - └── (x:1 + y:2) < 10 
[type=bool, outer=(1,2)] + └── (x:1 + y:2) < 10 [type=bool, outer=(1,2), immutable] # Remaining filter. norm @@ -538,6 +539,7 @@ SELECT * FROM order_history WHERE item_id = order_id AND customer_id % 2 = 0 ---- select ├── columns: order_id:1(int!null) item_id:2(int!null) customer_id:3(int) year:4(int) + ├── immutable ├── stats: [rows=3.267, distinct(1)=3.267, null(1)=0, distinct(2)=3.267, null(2)=0] ├── fd: (1)==(2), (2)==(1) ├── scan order_history @@ -545,7 +547,7 @@ select │ └── stats: [rows=1000, distinct(1)=100, null(1)=10, distinct(2)=100, null(2)=10] └── filters ├── item_id:2 = order_id:1 [type=bool, outer=(1,2), constraints=(/1: (/NULL - ]; /2: (/NULL - ]), fd=(1)==(2), (2)==(1)] - └── (customer_id:3 % 2) = 0 [type=bool, outer=(3)] + └── (customer_id:3 % 2) = 0 [type=bool, outer=(3), immutable] exec-ddl CREATE TABLE c (x INT, z INT NOT NULL, UNIQUE INDEX x_idx (x)) @@ -840,6 +842,7 @@ SELECT * FROM tjson WHERE b @> '{"a":"b"}' ---- index-join tjson ├── columns: a:1(int!null) b:2(jsonb) c:3(jsonb) + ├── immutable ├── stats: [rows=555.555556] ├── key: (1) ├── fd: (1)-->(2,3) @@ -858,6 +861,7 @@ inner-join (lookup tjson) ├── columns: a:1(int!null) b:2(jsonb) c:3(jsonb) ├── key columns: [1] = [1] ├── lookup columns are key + ├── immutable ├── stats: [rows=61.7283951] ├── key: (1) ├── fd: (1)-->(2,3) @@ -869,7 +873,7 @@ inner-join (lookup tjson) │ ├── stats: [rows=61.7283951, distinct(1)=61.7283951, null(1)=0] │ └── filters (true) └── filters - └── b:2 @> '{"a": "b", "c": "d"}' [type=bool, outer=(2)] + └── b:2 @> '{"a": "b", "c": "d"}' [type=bool, outer=(2), immutable] # Should generate a select on the table with a JSON filter, since c does not # have an inverted index. 
@@ -878,6 +882,7 @@ SELECT * FROM tjson WHERE c @> '{"a":"b"}' ---- select ├── columns: a:1(int!null) b:2(jsonb) c:3(jsonb) + ├── immutable ├── stats: [rows=555.555556] ├── key: (1) ├── fd: (1)-->(2,3) @@ -887,7 +892,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(2,3) └── filters - └── c:3 @> '{"a": "b"}' [type=bool, outer=(3)] + └── c:3 @> '{"a": "b"}' [type=bool, outer=(3), immutable] # Should have a lower row count than the above case, due to a containment query # on 2 json paths. @@ -896,6 +901,7 @@ SELECT * FROM tjson WHERE c @> '{"a":"b", "c":"d"}' ---- select ├── columns: a:1(int!null) b:2(jsonb) c:3(jsonb) + ├── immutable ├── stats: [rows=61.7283951] ├── key: (1) ├── fd: (1)-->(2,3) @@ -905,7 +911,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(2,3) └── filters - └── c:3 @> '{"a": "b", "c": "d"}' [type=bool, outer=(3)] + └── c:3 @> '{"a": "b", "c": "d"}' [type=bool, outer=(3), immutable] # Bump up null counts. exec-ddl @@ -999,6 +1005,7 @@ SELECT * FROM a WHERE x + y < 10 ---- select ├── columns: x:1(int!null) y:2(int) + ├── immutable ├── stats: [rows=1666.66667] ├── key: (1) ├── fd: (1)-->(2) @@ -1008,7 +1015,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(2) └── filters - └── (x:1 + y:2) < 10 [type=bool, outer=(1,2)] + └── (x:1 + y:2) < 10 [type=bool, outer=(1,2), immutable] # Remaining filter. norm diff --git a/pkg/sql/opt/memo/testdata/stats/with b/pkg/sql/opt/memo/testdata/stats/with index 3d246b01e16e..ee8ee3fa5ef1 100644 --- a/pkg/sql/opt/memo/testdata/stats/with +++ b/pkg/sql/opt/memo/testdata/stats/with @@ -154,3 +154,53 @@ with &1 (t0) │ └── filters (true) └── projections └── NULL [as="?column?":27, type=unknown] + +exec-ddl +CREATE TABLE test ( + id string +) +---- + +# Regression test for #49911. Make sure there is no error if the left side of +# a recursive CTE has cardinality=0. 
+norm +WITH RECURSIVE hierarchy(id) as + (SELECT id FROM test WHERE id = 'foo' AND 1 != 1 UNION ALL SELECT c.id FROM test AS c, hierarchy AS p WHERE c.id = 'bar') +SELECT * FROM hierarchy +---- +project + ├── columns: id:7(string) + ├── stats: [rows=10] + ├── recursive-c-t-e + │ ├── columns: id:3(string) + │ ├── working table binding: &1 + │ ├── initial columns: test.id:1(string) + │ ├── recursive columns: c.id:4(string) + │ ├── stats: [rows=10] + │ ├── values + │ │ ├── columns: test.id:1(string!null) + │ │ ├── cardinality: [0 - 0] + │ │ ├── stats: [rows=0] + │ │ ├── key: () + │ │ └── fd: ()-->(1) + │ └── inner-join (cross) + │ ├── columns: c.id:4(string!null) + │ ├── multiplicity: left-rows(one-or-more), right-rows(zero-or-more) + │ ├── stats: [rows=10] + │ ├── fd: ()-->(4) + │ ├── select + │ │ ├── columns: c.id:4(string!null) + │ │ ├── stats: [rows=10, distinct(4)=1, null(4)=0] + │ │ ├── fd: ()-->(4) + │ │ ├── scan c + │ │ │ ├── columns: c.id:4(string) + │ │ │ └── stats: [rows=1000, distinct(4)=100, null(4)=10] + │ │ └── filters + │ │ └── c.id:4 = 'bar' [type=bool, outer=(4), constraints=(/4: [/'bar' - /'bar']; tight), fd=()-->(4)] + │ ├── with-scan &1 (hierarchy) + │ │ ├── mapping: + │ │ ├── cardinality: [1 - ] + │ │ └── stats: [rows=1] + │ └── filters (true) + └── projections + └── id:3 [as=id:7, type=string, outer=(3)] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpcc b/pkg/sql/opt/memo/testdata/stats_quality/tpcc index b3cf06322ced..b80bffa0dc8d 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpcc +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpcc @@ -639,6 +639,7 @@ scalar-group-by ├── save-table-name: consistency_01_scalar_group_by_1 ├── columns: count:22(int!null) ├── cardinality: [1 - 1] + ├── immutable ├── stats: [rows=1, distinct(22)=1, null(22)=0] ├── key: () ├── fd: ()-->(22) @@ -647,6 +648,7 @@ scalar-group-by │ ├── columns: w_id:1(int!null) w_ytd:9(decimal!null) d_w_id:11(int!null) sum:21(decimal!null) │ ├── left ordering: +1 │ ├── 
right ordering: +11 + │ ├── immutable │ ├── stats: [rows=3.33333333, distinct(1)=3.33333333, null(1)=0, distinct(9)=1, null(9)=0, distinct(11)=3.33333333, null(11)=0, distinct(21)=3.33333333, null(21)=0] │ ├── key: (11) │ ├── fd: (1)-->(9), (11)-->(21), (1)==(11), (11)==(1) @@ -678,7 +680,7 @@ scalar-group-by │ │ └── sum [as=sum:21, type=decimal, outer=(19)] │ │ └── d_ytd:19 [type=decimal] │ └── filters - │ └── w_ytd:9 != sum:21 [type=bool, outer=(9,21), constraints=(/9: (/NULL - ]; /21: (/NULL - ])] + │ └── w_ytd:9 != sum:21 [type=bool, outer=(9,21), immutable, constraints=(/9: (/NULL - ]; /21: (/NULL - ])] └── aggregations └── count-rows [as=count_rows:22, type=int] @@ -819,12 +821,14 @@ scalar-group-by ├── save-table-name: consistency_05_scalar_group_by_1 ├── columns: count:8(int!null) ├── cardinality: [1 - 1] + ├── immutable ├── stats: [rows=1, distinct(8)=1, null(8)=0] ├── key: () ├── fd: ()-->(8) ├── select │ ├── save-table-name: consistency_05_select_2 │ ├── columns: no_d_id:2(int!null) no_w_id:3(int!null) max:4(int!null) min:5(int!null) count_rows:6(int!null) + │ ├── immutable │ ├── stats: [rows=33.3333333, distinct(2)=9.8265847, null(2)=0, distinct(3)=9.8265847, null(3)=0, distinct(4)=33.3333333, null(4)=0, distinct(5)=33.3333333, null(5)=0, distinct(6)=33.3333333, null(6)=0] │ ├── key: (2,3) │ ├── fd: (2,3)-->(4-6) @@ -851,7 +855,7 @@ scalar-group-by │ │ │ └── no_o_id:1 [type=int] │ │ └── count-rows [as=count_rows:6, type=int] │ └── filters - │ └── ((max:4 - min:5) - count_rows:6) != -1 [type=bool, outer=(4-6)] + │ └── ((max:4 - min:5) - count_rows:6) != -1 [type=bool, outer=(4-6), immutable] └── aggregations └── count-rows [as=count_rows:8, type=int] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q01 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q01 index 75e9beb0d2f7..e0173c7581e7 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q01 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q01 @@ -43,6 +43,7 @@ ORDER BY sort ├── 
save-table-name: q1_sort_1 ├── columns: l_returnflag:9(char!null) l_linestatus:10(char!null) sum_qty:17(float!null) sum_base_price:18(float!null) sum_disc_price:20(float!null) sum_charge:22(float!null) avg_qty:23(float!null) avg_price:24(float!null) avg_disc:25(float!null) count_order:26(int!null) + ├── immutable ├── stats: [rows=6, distinct(9)=3, null(9)=0, distinct(10)=2, null(10)=0, distinct(17)=6, null(17)=0, distinct(18)=6, null(18)=0, distinct(20)=6, null(20)=0, distinct(22)=6, null(22)=0, distinct(23)=6, null(23)=0, distinct(24)=6, null(24)=0, distinct(25)=6, null(25)=0, distinct(26)=6, null(26)=0, distinct(9,10)=6, null(9,10)=0] ├── key: (9,10) ├── fd: (9,10)-->(17,18,20,22-26) @@ -51,12 +52,14 @@ sort ├── save-table-name: q1_group_by_2 ├── columns: l_returnflag:9(char!null) l_linestatus:10(char!null) sum:17(float!null) sum:18(float!null) sum:20(float!null) sum:22(float!null) avg:23(float!null) avg:24(float!null) avg:25(float!null) count_rows:26(int!null) ├── grouping columns: l_returnflag:9(char!null) l_linestatus:10(char!null) + ├── immutable ├── stats: [rows=6, distinct(9)=3, null(9)=0, distinct(10)=2, null(10)=0, distinct(17)=6, null(17)=0, distinct(18)=6, null(18)=0, distinct(20)=6, null(20)=0, distinct(22)=6, null(22)=0, distinct(23)=6, null(23)=0, distinct(24)=6, null(24)=0, distinct(25)=6, null(25)=0, distinct(26)=6, null(26)=0, distinct(9,10)=6, null(9,10)=0] ├── key: (9,10) ├── fd: (9,10)-->(17,18,20,22-26) ├── project │ ├── save-table-name: q1_project_3 │ ├── columns: column19:19(float!null) column21:21(float!null) l_quantity:5(float!null) l_extendedprice:6(float!null) l_discount:7(float!null) l_returnflag:9(char!null) l_linestatus:10(char!null) + │ ├── immutable │ ├── stats: [rows=5925056.21, distinct(5)=50, null(5)=0, distinct(6)=925955, null(6)=0, distinct(7)=11, null(7)=0, distinct(9)=3, null(9)=0, distinct(10)=2, null(10)=0, distinct(19)=5925056.21, null(19)=0, distinct(21)=5925056.21, null(21)=0, distinct(9,10)=6, null(9,10)=0] │ ├── select 
│ │ ├── save-table-name: q1_select_4 @@ -73,8 +76,8 @@ sort │ │ └── filters │ │ └── l_shipdate:11 <= '1998-09-02' [type=bool, outer=(11), constraints=(/11: (/NULL - /'1998-09-02']; tight)] │ └── projections - │ ├── l_extendedprice:6 * (1.0 - l_discount:7) [as=column19:19, type=float, outer=(6,7)] - │ └── (l_extendedprice:6 * (1.0 - l_discount:7)) * (l_tax:8 + 1.0) [as=column21:21, type=float, outer=(6-8)] + │ ├── l_extendedprice:6 * (1.0 - l_discount:7) [as=column19:19, type=float, outer=(6,7), immutable] + │ └── (l_extendedprice:6 * (1.0 - l_discount:7)) * (l_tax:8 + 1.0) [as=column21:21, type=float, outer=(6-8), immutable] └── aggregations ├── sum [as=sum:17, type=float, outer=(5)] │ └── l_quantity:5 [type=float] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q03 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q03 index c5cabe216e39..a47e3af03dd5 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q03 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q03 @@ -45,6 +45,7 @@ limit ├── columns: l_orderkey:18(int!null) revenue:35(float!null) o_orderdate:13(date!null) o_shippriority:16(int!null) ├── internal-ordering: -35,+13 ├── cardinality: [0 - 10] + ├── immutable ├── stats: [rows=10, distinct(13)=10, null(13)=0, distinct(16)=10, null(16)=0, distinct(18)=10, null(18)=0, distinct(35)=10, null(35)=0] ├── key: (18) ├── fd: (18)-->(13,16,35) @@ -52,6 +53,7 @@ limit ├── sort │ ├── save-table-name: q3_sort_2 │ ├── columns: o_orderdate:13(date!null) o_shippriority:16(int!null) l_orderkey:18(int!null) sum:35(float!null) + │ ├── immutable │ ├── stats: [rows=359560.406, distinct(13)=359560.406, null(13)=0, distinct(16)=359560.406, null(16)=0, distinct(18)=359560.406, null(18)=0, distinct(35)=359560.406, null(35)=0] │ ├── key: (18) │ ├── fd: (18)-->(13,16,35) @@ -61,12 +63,14 @@ limit │ ├── save-table-name: q3_group_by_3 │ ├── columns: o_orderdate:13(date!null) o_shippriority:16(int!null) l_orderkey:18(int!null) sum:35(float!null) │ ├── grouping columns: 
l_orderkey:18(int!null) + │ ├── immutable │ ├── stats: [rows=359560.406, distinct(13)=359560.406, null(13)=0, distinct(16)=359560.406, null(16)=0, distinct(18)=359560.406, null(18)=0, distinct(35)=359560.406, null(35)=0] │ ├── key: (18) │ ├── fd: (18)-->(13,16,35) │ ├── project │ │ ├── save-table-name: q3_project_4 │ │ ├── columns: column34:34(float!null) o_orderdate:13(date!null) o_shippriority:16(int!null) l_orderkey:18(int!null) + │ │ ├── immutable │ │ ├── stats: [rows=493779.215, distinct(13)=1169, null(13)=0, distinct(16)=1, null(16)=0, distinct(18)=359560.406, null(18)=0, distinct(34)=410295.908, null(34)=0] │ │ ├── fd: (18)-->(13,16) │ │ ├── inner-join (lookup lineitem) @@ -125,7 +129,7 @@ limit │ │ │ └── filters │ │ │ └── l_shipdate:28 > '1995-03-15' [type=bool, outer=(28), constraints=(/28: [/'1995-03-16' - ]; tight)] │ │ └── projections - │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column34:34, type=float, outer=(23,24)] + │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column34:34, type=float, outer=(23,24), immutable] │ └── aggregations │ ├── sum [as=sum:35, type=float, outer=(34)] │ │ └── column34:34 [type=float] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q05 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q05 index f5d097cd57a0..7a315fe43b2d 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q05 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q05 @@ -50,6 +50,7 @@ ORDER BY sort ├── save-table-name: q5_sort_1 ├── columns: n_name:42(char!null) revenue:49(float!null) + ├── immutable ├── stats: [rows=5, distinct(42)=5, null(42)=0, distinct(49)=5, null(49)=0] ├── key: (42) ├── fd: (42)-->(49) @@ -58,12 +59,14 @@ sort ├── save-table-name: q5_group_by_2 ├── columns: n_name:42(char!null) sum:49(float!null) ├── grouping columns: n_name:42(char!null) + ├── immutable ├── stats: [rows=5, distinct(42)=5, null(42)=0, distinct(49)=5, null(49)=0] ├── key: (42) ├── fd: (42)-->(49) ├── project │ ├── save-table-name: 
q5_project_3 │ ├── columns: column48:48(float!null) n_name:42(char!null) + │ ├── immutable │ ├── stats: [rows=13445.4933, distinct(42)=5, null(42)=0, distinct(48)=13135.9517, null(48)=0] │ ├── inner-join (hash) │ │ ├── save-table-name: q5_inner_join_4 @@ -171,7 +174,7 @@ sort │ │ ├── c_custkey:1 = o_custkey:10 [type=bool, outer=(1,10), constraints=(/1: (/NULL - ]; /10: (/NULL - ]), fd=(1)==(10), (10)==(1)] │ │ └── c_nationkey:4 = s_nationkey:37 [type=bool, outer=(4,37), constraints=(/4: (/NULL - ]; /37: (/NULL - ]), fd=(4)==(37), (37)==(4)] │ └── projections - │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column48:48, type=float, outer=(23,24)] + │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column48:48, type=float, outer=(23,24), immutable] └── aggregations └── sum [as=sum:49, type=float, outer=(48)] └── column48:48 [type=float] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q06 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q06 index 52ce7daf007e..e64711c89f24 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q06 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q06 @@ -34,12 +34,14 @@ scalar-group-by ├── save-table-name: q6_scalar_group_by_1 ├── columns: revenue:18(float) ├── cardinality: [1 - 1] + ├── immutable ├── stats: [rows=1, distinct(18)=1, null(18)=0] ├── key: () ├── fd: ()-->(18) ├── project │ ├── save-table-name: q6_project_2 │ ├── columns: column17:17(float!null) + │ ├── immutable │ ├── stats: [rows=34745.8339, distinct(17)=34745.8339, null(17)=0] │ ├── select │ │ ├── save-table-name: q6_select_3 @@ -66,7 +68,7 @@ scalar-group-by │ │ ├── (l_discount:7 >= 0.05) AND (l_discount:7 <= 0.07) [type=bool, outer=(7), constraints=(/7: [/0.05 - /0.07]; tight)] │ │ └── l_quantity:5 < 24.0 [type=bool, outer=(5), constraints=(/5: (/NULL - /23.999999999999996]; tight)] │ └── projections - │ └── l_extendedprice:6 * l_discount:7 [as=column17:17, type=float, outer=(6,7)] + │ └── l_extendedprice:6 * l_discount:7 [as=column17:17, 
type=float, outer=(6,7), immutable] └── aggregations └── sum [as=sum:18, type=float, outer=(17)] └── column17:17 [type=float] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q07 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q07 index 64f7627df731..7e096bb15c1a 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q07 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q07 @@ -149,7 +149,7 @@ sort │ │ └── s_nationkey:4 = n1.n_nationkey:41 [type=bool, outer=(4,41), constraints=(/4: (/NULL - ]; /41: (/NULL - ]), fd=(4)==(41), (41)==(4)] │ └── projections │ ├── extract('year', l_shipdate:18) [as=l_year:49, type=float, outer=(18), immutable] - │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=volume:50, type=float, outer=(13,14)] + │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=volume:50, type=float, outer=(13,14), immutable] └── aggregations └── sum [as=sum:51, type=float, outer=(50)] └── volume:50 [type=float] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q08 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q08 index d4c237d2939e..910a82ed9f21 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q08 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q08 @@ -246,7 +246,7 @@ sort │ │ │ │ └── p_partkey:1 = l_partkey:18 [type=bool, outer=(1,18), constraints=(/1: (/NULL - ]; /18: (/NULL - ]), fd=(1)==(18), (18)==(1)] │ │ │ └── projections │ │ │ ├── extract('year', o_orderdate:37) [as=o_year:61, type=float, outer=(37), immutable] - │ │ │ └── l_extendedprice:22 * (1.0 - l_discount:23) [as=volume:62, type=float, outer=(22,23)] + │ │ │ └── l_extendedprice:22 * (1.0 - l_discount:23) [as=volume:62, type=float, outer=(22,23), immutable] │ │ └── projections │ │ └── CASE WHEN n2.n_name:55 = 'BRAZIL' THEN volume:62 ELSE 0.0 END [as=column63:63, type=float, outer=(55,62)] │ └── aggregations diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q09 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q09 index a9221e0136c2..7237c589ceb7 100644 --- 
a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q09 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q09 @@ -159,7 +159,7 @@ sort │ │ └── p_name:2 LIKE '%green%' [type=bool, outer=(2), constraints=(/2: (/NULL - ])] │ └── projections │ ├── extract('year', o_orderdate:42) [as=o_year:51, type=float, outer=(42), immutable] - │ └── (l_extendedprice:22 * (1.0 - l_discount:23)) - (ps_supplycost:36 * l_quantity:21) [as=amount:52, type=float, outer=(21-23,36)] + │ └── (l_extendedprice:22 * (1.0 - l_discount:23)) - (ps_supplycost:36 * l_quantity:21) [as=amount:52, type=float, outer=(21-23,36), immutable] └── aggregations └── sum [as=sum:53, type=float, outer=(52)] └── amount:52 [type=float] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q10 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q10 index 9d7b50fb3b8e..99a705233001 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q10 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q10 @@ -57,6 +57,7 @@ limit ├── columns: c_custkey:1(int!null) c_name:2(varchar!null) revenue:39(float!null) c_acctbal:6(float!null) n_name:35(char!null) c_address:3(varchar!null) c_phone:5(char!null) c_comment:8(varchar!null) ├── internal-ordering: -39 ├── cardinality: [0 - 20] + ├── immutable ├── stats: [rows=20, distinct(1)=20, null(1)=0, distinct(2)=20, null(2)=0, distinct(3)=20, null(3)=0, distinct(5)=20, null(5)=0, distinct(6)=20, null(6)=0, distinct(8)=20, null(8)=0, distinct(35)=20, null(35)=0, distinct(39)=20, null(39)=0] ├── key: (1) ├── fd: (1)-->(2,3,5,6,8,35,39) @@ -64,6 +65,7 @@ limit ├── sort │ ├── save-table-name: q10_sort_2 │ ├── columns: c_custkey:1(int!null) c_name:2(varchar!null) c_address:3(varchar!null) c_phone:5(char!null) c_acctbal:6(float!null) c_comment:8(varchar!null) n_name:35(char!null) sum:39(float!null) + │ ├── immutable │ ├── stats: [rows=42917.9526, distinct(1)=42917.9526, null(1)=0, distinct(2)=42917.9526, null(2)=0, distinct(3)=42917.9526, null(3)=0, distinct(5)=42917.9526, null(5)=0, 
distinct(6)=42917.9526, null(6)=0, distinct(8)=42917.9526, null(8)=0, distinct(35)=42917.9526, null(35)=0, distinct(39)=42917.9526, null(39)=0] │ ├── key: (1) │ ├── fd: (1)-->(2,3,5,6,8,35,39) @@ -73,12 +75,14 @@ limit │ ├── save-table-name: q10_group_by_3 │ ├── columns: c_custkey:1(int!null) c_name:2(varchar!null) c_address:3(varchar!null) c_phone:5(char!null) c_acctbal:6(float!null) c_comment:8(varchar!null) n_name:35(char!null) sum:39(float!null) │ ├── grouping columns: c_custkey:1(int!null) + │ ├── immutable │ ├── stats: [rows=42917.9526, distinct(1)=42917.9526, null(1)=0, distinct(2)=42917.9526, null(2)=0, distinct(3)=42917.9526, null(3)=0, distinct(5)=42917.9526, null(5)=0, distinct(6)=42917.9526, null(6)=0, distinct(8)=42917.9526, null(8)=0, distinct(35)=42917.9526, null(35)=0, distinct(39)=42917.9526, null(39)=0] │ ├── key: (1) │ ├── fd: (1)-->(2,3,5,6,8,35,39) │ ├── project │ │ ├── save-table-name: q10_project_4 │ │ ├── columns: column38:38(float!null) c_custkey:1(int!null) c_name:2(varchar!null) c_address:3(varchar!null) c_phone:5(char!null) c_acctbal:6(float!null) c_comment:8(varchar!null) n_name:35(char!null) + │ │ ├── immutable │ │ ├── stats: [rows=91240.8317, distinct(1)=42917.9526, null(1)=0, distinct(2)=68356.4353, null(2)=0, distinct(3)=68348.5807, null(3)=0, distinct(5)=68356.4353, null(5)=0, distinct(6)=67126.327, null(6)=0, distinct(8)=68271.7501, null(8)=0, distinct(35)=25, null(35)=0, distinct(38)=88236.775, null(38)=0] │ │ ├── fd: (1)-->(2,3,5,6,8,35) │ │ ├── inner-join (hash) @@ -143,7 +147,7 @@ limit │ │ │ └── filters │ │ │ └── c_nationkey:4 = n_nationkey:34 [type=bool, outer=(4,34), constraints=(/4: (/NULL - ]; /34: (/NULL - ]), fd=(4)==(34), (34)==(4)] │ │ └── projections - │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column38:38, type=float, outer=(23,24)] + │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column38:38, type=float, outer=(23,24), immutable] │ └── aggregations │ ├── sum [as=sum:39, type=float, outer=(38)] │ 
│ └── column38:38 [type=float] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q11 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q11 index a11abe0e488b..81260c52ed6c 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q11 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q11 @@ -188,7 +188,7 @@ sort │ └── sum [as=sum:36, type=float, outer=(35)] │ └── column35:35 [type=float] └── projections - └── sum:36 * 0.0001 [as="?column?":37, type=float, outer=(36)] + └── sum:36 * 0.0001 [as="?column?":37, type=float, outer=(36), immutable] stats table=q11_sort_1 ---- diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q14 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q14 index 3a2ab8e835e8..d7ed576ff4c6 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q14 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q14 @@ -42,12 +42,14 @@ project │ ├── save-table-name: q14_scalar_group_by_2 │ ├── columns: sum:27(float) sum:29(float) │ ├── cardinality: [1 - 1] + │ ├── immutable │ ├── stats: [rows=1, distinct(27)=1, null(27)=0, distinct(29)=1, null(29)=0, distinct(27,29)=1, null(27,29)=0] │ ├── key: () │ ├── fd: ()-->(27,29) │ ├── project │ │ ├── save-table-name: q14_project_3 │ │ ├── columns: column26:26(float!null) column28:28(float!null) + │ │ ├── immutable │ │ ├── stats: [rows=82726.8788, distinct(26)=82726.8788, null(26)=0, distinct(28)=52210.2591, null(28)=0] │ │ ├── inner-join (hash) │ │ │ ├── save-table-name: q14_inner_join_4 @@ -83,8 +85,8 @@ project │ │ │ └── filters │ │ │ └── l_partkey:2 = p_partkey:17 [type=bool, outer=(2,17), constraints=(/2: (/NULL - ]; /17: (/NULL - ]), fd=(2)==(17), (17)==(2)] │ │ └── projections - │ │ ├── CASE WHEN p_type:21 LIKE 'PROMO%' THEN l_extendedprice:6 * (1.0 - l_discount:7) ELSE 0.0 END [as=column26:26, type=float, outer=(6,7,21)] - │ │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column28:28, type=float, outer=(6,7)] + │ │ ├── CASE WHEN p_type:21 LIKE 'PROMO%' THEN l_extendedprice:6 * (1.0 - l_discount:7) 
ELSE 0.0 END [as=column26:26, type=float, outer=(6,7,21), immutable] + │ │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column28:28, type=float, outer=(6,7), immutable] │ └── aggregations │ ├── sum [as=sum:27, type=float, outer=(26)] │ │ └── column26:26 [type=float] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q15 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q15 index e068989ff7dc..e18e4f141b92 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q15 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q15 @@ -53,6 +53,7 @@ ORDER BY project ├── save-table-name: q15_project_1 ├── columns: s_suppkey:1(int!null) s_name:2(char!null) s_address:3(varchar!null) s_phone:5(char!null) total_revenue:25(float!null) + ├── immutable ├── stats: [rows=3333.33333, distinct(1)=3306.66667, null(1)=0, distinct(2)=2834.3606, null(2)=0, distinct(3)=2834.80729, null(3)=0, distinct(5)=2834.80729, null(5)=0, distinct(25)=2100.04396, null(25)=0] ├── key: (1) ├── fd: (1)-->(2,3,5,25) @@ -62,6 +63,7 @@ project ├── columns: s_suppkey:1(int!null) s_name:2(char!null) s_address:3(varchar!null) s_phone:5(char!null) l_suppkey:10(int!null) sum:25(float!null) ├── left ordering: +1 ├── right ordering: +10 + ├── immutable ├── stats: [rows=3333.33333, distinct(1)=3306.66667, null(1)=0, distinct(2)=2834.3606, null(2)=0, distinct(3)=2834.80729, null(3)=0, distinct(5)=2834.80729, null(5)=0, distinct(10)=3306.66667, null(10)=0, distinct(25)=2100.04396, null(25)=0] ├── key: (10) ├── fd: (1)-->(2,3,5), (10)-->(25), (1)==(10), (10)==(1) @@ -78,6 +80,7 @@ project ├── sort │ ├── save-table-name: q15_sort_4 │ ├── columns: l_suppkey:10(int!null) sum:25(float!null) + │ ├── immutable │ ├── stats: [rows=3306.66667, distinct(10)=3306.66667, null(10)=0, distinct(25)=3306.66667, null(25)=0] │ ├── key: (10) │ ├── fd: (10)-->(25) @@ -85,6 +88,7 @@ project │ └── select │ ├── save-table-name: q15_select_5 │ ├── columns: l_suppkey:10(int!null) sum:25(float!null) + │ ├── immutable │ ├── stats: 
[rows=3306.66667, distinct(10)=3306.66667, null(10)=0, distinct(25)=3306.66667, null(25)=0] │ ├── key: (10) │ ├── fd: (10)-->(25) @@ -92,12 +96,14 @@ project │ │ ├── save-table-name: q15_group_by_6 │ │ ├── columns: l_suppkey:10(int!null) sum:25(float!null) │ │ ├── grouping columns: l_suppkey:10(int!null) + │ │ ├── immutable │ │ ├── stats: [rows=9920, distinct(10)=9920, null(10)=0, distinct(25)=9920, null(25)=0] │ │ ├── key: (10) │ │ ├── fd: (10)-->(25) │ │ ├── project │ │ │ ├── save-table-name: q15_project_7 │ │ │ ├── columns: column24:24(float!null) l_suppkey:10(int!null) + │ │ │ ├── immutable │ │ │ ├── stats: [rows=259635.063, distinct(10)=9920, null(10)=0, distinct(24)=259635.063, null(24)=0] │ │ │ ├── index-join lineitem │ │ │ │ ├── save-table-name: q15_index_join_8 @@ -117,18 +123,19 @@ project │ │ │ │ ├── key: (8,11) │ │ │ │ └── fd: (8,11)-->(18) │ │ │ └── projections - │ │ │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=column24:24, type=float, outer=(13,14)] + │ │ │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=column24:24, type=float, outer=(13,14), immutable] │ │ └── aggregations │ │ └── sum [as=sum:25, type=float, outer=(24)] │ │ └── column24:24 [type=float] │ └── filters - │ └── eq [type=bool, outer=(25), subquery, constraints=(/25: (/NULL - ])] + │ └── eq [type=bool, outer=(25), immutable, subquery, constraints=(/25: (/NULL - ])] │ ├── sum:25 [type=float] │ └── subquery [type=float] │ └── scalar-group-by │ ├── save-table-name: q15_scalar_group_by_10 │ ├── columns: max:44(float) │ ├── cardinality: [1 - 1] + │ ├── immutable │ ├── stats: [rows=1, distinct(44)=1, null(44)=0] │ ├── key: () │ ├── fd: ()-->(44) @@ -136,12 +143,14 @@ project │ │ ├── save-table-name: q15_group_by_11 │ │ ├── columns: l_suppkey:28(int!null) sum:43(float!null) │ │ ├── grouping columns: l_suppkey:28(int!null) + │ │ ├── immutable │ │ ├── stats: [rows=9920, distinct(28)=9920, null(28)=0, distinct(43)=9920, null(43)=0] │ │ ├── key: (28) │ │ ├── fd: (28)-->(43) │ │ ├── 
project │ │ │ ├── save-table-name: q15_project_12 │ │ │ ├── columns: column42:42(float!null) l_suppkey:28(int!null) + │ │ │ ├── immutable │ │ │ ├── stats: [rows=259635.063, distinct(28)=9920, null(28)=0, distinct(42)=259635.063, null(42)=0] │ │ │ ├── index-join lineitem │ │ │ │ ├── save-table-name: q15_index_join_13 @@ -161,7 +170,7 @@ project │ │ │ │ ├── key: (26,29) │ │ │ │ └── fd: (26,29)-->(36) │ │ │ └── projections - │ │ │ └── l_extendedprice:31 * (1.0 - l_discount:32) [as=column42:42, type=float, outer=(31,32)] + │ │ │ └── l_extendedprice:31 * (1.0 - l_discount:32) [as=column42:42, type=float, outer=(31,32), immutable] │ │ └── aggregations │ │ └── sum [as=sum:43, type=float, outer=(42)] │ │ └── column42:42 [type=float] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q17 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q17 index c1bda172c749..4d46d9034226 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q17 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q17 @@ -43,6 +43,7 @@ project ├── save-table-name: q17_project_1 ├── columns: avg_yearly:45(float) ├── cardinality: [1 - 1] + ├── immutable ├── stats: [rows=1, distinct(45)=1, null(45)=0] ├── key: () ├── fd: ()-->(45) @@ -50,6 +51,7 @@ project │ ├── save-table-name: q17_scalar_group_by_2 │ ├── columns: sum:44(float) │ ├── cardinality: [1 - 1] + │ ├── immutable │ ├── stats: [rows=1, distinct(44)=1, null(44)=0] │ ├── key: () │ ├── fd: ()-->(44) @@ -58,18 +60,21 @@ project │ │ ├── columns: l_partkey:2(int!null) l_quantity:5(float!null) l_extendedprice:6(float!null) p_partkey:17(int!null) "?column?":43(float!null) │ │ ├── key columns: [1 4] = [1 4] │ │ ├── lookup columns are key + │ │ ├── immutable │ │ ├── stats: [rows=2008.02163, distinct(2)=199.999619, null(2)=0, distinct(5)=50, null(5)=0, distinct(6)=2005.84759, null(6)=0, distinct(17)=199.999619, null(17)=0, distinct(43)=199.999619, null(43)=0] │ │ ├── fd: (17)-->(43), (2)==(17), (17)==(2) │ │ ├── inner-join (lookup lineitem@l_pk) │ │ │ ├── 
save-table-name: q17_lookup_join_4 │ │ │ ├── columns: l_orderkey:1(int!null) l_partkey:2(int!null) l_linenumber:4(int!null) p_partkey:17(int!null) "?column?":43(float) │ │ │ ├── key columns: [17] = [2] + │ │ │ ├── immutable │ │ │ ├── stats: [rows=6024.06489, distinct(1)=6012.21509, null(1)=0, distinct(2)=199.999619, null(2)=0, distinct(4)=7, null(4)=0, distinct(17)=199.999619, null(17)=0, distinct(43)=199.999619, null(43)=0] │ │ │ ├── key: (1,4) │ │ │ ├── fd: (17)-->(43), (1,4)-->(2), (2)==(17), (17)==(2) │ │ │ ├── project │ │ │ │ ├── save-table-name: q17_project_5 │ │ │ │ ├── columns: "?column?":43(float) p_partkey:17(int!null) + │ │ │ │ ├── immutable │ │ │ │ ├── stats: [rows=199.999619, distinct(17)=199.999619, null(17)=0, distinct(43)=199.999619, null(43)=0] │ │ │ │ ├── key: (17) │ │ │ │ ├── fd: (17)-->(43) @@ -122,7 +127,7 @@ project │ │ │ │ │ └── avg [as=avg:42, type=float, outer=(30)] │ │ │ │ │ └── l_quantity:30 [type=float] │ │ │ │ └── projections - │ │ │ │ └── avg:42 * 0.2 [as="?column?":43, type=float, outer=(42)] + │ │ │ │ └── avg:42 * 0.2 [as="?column?":43, type=float, outer=(42), immutable] │ │ │ └── filters (true) │ │ └── filters │ │ └── l_quantity:5 < "?column?":43 [type=bool, outer=(5,43), constraints=(/5: (/NULL - ]; /43: (/NULL - ])] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q19 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q19 index 8d019234afb4..479cc01ca175 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q19 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q19 @@ -57,12 +57,14 @@ scalar-group-by ├── save-table-name: q19_scalar_group_by_1 ├── columns: revenue:27(float) ├── cardinality: [1 - 1] + ├── immutable ├── stats: [rows=1, distinct(27)=1, null(27)=0] ├── key: () ├── fd: ()-->(27) ├── project │ ├── save-table-name: q19_project_2 │ ├── columns: column26:26(float!null) + │ ├── immutable │ ├── stats: [rows=71.4087386, distinct(26)=71.402791, null(26)=0] │ ├── inner-join (hash) │ │ ├── save-table-name: 
q19_inner_join_3 @@ -104,7 +106,7 @@ scalar-group-by │ │ ├── p_partkey:17 = l_partkey:2 [type=bool, outer=(2,17), constraints=(/2: (/NULL - ]; /17: (/NULL - ]), fd=(2)==(17), (17)==(2)] │ │ └── ((((((p_brand:20 = 'Brand#12') AND (p_container:23 IN ('SM BOX', 'SM CASE', 'SM PACK', 'SM PKG'))) AND (l_quantity:5 >= 1.0)) AND (l_quantity:5 <= 11.0)) AND (p_size:22 <= 5)) OR (((((p_brand:20 = 'Brand#23') AND (p_container:23 IN ('MED BAG', 'MED BOX', 'MED PACK', 'MED PKG'))) AND (l_quantity:5 >= 10.0)) AND (l_quantity:5 <= 20.0)) AND (p_size:22 <= 10))) OR (((((p_brand:20 = 'Brand#34') AND (p_container:23 IN ('LG BOX', 'LG CASE', 'LG PACK', 'LG PKG'))) AND (l_quantity:5 >= 20.0)) AND (l_quantity:5 <= 30.0)) AND (p_size:22 <= 15)) [type=bool, outer=(5,20,22,23), constraints=(/5: [/1.0 - /30.0]; /20: [/'Brand#12' - /'Brand#12'] [/'Brand#23' - /'Brand#23'] [/'Brand#34' - /'Brand#34']; /22: (/NULL - /15]; /23: [/'LG BOX' - /'LG BOX'] [/'LG CASE' - /'LG CASE'] [/'LG PACK' - /'LG PACK'] [/'LG PKG' - /'LG PKG'] [/'MED BAG' - /'MED BAG'] [/'MED BOX' - /'MED BOX'] [/'MED PACK' - /'MED PACK'] [/'MED PKG' - /'MED PKG'] [/'SM BOX' - /'SM BOX'] [/'SM CASE' - /'SM CASE'] [/'SM PACK' - /'SM PACK'] [/'SM PKG' - /'SM PKG'])] │ └── projections - │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column26:26, type=float, outer=(6,7)] + │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column26:26, type=float, outer=(6,7), immutable] └── aggregations └── sum [as=sum:27, type=float, outer=(26)] └── column26:26 [type=float] diff --git a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q20 b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q20 index 9344c49d8598..e27e2b35670f 100644 --- a/pkg/sql/opt/memo/testdata/stats_quality/tpch/q20 +++ b/pkg/sql/opt/memo/testdata/stats_quality/tpch/q20 @@ -60,22 +60,26 @@ ORDER BY sort ├── save-table-name: q20_sort_1 ├── columns: s_name:2(char!null) s_address:3(varchar!null) + ├── immutable ├── stats: [rows=392.749612, distinct(2)=392.742232, null(2)=0, 
distinct(3)=392.749612, null(3)=0] ├── ordering: +2 └── project ├── save-table-name: q20_project_2 ├── columns: s_name:2(char!null) s_address:3(varchar!null) + ├── immutable ├── stats: [rows=392.749612, distinct(2)=392.742232, null(2)=0, distinct(3)=392.749612, null(3)=0] └── inner-join (hash) ├── save-table-name: q20_inner_join_3 ├── columns: s_suppkey:1(int!null) s_name:2(char!null) s_address:3(varchar!null) s_nationkey:4(int!null) n_nationkey:8(int!null) n_name:9(char!null) ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + ├── immutable ├── stats: [rows=392.749612, distinct(1)=392.685411, null(1)=0, distinct(2)=392.742232, null(2)=0, distinct(3)=392.749612, null(3)=0, distinct(4)=1, null(4)=0, distinct(8)=1, null(8)=0, distinct(9)=1, null(9)=0] ├── key: (1) ├── fd: ()-->(9), (1)-->(2-4), (4)==(8), (8)==(4) ├── semi-join (hash) │ ├── save-table-name: q20_semi_join_4 │ ├── columns: s_suppkey:1(int!null) s_name:2(char!null) s_address:3(varchar!null) s_nationkey:4(int!null) + │ ├── immutable │ ├── stats: [rows=9818.7403, distinct(1)=9740.19038, null(1)=0, distinct(2)=9809.64703, null(2)=0, distinct(3)=9818.7403, null(3)=0, distinct(4)=25, null(4)=0] │ ├── key: (1) │ ├── fd: (1)-->(2-4) @@ -92,11 +96,13 @@ sort │ ├── project │ │ ├── save-table-name: q20_project_6 │ │ ├── columns: ps_partkey:12(int!null) ps_suppkey:13(int!null) + │ │ ├── immutable │ │ ├── stats: [rows=36952.1991, distinct(12)=22217.3354, null(12)=0, distinct(13)=9740.19038, null(13)=0] │ │ ├── key: (12,13) │ │ └── project │ │ ├── save-table-name: q20_project_7 │ │ ├── columns: ps_partkey:12(int!null) ps_suppkey:13(int!null) p_partkey:17(int!null) + │ │ ├── immutable │ │ ├── stats: [rows=36960.327, distinct(12)=22217.3354, null(12)=0, distinct(13)=9681.00153, null(13)=0, distinct(17)=22217.3354, null(17)=0] │ │ ├── key: (13,17) │ │ ├── fd: (12)==(17), (17)==(12) @@ -104,12 +110,14 @@ sort │ │ ├── save-table-name: q20_inner_join_8 │ │ ├── columns: ps_partkey:12(int!null) 
ps_suppkey:13(int!null) ps_availqty:14(int!null) p_partkey:17(int!null) p_name:18(varchar!null) sum:42(float) │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + │ │ ├── immutable │ │ ├── stats: [rows=36960.327, distinct(12)=22217.3354, null(12)=0, distinct(13)=9681.00153, null(13)=0, distinct(14)=34508.432, null(14)=0, distinct(17)=22217.3354, null(17)=0, distinct(18)=17907.1379, null(18)=0, distinct(42)=34508.432, null(42)=0] │ │ ├── key: (13,17) │ │ ├── fd: (12,13)-->(14,42), (17)-->(18), (12)==(17), (17)==(12) │ │ ├── select │ │ │ ├── save-table-name: q20_select_9 │ │ │ ├── columns: ps_partkey:12(int!null) ps_suppkey:13(int!null) ps_availqty:14(int!null) sum:42(float) + │ │ │ ├── immutable │ │ │ ├── stats: [rows=266100.667, distinct(12)=159991.77, null(12)=0, distinct(13)=9920, null(13)=0, distinct(14)=266100.667, null(14)=0, distinct(42)=266100.667, null(42)=0] │ │ │ ├── key: (12,13) │ │ │ ├── fd: (12,13)-->(14,42) @@ -161,7 +169,7 @@ sort │ │ │ │ └── const-agg [as=ps_availqty:14, type=int, outer=(14)] │ │ │ │ └── ps_availqty:14 [type=int] │ │ │ └── filters - │ │ │ └── ps_availqty:14 > (sum:42 * 0.5) [type=bool, outer=(14,42), constraints=(/14: (/NULL - ])] + │ │ │ └── ps_availqty:14 > (sum:42 * 0.5) [type=bool, outer=(14,42), immutable, constraints=(/14: (/NULL - ])] │ │ ├── select │ │ │ ├── save-table-name: q20_select_15 │ │ │ ├── columns: p_partkey:17(int!null) p_name:18(varchar!null) diff --git a/pkg/sql/opt/norm/fold_constants_funcs.go b/pkg/sql/opt/norm/fold_constants_funcs.go index 7f3d6dac9a3c..bbf269d3f3c9 100644 --- a/pkg/sql/opt/norm/fold_constants_funcs.go +++ b/pkg/sql/opt/norm/fold_constants_funcs.go @@ -404,7 +404,7 @@ func (c *CustomFuncs) FoldColumnAccess(input opt.ScalarExpr, idx memo.TupleOrdin // FoldFunction evaluates a function expression with constant inputs. It // returns a constant expression as long as the function is contained in the -// FoldFunctionWhitelist, and the evaluation causes no error. 
+// FoldFunctionAllowlist, and the evaluation causes no error. func (c *CustomFuncs) FoldFunction( args memo.ScalarListExpr, private *memo.FunctionPrivate, ) opt.ScalarExpr { @@ -413,9 +413,9 @@ func (c *CustomFuncs) FoldFunction( if private.Properties.Class != tree.NormalClass { return nil } - // Functions that aren't immutable and also not in the whitelist cannot + // Functions that aren't immutable and also not in the allowlist cannot // be folded. - if _, ok := FoldFunctionWhitelist[private.Name]; !ok && private.Overload.Volatility > tree.VolatilityImmutable { + if _, ok := FoldFunctionAllowlist[private.Name]; !ok && private.Overload.Volatility > tree.VolatilityImmutable { return nil } @@ -442,9 +442,9 @@ func (c *CustomFuncs) FoldFunction( return c.f.ConstructConstVal(result, private.Typ) } -// FoldFunctionWhitelist contains non-immutable functions that are nevertheless +// FoldFunctionAllowlist contains non-immutable functions that are nevertheless // known to be safe for folding. -var FoldFunctionWhitelist = map[string]struct{}{ +var FoldFunctionAllowlist = map[string]struct{}{ // The SQL statement is generated in the optbuilder phase, so the remaining // function execution is immutable. "addgeometrycolumn": {}, diff --git a/pkg/sql/opt/norm/general_funcs.go b/pkg/sql/opt/norm/general_funcs.go index b709940c9609..ffd0ef69ec03 100644 --- a/pkg/sql/opt/norm/general_funcs.go +++ b/pkg/sql/opt/norm/general_funcs.go @@ -570,12 +570,15 @@ func (c *CustomFuncs) OrdinalityOrdering(private *memo.OrdinalityPrivate) physic } // IsSameOrdering evaluates whether the two orderings are equal. -func (c *CustomFuncs) IsSameOrdering( - first physical.OrderingChoice, other physical.OrderingChoice, -) bool { +func (c *CustomFuncs) IsSameOrdering(first, other physical.OrderingChoice) bool { return first.Equals(&other) } +// OrderingImplies returns true if the first OrderingChoice implies the second. 
+func (c *CustomFuncs) OrderingImplies(first, second physical.OrderingChoice) bool { + return first.Implies(&second) +} + // ----------------------------------------------------------------------- // // Filter functions @@ -950,3 +953,9 @@ func (c *CustomFuncs) CanAddConstInts(first tree.Datum, second tree.Datum) bool func (c *CustomFuncs) IntConst(d *tree.DInt) opt.ScalarExpr { return c.f.ConstructConst(d, types.Int) } + +// IsGreaterThan returns true if the first datum compares as greater than the +// second. +func (c *CustomFuncs) IsGreaterThan(first, second tree.Datum) bool { + return first.Compare(c.f.evalCtx, second) == 1 +} diff --git a/pkg/sql/opt/norm/rules/limit.opt b/pkg/sql/opt/norm/rules/limit.opt index 7a09989239f0..73ec3f86bc00 100644 --- a/pkg/sql/opt/norm/rules/limit.opt +++ b/pkg/sql/opt/norm/rules/limit.opt @@ -202,3 +202,22 @@ $input $limitExpr $ordering ) + +# FoldLimits replaces a Limit on top of a Limit with a single Limit operator +# when the outer limit value is smaller than or equal to the inner limit value +# and the inner ordering implies the outer ordering. Note: the case when the +# outer limit value is larger than the inner is handled by EliminateLimit. 
+[FoldLimits, Normalize] +(Limit + (Limit + $innerInput:* + $innerLimitExpr:(Const $innerLimit:*) + $innerOrdering:* + ) + $outerLimitExpr:(Const $outerLimit:*) & + ^(IsGreaterThan $outerLimit $innerLimit) + $outerOrdering:* & + (OrderingImplies $innerOrdering $outerOrdering) +) +=> +(Limit $innerInput $outerLimitExpr $innerOrdering) diff --git a/pkg/sql/opt/norm/testdata/rules/bool b/pkg/sql/opt/norm/testdata/rules/bool index 9f2e763947f1..cd353bfcc6aa 100644 --- a/pkg/sql/opt/norm/testdata/rules/bool +++ b/pkg/sql/opt/norm/testdata/rules/bool @@ -386,6 +386,7 @@ SELECT * FROM a WHERE NOT(s ~ 'foo') AND NOT(s !~ 'foo') AND NOT(s ~* 'foo') AND ---- select ├── columns: k:1!null i:2 f:3 s:4!null j:5 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-5) ├── scan a @@ -393,10 +394,10 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-5) └── filters - ├── s:4 !~ 'foo' [outer=(4), constraints=(/4: (/NULL - ])] - ├── s:4 ~ 'foo' [outer=(4), constraints=(/4: (/NULL - ])] - ├── s:4 !~* 'foo' [outer=(4), constraints=(/4: (/NULL - ])] - └── s:4 ~* 'foo' [outer=(4), constraints=(/4: (/NULL - ])] + ├── s:4 !~ 'foo' [outer=(4), immutable, constraints=(/4: (/NULL - ])] + ├── s:4 ~ 'foo' [outer=(4), immutable, constraints=(/4: (/NULL - ])] + ├── s:4 !~* 'foo' [outer=(4), immutable, constraints=(/4: (/NULL - ])] + └── s:4 ~* 'foo' [outer=(4), immutable, constraints=(/4: (/NULL - ])] norm expect-not=NegateComparison SELECT * FROM a WHERE @@ -408,6 +409,7 @@ SELECT * FROM a WHERE ---- select ├── columns: k:1!null i:2 f:3 s:4 j:5 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-5) ├── scan a @@ -415,12 +417,12 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-5) └── filters - ├── NOT ('[1, 2]' @> j:5) [outer=(5)] - ├── NOT ('[3, 4]' @> j:5) [outer=(5)] - ├── NOT (j:5 ? 
'foo') [outer=(5)] - ├── NOT (j:5 ?| ARRAY['foo']) [outer=(5)] - ├── NOT (j:5 ?& ARRAY['foo']) [outer=(5)] - └── NOT (ARRAY[i:2] && ARRAY[1]) [outer=(2)] + ├── NOT ('[1, 2]' @> j:5) [outer=(5), immutable] + ├── NOT ('[3, 4]' @> j:5) [outer=(5), immutable] + ├── NOT (j:5 ? 'foo') [outer=(5), immutable] + ├── NOT (j:5 ?| ARRAY['foo']) [outer=(5), immutable] + ├── NOT (j:5 ?& ARRAY['foo']) [outer=(5), immutable] + └── NOT (ARRAY[i:2] && ARRAY[1]) [outer=(2), immutable] # -------------------------------------------------- # EliminateNot @@ -476,6 +478,7 @@ SELECT * FROM a WHERE NOT (k >= i OR i < f OR k + i < f) ---- select ├── columns: k:1!null i:2!null f:3!null s:4 j:5 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-5) ├── scan a @@ -485,7 +488,7 @@ select └── filters ├── k:1 < i:2 [outer=(1,2), constraints=(/1: (/NULL - ]; /2: (/NULL - ])] ├── i:2 >= f:3 [outer=(2,3), constraints=(/2: (/NULL - ]; /3: (/NULL - ])] - └── f:3 <= (k:1 + i:2) [outer=(1-3), constraints=(/3: (/NULL - ])] + └── f:3 <= (k:1 + i:2) [outer=(1-3), immutable, constraints=(/3: (/NULL - ])] norm expect=(NegateOr,NegateComparison) SELECT * FROM a WHERE NOT (k >= i OR i < f OR (i > 10 OR i < 5 OR f > 1)) diff --git a/pkg/sql/opt/norm/testdata/rules/combo b/pkg/sql/opt/norm/testdata/rules/combo index c4e4d7d80482..1c04865b0adf 100644 --- a/pkg/sql/opt/norm/testdata/rules/combo +++ b/pkg/sql/opt/norm/testdata/rules/combo @@ -25,8 +25,10 @@ Initial expression ================================================================================ project ├── columns: s:4 + ├── immutable └── inner-join (cross) ├── columns: k:1!null i:2 f:3 s:4 j:5 x:6!null y:7 + ├── immutable ├── key: (1,6) ├── fd: (1)-->(2-5), (3,4)~~>(1,2,5), (6)-->(7) ├── scan a @@ -38,16 +40,18 @@ Initial expression │ ├── key: (6) │ └── fd: (6)-->(7) └── filters - └── (k:1 = x:6) AND ((i:2 + 1) = 10) [outer=(1,2,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ])] + └── (k:1 = x:6) AND ((i:2 + 1) = 10) [outer=(1,2,6), immutable, constraints=(/1: 
(/NULL - ]; /6: (/NULL - ])] ================================================================================ NormalizeCmpPlusConst Cost: 15470.07 ================================================================================ project ├── columns: s:4 + ├── immutable └── inner-join (cross) - ├── columns: k:1!null i:2 f:3 s:4 j:5 x:6!null y:7 + ├── columns: k:1!null i:2!null f:3 s:4 j:5 x:6!null y:7 + ├── immutable ├── key: (1,6) ├── fd: (1)-->(2-5), (3,4)~~>(1,2,5), (6)-->(7) ├── scan a @@ -59,16 +63,18 @@ NormalizeCmpPlusConst │ ├── key: (6) │ └── fd: (6)-->(7) └── filters - - └── (k:1 = x:6) AND ((i:2 + 1) = 10) [outer=(1,2,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ])] - + └── (k:1 = x:6) AND (i:2 = (10 - 1)) [outer=(1,2,6), constraints=(/1: (/NULL - ]; /2: (/NULL - ]; /6: (/NULL - ])] + - └── (k:1 = x:6) AND ((i:2 + 1) = 10) [outer=(1,2,6), immutable, constraints=(/1: (/NULL - ]; /6: (/NULL - ])] + + └── (k:1 = x:6) AND (i:2 = (10 - 1)) [outer=(1,2,6), immutable, constraints=(/1: (/NULL - ]; /2: (/NULL - ]; /6: (/NULL - ])] ================================================================================ FoldBinary Cost: 12203.40 ================================================================================ project ├── columns: s:4 + - ├── immutable └── inner-join (cross) ├── columns: k:1!null i:2!null f:3 s:4 j:5 x:6!null y:7 + - ├── immutable ├── key: (1,6) - ├── fd: (1)-->(2-5), (3,4)~~>(1,2,5), (6)-->(7) + ├── fd: ()-->(2), (1)-->(3-5), (3,4)~~>(1,5), (6)-->(7) @@ -81,7 +87,7 @@ FoldBinary │ ├── key: (6) │ └── fd: (6)-->(7) └── filters - - └── (k:1 = x:6) AND (i:2 = (10 - 1)) [outer=(1,2,6), constraints=(/1: (/NULL - ]; /2: (/NULL - ]; /6: (/NULL - ])] + - └── (k:1 = x:6) AND (i:2 = (10 - 1)) [outer=(1,2,6), immutable, constraints=(/1: (/NULL - ]; /2: (/NULL - ]; /6: (/NULL - ])] + └── (k:1 = x:6) AND (i:2 = 9) [outer=(1,2,6), constraints=(/1: (/NULL - ]; /2: [/9 - /9]; /6: (/NULL - ]), fd=()-->(2)] 
================================================================================ SimplifyJoinFilters diff --git a/pkg/sql/opt/norm/testdata/rules/comp b/pkg/sql/opt/norm/testdata/rules/comp index d6819f776580..1f3276a540f4 100644 --- a/pkg/sql/opt/norm/testdata/rules/comp +++ b/pkg/sql/opt/norm/testdata/rules/comp @@ -13,6 +13,7 @@ SELECT * FROM a WHERE 1+ik AND k/2>=i ---- select ├── columns: k:1!null i:2!null f:3 s:4 j:5 d:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-6) ├── scan a @@ -20,9 +21,9 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-6) └── filters - ├── k:1 > (i:2 + 1) [outer=(1,2), constraints=(/1: (/NULL - ])] - ├── i:2 >= (k:1 - 1) [outer=(1,2), constraints=(/2: (/NULL - ])] - ├── k:1 < (i:2 * i:2) [outer=(1,2), constraints=(/1: (/NULL - ])] + ├── k:1 > (i:2 + 1) [outer=(1,2), immutable, constraints=(/1: (/NULL - ])] + ├── i:2 >= (k:1 - 1) [outer=(1,2), immutable, constraints=(/2: (/NULL - ])] + ├── k:1 < (i:2 * i:2) [outer=(1,2), immutable, constraints=(/1: (/NULL - ])] └── i:2 <= (k:1 / 2) [outer=(1,2), constraints=(/2: (/NULL - ])] # -------------------------------------------------- @@ -33,6 +34,7 @@ SELECT * FROM a WHERE 5+1i AND 'foo'>=s ---- select ├── columns: k:1!null i:2!null f:3 s:4!null j:5 d:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-6) ├── scan a @@ -40,8 +42,8 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-6) └── filters - ├── (i:2 + k:1) > 6 [outer=(1,2)] - ├── (i:2 * 2) >= 8.3333333333333333333 [outer=(2)] + ├── (i:2 + k:1) > 6 [outer=(1,2), immutable] + ├── (i:2 * 2) >= 8.3333333333333333333 [outer=(2), immutable] ├── i:2 < 5 [outer=(2), constraints=(/2: (/NULL - /4]; tight)] └── s:4 <= 'foo' [outer=(4), constraints=(/4: (/NULL - /'foo']; tight)] @@ -50,6 +52,7 @@ SELECT * FROM a WHERE length('foo')+1(2-6) ├── scan a @@ -57,8 +60,8 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-6) └── filters - ├── (i:2 + k:1) > 4 [outer=(1,2)] - └── (i:2 * 2) >= 3 [outer=(2)] + ├── (i:2 + k:1) > 4 [outer=(1,2), immutable] + └── (i:2 * 2) >= 3 
[outer=(2), immutable] # Impure function should not be considered constant. norm expect-not=CommuteConstInequality @@ -102,7 +105,7 @@ select └── filters ├── (i:2 >= 2) AND (i:2 > 6) [outer=(2), constraints=(/2: [/7 - ]; tight)] ├── k:1 = 1 [outer=(1), constraints=(/1: [/1 - /1]; tight), fd=()-->(1)] - ├── (f:3 + f:3) < 3.0 [outer=(3)] + ├── (f:3 + f:3) < 3.0 [outer=(3), immutable] └── i:2::INTERVAL >= '01:00:00' [outer=(2), immutable] # Try case that should not match pattern because Minus overload is not defined. @@ -148,7 +151,7 @@ select └── filters ├── (i:2 >= 4) AND (i:2 < 14) [outer=(2), constraints=(/2: [/4 - /13]; tight)] ├── k:1 = 3 [outer=(1), constraints=(/1: [/3 - /3]; tight), fd=()-->(1)] - ├── (f:3 + f:3) < 7.0 [outer=(3)] + ├── (f:3 + f:3) < 7.0 [outer=(3), immutable] ├── (f:3 + i:2::FLOAT8) >= 110.0 [outer=(2,3), immutable] └── d:6 >= '2018-09-30' [outer=(6), constraints=(/6: [/'2018-09-30' - ]; tight)] @@ -194,7 +197,7 @@ select └── filters ├── (i:2 >= -2) AND (i:2 > 10) [outer=(2), constraints=(/2: [/11 - ]; tight)] ├── k:1 = -1 [outer=(1), constraints=(/1: [/-1 - /-1]; tight), fd=()-->(1)] - ├── (f:3 + f:3) > -3.0 [outer=(3)] + ├── (f:3 + f:3) > -3.0 [outer=(3), immutable] └── (f:3 + i:2::FLOAT8) <= -90.0 [outer=(2,3), immutable] # Try case that should not match pattern because Minus overload is not defined. 
@@ -203,6 +206,7 @@ SELECT * FROM a WHERE '[1, 2]'::json - i = '[1]' ---- select ├── columns: k:1!null i:2 f:3 s:4 j:5 d:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-6) ├── scan a @@ -210,7 +214,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-6) └── filters - └── ('[1, 2]' - i:2) = '[1]' [outer=(2)] + └── ('[1, 2]' - i:2) = '[1]' [outer=(2), immutable] # -------------------------------------------------- # NormalizeTupleEquality diff --git a/pkg/sql/opt/norm/testdata/rules/decorrelate b/pkg/sql/opt/norm/testdata/rules/decorrelate index 4afcc5791203..dc659a87c012 100644 --- a/pkg/sql/opt/norm/testdata/rules/decorrelate +++ b/pkg/sql/opt/norm/testdata/rules/decorrelate @@ -1051,18 +1051,22 @@ SELECT i*i/5=ANY(SELECT y FROM xy WHERE x=k) AS r FROM a ---- project ├── columns: r:8 + ├── immutable ├── group-by │ ├── columns: k:1!null scalar:9 bool_or:11 │ ├── grouping columns: k:1!null + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(9,11) │ ├── left-join (hash) │ │ ├── columns: k:1!null x:6 y:7 scalar:9 notnull:10 │ │ ├── multiplicity: left-rows(exactly-one), right-rows(zero-or-one) + │ │ ├── immutable │ │ ├── key: (1) │ │ ├── fd: (1)-->(6,7,9,10), (6)-->(7), (7)~~>(10) │ │ ├── project │ │ │ ├── columns: scalar:9 k:1!null + │ │ │ ├── immutable │ │ │ ├── key: (1) │ │ │ ├── fd: (1)-->(9) │ │ │ ├── scan a @@ -1070,7 +1074,7 @@ project │ │ │ │ ├── key: (1) │ │ │ │ └── fd: (1)-->(2) │ │ │ └── projections - │ │ │ └── (i:2 * i:2) / 5 [as=scalar:9, outer=(2)] + │ │ │ └── (i:2 * i:2) / 5 [as=scalar:9, outer=(2), immutable] │ │ ├── project │ │ │ ├── columns: notnull:10!null x:6!null y:7 │ │ │ ├── key: (6) @@ -1090,7 +1094,7 @@ project │ └── const-agg [as=scalar:9, outer=(9)] │ └── scalar:9 └── projections - └── CASE WHEN bool_or:11 AND (scalar:9 IS NOT NULL) THEN true WHEN bool_or:11 IS NULL THEN false END [as=r:8, outer=(9,11)] + └── CASE WHEN bool_or:11 AND (scalar:9 IS NOT NULL) THEN true WHEN bool_or:11 IS NULL THEN false END [as=r:8, outer=(9,11), immutable] # 
-------------------------------------------------- # TryDecorrelateProject @@ -1197,9 +1201,11 @@ WHERE EXISTS ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── semi-join-apply ├── columns: k:1!null i:2 + ├── immutable ├── key: (1) ├── fd: (1)-->(2) ├── scan a @@ -1210,12 +1216,14 @@ project │ ├── columns: x:6!null plus:10 │ ├── outer: (2) │ ├── multiplicity: left-rows(one-or-more), right-rows(zero-or-one) + │ ├── immutable │ ├── scan xy │ │ ├── columns: x:6!null │ │ └── key: (6) │ ├── project │ │ ├── columns: plus:10!null │ │ ├── outer: (2) + │ │ ├── immutable │ │ ├── select │ │ │ ├── columns: u:8!null │ │ │ ├── outer: (2) @@ -1226,7 +1234,7 @@ project │ │ │ └── filters │ │ │ └── i:2 = 5 [outer=(2), constraints=(/2: [/5 - /5]; tight), fd=()-->(2)] │ │ └── projections - │ │ └── u:8 + 1 [as=plus:10, outer=(8)] + │ │ └── u:8 + 1 [as=plus:10, outer=(8), immutable] │ └── filters │ └── x:6 = plus:10 [outer=(6,10), constraints=(/6: (/NULL - ]; /10: (/NULL - ]), fd=(6)==(10), (10)==(6)] └── filters (true) @@ -1241,9 +1249,11 @@ WHERE EXISTS ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── semi-join-apply ├── columns: k:1!null i:2 + ├── immutable ├── key: (1) ├── fd: (1)-->(2) ├── scan a @@ -1254,12 +1264,14 @@ project │ ├── columns: x:6 plus:10 │ ├── outer: (2) │ ├── multiplicity: left-rows(one-or-more), right-rows(exactly-one) + │ ├── immutable │ ├── scan xy │ │ ├── columns: x:6!null │ │ └── key: (6) │ ├── project │ │ ├── columns: plus:10!null │ │ ├── outer: (2) + │ │ ├── immutable │ │ ├── select │ │ │ ├── columns: u:8!null │ │ │ ├── outer: (2) @@ -1270,7 +1282,7 @@ project │ │ │ └── filters │ │ │ └── i:2 = 5 [outer=(2), constraints=(/2: [/5 - /5]; tight), fd=()-->(2)] │ │ └── projections - │ │ └── u:8 + 1 [as=plus:10, outer=(8)] + │ │ └── u:8 + 1 [as=plus:10, outer=(8), immutable] │ └── filters │ └── x:6 = plus:10 [outer=(6,10), constraints=(/6: (/NULL - ]; /10: (/NULL - ]), fd=(6)==(10), (10)==(6)] └── filters (true) @@ -1283,14 
+1295,17 @@ SELECT (SELECT sum(y + v) FROM xy, uv WHERE x=u AND x=k) FROM a ---- project ├── columns: sum:12 + ├── immutable ├── group-by │ ├── columns: k:1!null sum:11 │ ├── grouping columns: k:1!null + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(11) │ ├── left-join (hash) │ │ ├── columns: k:1!null x:6 column10:10 │ │ ├── multiplicity: left-rows(exactly-one), right-rows(zero-or-one) + │ │ ├── immutable │ │ ├── key: (1) │ │ ├── fd: (6)-->(10), (1)-->(6,10) │ │ ├── scan a @@ -1298,6 +1313,7 @@ project │ │ │ └── key: (1) │ │ ├── project │ │ │ ├── columns: column10:10 x:6!null + │ │ │ ├── immutable │ │ │ ├── key: (6) │ │ │ ├── fd: (6)-->(10) │ │ │ ├── inner-join (hash) @@ -1316,7 +1332,7 @@ project │ │ │ │ └── filters │ │ │ │ └── x:6 = u:8 [outer=(6,8), constraints=(/6: (/NULL - ]; /8: (/NULL - ]), fd=(6)==(8), (8)==(6)] │ │ │ └── projections - │ │ │ └── y:7 + v:9 [as=column10:10, outer=(7,9)] + │ │ │ └── y:7 + v:9 [as=column10:10, outer=(7,9), immutable] │ │ └── filters │ │ └── x:6 = k:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] │ └── aggregations @@ -4075,18 +4091,22 @@ SELECT i*i/100 < ALL(SELECT y FROM xy WHERE x=k) AS r, s FROM a ---- project ├── columns: r:8 s:4 + ├── immutable ├── group-by │ ├── columns: k:1!null s:4 scalar:9 bool_or:11 │ ├── grouping columns: k:1!null + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(4,9,11) │ ├── left-join (hash) │ │ ├── columns: k:1!null s:4 x:6 y:7 scalar:9 notnull:10 │ │ ├── multiplicity: left-rows(exactly-one), right-rows(zero-or-one) + │ │ ├── immutable │ │ ├── key: (1) │ │ ├── fd: (1)-->(4,6,7,9,10), (6)-->(7), (7)~~>(10) │ │ ├── project │ │ │ ├── columns: scalar:9 k:1!null s:4 + │ │ │ ├── immutable │ │ │ ├── key: (1) │ │ │ ├── fd: (1)-->(4,9) │ │ │ ├── scan a @@ -4094,7 +4114,7 @@ project │ │ │ │ ├── key: (1) │ │ │ │ └── fd: (1)-->(2,4) │ │ │ └── projections - │ │ │ └── (i:2 * i:2) / 100 [as=scalar:9, outer=(2)] + │ │ │ └── (i:2 * i:2) / 100 [as=scalar:9, outer=(2), 
immutable] │ │ ├── project │ │ │ ├── columns: notnull:10!null x:6!null y:7 │ │ │ ├── key: (6) @@ -4116,7 +4136,7 @@ project │ └── const-agg [as=scalar:9, outer=(9)] │ └── scalar:9 └── projections - └── NOT CASE WHEN bool_or:11 AND (scalar:9 IS NOT NULL) THEN true WHEN bool_or:11 IS NULL THEN false END [as=r:8, outer=(9,11)] + └── NOT CASE WHEN bool_or:11 AND (scalar:9 IS NOT NULL) THEN true WHEN bool_or:11 IS NULL THEN false END [as=r:8, outer=(9,11), immutable] # Regress issue #32270: Panic when expression contains both correlated and # uncorrelated subquery. @@ -4458,8 +4478,10 @@ SELECT i, y FROM a INNER JOIN xy ON (SELECT k+1) = x ---- project ├── columns: i:2 y:7 + ├── immutable └── inner-join-apply ├── columns: k:1!null i:2 x:6!null y:7 "?column?":8 + ├── immutable ├── key: (1,6) ├── fd: (1)-->(2), (1,6)-->(7,8), (6)==(8), (8)==(6) ├── scan a @@ -4470,6 +4492,7 @@ project │ ├── columns: x:6!null y:7 "?column?":8 │ ├── outer: (1) │ ├── multiplicity: left-rows(exactly-one), right-rows(zero-or-more) + │ ├── immutable │ ├── key: (6) │ ├── fd: ()-->(8), (6)-->(7) │ ├── scan xy @@ -4480,6 +4503,7 @@ project │ │ ├── columns: "?column?":8 │ │ ├── outer: (1) │ │ ├── cardinality: [1 - 1] + │ │ ├── immutable │ │ ├── key: () │ │ ├── fd: ()-->(8) │ │ └── (k:1 + 1,) @@ -4608,14 +4632,17 @@ SELECT (VALUES ((SELECT i+1 AS r)), (10), ((SELECT k+1 AS s))) FROM a ---- project ├── columns: column1:9 + ├── immutable ├── ensure-distinct-on │ ├── columns: k:1!null column1:8 │ ├── grouping columns: k:1!null │ ├── error: "more than one row returned by a subquery used as an expression" + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(8) │ ├── inner-join-apply │ │ ├── columns: k:1!null i:2 r:6 s:7 column1:8 + │ │ ├── immutable │ │ ├── fd: (1)-->(2) │ │ ├── scan a │ │ │ ├── columns: k:1!null i:2 @@ -4625,18 +4652,21 @@ project │ │ │ ├── columns: r:6 s:7 column1:8 │ │ │ ├── outer: (1,2) │ │ │ ├── cardinality: [3 - 3] + │ │ │ ├── immutable │ │ │ ├── fd: ()-->(6,7) │ │ │ ├── inner-join 
(cross) │ │ │ │ ├── columns: r:6 s:7 │ │ │ │ ├── outer: (1,2) │ │ │ │ ├── cardinality: [1 - 1] │ │ │ │ ├── multiplicity: left-rows(exactly-one), right-rows(exactly-one) + │ │ │ │ ├── immutable │ │ │ │ ├── key: () │ │ │ │ ├── fd: ()-->(6,7) │ │ │ │ ├── values │ │ │ │ │ ├── columns: r:6 │ │ │ │ │ ├── outer: (2) │ │ │ │ │ ├── cardinality: [1 - 1] + │ │ │ │ │ ├── immutable │ │ │ │ │ ├── key: () │ │ │ │ │ ├── fd: ()-->(6) │ │ │ │ │ └── (i:2 + 1,) @@ -4644,6 +4674,7 @@ project │ │ │ │ │ ├── columns: s:7 │ │ │ │ │ ├── outer: (1) │ │ │ │ │ ├── cardinality: [1 - 1] + │ │ │ │ │ ├── immutable │ │ │ │ │ ├── key: () │ │ │ │ │ ├── fd: ()-->(7) │ │ │ │ │ └── (k:1 + 1,) diff --git a/pkg/sql/opt/norm/testdata/rules/fold_constants b/pkg/sql/opt/norm/testdata/rules/fold_constants index d4fc7fcd99a6..720fd2d269c1 100644 --- a/pkg/sql/opt/norm/testdata/rules/fold_constants +++ b/pkg/sql/opt/norm/testdata/rules/fold_constants @@ -243,6 +243,7 @@ SELECT 9223372036854775800::INT + 9223372036854775800::INT values ├── columns: "?column?":1 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(1) └── (9223372036854775800 + 9223372036854775800,) @@ -265,6 +266,7 @@ SELECT (-9223372036854775800)::INT - 9223372036854775800::INT values ├── columns: "?column?":1 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(1) └── (-9223372036854775800 - 9223372036854775800,) @@ -287,6 +289,7 @@ SELECT 9223372036854775800::INT * 9223372036854775800::INT values ├── columns: "?column?":1 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(1) └── (9223372036854775800 * 9223372036854775800,) @@ -332,6 +335,7 @@ SELECT B'01' # B'11001001010101' values ├── columns: "?column?":1 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(1) └── (B'01' # B'11001001010101',) @@ -354,6 +358,7 @@ SELECT B'01' | B'11001001010101' values ├── columns: "?column?":1 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(1) └── (B'01' | B'11001001010101',) @@ 
-455,6 +460,7 @@ SELECT -((-9223372036854775808)::int) values ├── columns: "?column?":1(int) ├── cardinality: [1 - 1] + ├── immutable ├── stats: [rows=1] ├── cost: 0.02 ├── key: () @@ -845,10 +851,11 @@ SELECT ARRAY[i, i + 1][2] FROM a ---- project ├── columns: array:7 + ├── immutable ├── scan a │ └── columns: i:2 └── projections - └── i:2 + 1 [as=array:7, outer=(2)] + └── i:2 + 1 [as=array:7, outer=(2), immutable] # Fold when input is a DArray constant. norm expect=FoldIndirection diff --git a/pkg/sql/opt/norm/testdata/rules/groupby b/pkg/sql/opt/norm/testdata/rules/groupby index 0da07bcde89c..6b91051f0623 100644 --- a/pkg/sql/opt/norm/testdata/rules/groupby +++ b/pkg/sql/opt/norm/testdata/rules/groupby @@ -700,26 +700,30 @@ SELECT (SELECT y FROM xy WHERE x+y=k) FROM a ---- project ├── columns: y:8 + ├── immutable ├── ensure-distinct-on │ ├── columns: k:1!null xy.y:7 │ ├── grouping columns: k:1!null │ ├── error: "more than one row returned by a subquery used as an expression" + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(7) │ ├── left-join (hash) │ │ ├── columns: k:1!null xy.y:7 column9:9 │ │ ├── multiplicity: left-rows(one-or-more), right-rows(zero-or-one) + │ │ ├── immutable │ │ ├── scan a │ │ │ ├── columns: k:1!null │ │ │ └── key: (1) │ │ ├── project │ │ │ ├── columns: column9:9 xy.y:7 + │ │ │ ├── immutable │ │ │ ├── scan xy │ │ │ │ ├── columns: x:6!null xy.y:7 │ │ │ │ ├── key: (6) │ │ │ │ └── fd: (6)-->(7) │ │ │ └── projections - │ │ │ └── x:6 + xy.y:7 [as=column9:9, outer=(6,7)] + │ │ │ └── x:6 + xy.y:7 [as=column9:9, outer=(6,7), immutable] │ │ └── filters │ │ └── k:1 = column9:9 [outer=(1,9), constraints=(/1: (/NULL - ]; /9: (/NULL - ]), fd=(1)==(9), (9)==(1)] │ └── aggregations @@ -857,18 +861,21 @@ SELECT min(s) FROM (SELECT i+1 AS i2, s FROM a) GROUP BY i2 ---- project ├── columns: min:7!null + ├── immutable └── group-by ├── columns: i2:6!null min:7!null ├── grouping columns: i2:6!null + ├── immutable ├── key: (6) ├── fd: (6)-->(7) ├── project │ ├── 
columns: i2:6!null s:4!null + │ ├── immutable │ ├── scan a │ │ ├── columns: i:2!null s:4!null │ │ └── key: (2,4) │ └── projections - │ └── i:2 + 1 [as=i2:6, outer=(2)] + │ └── i:2 + 1 [as=i2:6, outer=(2), immutable] └── aggregations └── min [as=min:7, outer=(4)] └── s:4 @@ -1925,6 +1932,7 @@ WHERE x > 100 OR b > 100 ---- project ├── columns: x:1!null y:2!null z:3!null a:4 b:5 c:6 "?column?":7!null + ├── immutable ├── fd: (1)-->(7) ├── select │ ├── columns: column1:1!null column2:2!null column3:3!null a:4 b:5 c:6 @@ -1947,7 +1955,7 @@ project │ └── filters │ └── (column1:1 > 100) OR (b:5 > 100) [outer=(1,5)] └── projections - └── column1:1 + 1 [as="?column?":7, outer=(1)] + └── column1:1 + 1 [as="?column?":7, outer=(1), immutable] # Right input of left join does not have a key, so left side may have dups. norm expect-not=EliminateDistinctOnValues @@ -2025,11 +2033,13 @@ distinct-on ├── columns: x:1!null y:2!null ├── grouping columns: y:2!null ├── cardinality: [1 - 2] + ├── immutable ├── key: (2) ├── fd: (1)-->(2), (2)-->(1) ├── project │ ├── columns: y:2!null column1:1!null │ ├── cardinality: [2 - 2] + │ ├── immutable │ ├── fd: (1)-->(2) │ ├── values │ │ ├── columns: column1:1!null @@ -2037,7 +2047,7 @@ distinct-on │ │ ├── (1,) │ │ └── (2,) │ └── projections - │ └── column1:1 + 1 [as=y:2, outer=(1)] + │ └── column1:1 + 1 [as=y:2, outer=(1), immutable] └── aggregations └── first-agg [as=column1:1, outer=(1)] └── column1:1 diff --git a/pkg/sql/opt/norm/testdata/rules/inline b/pkg/sql/opt/norm/testdata/rules/inline index 3f1bddef930c..062a8bfaf354 100644 --- a/pkg/sql/opt/norm/testdata/rules/inline +++ b/pkg/sql/opt/norm/testdata/rules/inline @@ -103,7 +103,7 @@ select │ ├── (0.00,) │ └── (0.000,) └── filters - ├── column1:1 = 0 [outer=(1), constraints=(/1: [/0 - /0]; tight), fd=()-->(1)] + ├── column1:1 = 0 [outer=(1), immutable, constraints=(/1: [/0 - /0]; tight), fd=()-->(1)] └── column1:1::STRING = '0.00' [outer=(1), immutable] # The rule should trigger, but not 
inline the composite type. @@ -122,7 +122,7 @@ select │ ├── (0.00, 'b') │ └── (0.000, 'b') └── filters - ├── column1:1 = 0 [outer=(1), constraints=(/1: [/0 - /0]; tight), fd=()-->(1)] + ├── column1:1 = 0 [outer=(1), immutable, constraints=(/1: [/0 - /0]; tight), fd=()-->(1)] ├── column1:1::STRING = '0.00' [outer=(1), immutable] └── column2:2 = 'b' [outer=(2), constraints=(/2: [/'b' - /'b']; tight), fd=()-->(2)] @@ -196,7 +196,7 @@ SELECT one+two+three+four FROM (VALUES (1, $1:::int, 2, $2:::int)) AS t(one, two project ├── columns: "?column?":5 ├── cardinality: [1 - 1] - ├── has-placeholder + ├── immutable, has-placeholder ├── key: () ├── fd: ()-->(5) ├── values @@ -207,7 +207,7 @@ project │ ├── fd: ()-->(2,4) │ └── ($1, $2) └── projections - └── column4:4 + ((column2:2 + 1) + 2) [as="?column?":5, outer=(2,4)] + └── column4:4 + ((column2:2 + 1) + 2) [as="?column?":5, outer=(2,4), immutable] # Multiple constant columns, multiple refs to each, interspersed with other # columns. @@ -258,13 +258,14 @@ SELECT one+two FROM (VALUES (1, 2), (3, 4)) AS t(one, two) project ├── columns: "?column?":3!null ├── cardinality: [2 - 2] + ├── immutable ├── values │ ├── columns: column1:1!null column2:2!null │ ├── cardinality: [2 - 2] │ ├── (1, 2) │ └── (3, 4) └── projections - └── column1:1 + column2:2 [as="?column?":3, outer=(1,2)] + └── column1:1 + column2:2 [as="?column?":3, outer=(1,2), immutable] # -------------------------------------------------- # InlineSelectConstants @@ -456,16 +457,18 @@ SELECT * FROM (SELECT k*2+1 AS expr FROM a) a WHERE expr > 10 ---- project ├── columns: expr:6!null + ├── immutable ├── select │ ├── columns: k:1!null + │ ├── immutable │ ├── key: (1) │ ├── scan a │ │ ├── columns: k:1!null │ │ └── key: (1) │ └── filters - │ └── (k:1 * 2) > 9 [outer=(1)] + │ └── (k:1 * 2) > 9 [outer=(1), immutable] └── projections - └── (k:1 * 2) + 1 [as=expr:6, outer=(1)] + └── (k:1 * 2) + 1 [as=expr:6, outer=(1), immutable] # Inline boolean logic. 
norm expect=PushSelectIntoInlinableProject @@ -505,14 +508,16 @@ SELECT * FROM (SELECT f+1 AS expr FROM a) a WHERE expr=expr ---- project ├── columns: expr:6 + ├── immutable ├── select │ ├── columns: f:3 + │ ├── immutable │ ├── scan a │ │ └── columns: f:3 │ └── filters - │ └── (f:3 + 1.0) IS DISTINCT FROM CAST(NULL AS FLOAT8) [outer=(3)] + │ └── (f:3 + 1.0) IS DISTINCT FROM CAST(NULL AS FLOAT8) [outer=(3), immutable] └── projections - └── f:3 + 1.0 [as=expr:6, outer=(3)] + └── f:3 + 1.0 [as=expr:6, outer=(3), immutable] # Use outer references in both inlined expression and in referencing expression. norm expect=PushSelectIntoInlinableProject @@ -520,6 +525,7 @@ SELECT * FROM a WHERE EXISTS(SELECT * FROM (SELECT (x-i) AS expr FROM xy) WHERE ---- semi-join (cross) ├── columns: k:1!null i:2 f:3 s:4 j:5 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-5) ├── scan a @@ -530,7 +536,7 @@ semi-join (cross) │ ├── columns: x:6!null │ └── key: (6) └── filters - └── (x:6 - i:2) > (i:2 * i:2) [outer=(2,6)] + └── (x:6 - i:2) > (i:2 * i:2) [outer=(2,6), immutable] exec-ddl CREATE TABLE crdb_internal.zones ( @@ -596,13 +602,14 @@ project │ ├── limit hint: 107.00 │ ├── project │ │ ├── columns: c0:6!null c1:7!null + │ │ ├── immutable │ │ ├── limit hint: 321.00 │ │ ├── scan crdb_internal.public.zones │ │ │ ├── columns: crdb_internal.public.zones.zone_id:1!null │ │ │ └── limit hint: 321.00 │ │ └── projections - │ │ ├── crdb_internal.public.zones.zone_id:1 + 1 [as=c0:6, outer=(1)] - │ │ └── crdb_internal.public.zones.zone_id:1 + 2 [as=c1:7, outer=(1)] + │ │ ├── crdb_internal.public.zones.zone_id:1 + 1 [as=c0:6, outer=(1), immutable] + │ │ └── crdb_internal.public.zones.zone_id:1 + 2 [as=c1:7, outer=(1), immutable] │ └── filters │ └── le [outer=(6,7), stable+volatile, side-effects, correlated-subquery] │ ├── case @@ -636,13 +643,14 @@ SELECT NOT(expr), i+1 AS r FROM (SELECT k=1 AS expr, i FROM a) ---- project ├── columns: "?column?":7!null r:8 + ├── immutable ├── scan a │ ├── columns: 
k:1!null i:2 │ ├── key: (1) │ └── fd: (1)-->(2) └── projections ├── k:1 != 1 [as="?column?":7, outer=(1)] - └── i:2 + 1 [as=r:8, outer=(2)] + └── i:2 + 1 [as=r:8, outer=(2), immutable] # Multiple synthesized column references to same inner passthrough column # (should still inline). @@ -651,14 +659,15 @@ SELECT x+1, x+2, y1+2 FROM (SELECT x, y+1 AS y1 FROM xy) ---- project ├── columns: "?column?":4!null "?column?":5!null "?column?":6 + ├── immutable ├── scan xy │ ├── columns: x:1!null y:2 │ ├── key: (1) │ └── fd: (1)-->(2) └── projections - ├── x:1 + 1 [as="?column?":4, outer=(1)] - ├── x:1 + 2 [as="?column?":5, outer=(1)] - └── (y:2 + 1) + 2 [as="?column?":6, outer=(2)] + ├── x:1 + 1 [as="?column?":4, outer=(1), immutable] + ├── x:1 + 2 [as="?column?":5, outer=(1), immutable] + └── (y:2 + 1) + 2 [as="?column?":6, outer=(2), immutable] # Synthesized and passthrough references to same inner passthrough column # (should still inline). @@ -667,6 +676,7 @@ SELECT x+y1 FROM (SELECT x, y+1 AS y1 FROM xy) ORDER BY x ---- project ├── columns: "?column?":4 [hidden: x:1!null] + ├── immutable ├── key: (1) ├── fd: (1)-->(4) ├── ordering: +1 @@ -676,7 +686,7 @@ project │ ├── fd: (1)-->(2) │ └── ordering: +1 └── projections - └── x:1 + (y:2 + 1) [as="?column?":4, outer=(1,2)] + └── x:1 + (y:2 + 1) [as="?column?":4, outer=(1,2), immutable] # Inline multiple expressions. norm expect=InlineProjectInProject @@ -684,13 +694,14 @@ SELECT expr+1 AS r, i, expr2 || 'bar' AS s FROM (SELECT k+1 AS expr, s || 'foo' ---- project ├── columns: r:8!null i:2 s:9 + ├── immutable ├── scan a │ ├── columns: k:1!null i:2 a.s:4 │ ├── key: (1) │ └── fd: (1)-->(2,4) └── projections - ├── (k:1 + 1) + 1 [as=r:8, outer=(1)] - └── (a.s:4 || 'foo') || 'bar' [as=s:9, outer=(4)] + ├── (k:1 + 1) + 1 [as=r:8, outer=(1), immutable] + └── (a.s:4 || 'foo') || 'bar' [as=s:9, outer=(4), immutable] # Don't inline when there are multiple references. 
norm expect-not=InlineProjectInProject @@ -698,16 +709,18 @@ SELECT expr, expr*2 AS r FROM (SELECT k+1 AS expr FROM a) ---- project ├── columns: expr:6!null r:7!null + ├── immutable ├── fd: (6)-->(7) ├── project │ ├── columns: expr:6!null + │ ├── immutable │ ├── scan a │ │ ├── columns: k:1!null │ │ └── key: (1) │ └── projections - │ └── k:1 + 1 [as=expr:6, outer=(1)] + │ └── k:1 + 1 [as=expr:6, outer=(1), immutable] └── projections - └── expr:6 * 2 [as=r:7, outer=(6)] + └── expr:6 * 2 [as=r:7, outer=(6), immutable] # Uncorrelated subquery should not block inlining. norm expect=InlineProjectInProject @@ -715,6 +728,7 @@ SELECT EXISTS(SELECT * FROM xy WHERE x=1 OR x=2), expr*2 AS r FROM (SELECT k+1 A ---- project ├── columns: exists:9 r:10!null + ├── immutable ├── fd: ()-->(9) ├── scan a │ ├── columns: k:1!null @@ -740,7 +754,7 @@ project │ │ └── filters │ │ └── (x:7 = 1) OR (x:7 = 2) [outer=(7), constraints=(/7: [/1 - /1] [/2 - /2]; tight)] │ └── 1 - └── (k:1 + 1) * 2 [as=r:10, outer=(1)] + └── (k:1 + 1) * 2 [as=r:10, outer=(1), immutable] # Correlated subquery should be hoisted as usual. 
norm expect=InlineProjectInProject @@ -748,26 +762,31 @@ SELECT EXISTS(SELECT * FROM xy WHERE expr<0) FROM (SELECT k+1 AS expr FROM a) ---- project ├── columns: exists:9!null + ├── immutable ├── group-by │ ├── columns: true_agg:11 rownum:13!null │ ├── grouping columns: rownum:13!null + │ ├── immutable │ ├── key: (13) │ ├── fd: (13)-->(11) │ ├── left-join (cross) │ │ ├── columns: expr:6!null true:10 rownum:13!null │ │ ├── multiplicity: left-rows(one-or-more), right-rows(zero-or-more) + │ │ ├── immutable │ │ ├── fd: (13)-->(6) │ │ ├── ordinality │ │ │ ├── columns: expr:6!null rownum:13!null + │ │ │ ├── immutable │ │ │ ├── key: (13) │ │ │ ├── fd: (13)-->(6) │ │ │ └── project │ │ │ ├── columns: expr:6!null + │ │ │ ├── immutable │ │ │ ├── scan a │ │ │ │ ├── columns: k:1!null │ │ │ │ └── key: (1) │ │ │ └── projections - │ │ │ └── k:1 + 1 [as=expr:6, outer=(1)] + │ │ │ └── k:1 + 1 [as=expr:6, outer=(1), immutable] │ │ ├── project │ │ │ ├── columns: true:10!null │ │ │ ├── fd: ()-->(10) @@ -788,6 +807,7 @@ SELECT c FROM (SELECT k+2 AS c FROM a) AS t WHERE c > 2; ---- project ├── columns: c:6!null + ├── immutable ├── select │ ├── columns: k:1!null │ ├── key: (1) @@ -797,4 +817,4 @@ project │ └── filters │ └── k:1 > 0 [outer=(1), constraints=(/1: [/1 - ]; tight)] └── projections - └── k:1 + 2 [as=c:6, outer=(1)] + └── k:1 + 2 [as=c:6, outer=(1), immutable] diff --git a/pkg/sql/opt/norm/testdata/rules/join b/pkg/sql/opt/norm/testdata/rules/join index baa909153d90..99c972d98f35 100644 --- a/pkg/sql/opt/norm/testdata/rules/join +++ b/pkg/sql/opt/norm/testdata/rules/join @@ -346,10 +346,12 @@ SELECT * FROM a INNER JOIN b ON a.k=b.x AND a.k + b.y > 5 AND b.x * a.i = 3 inner-join (hash) ├── columns: k:1!null i:2 f:3!null s:4 j:5 x:6!null y:7 ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-one) + ├── immutable ├── key: (6) ├── fd: (1)-->(2-5), (6)-->(7), (1)==(6), (6)==(1) ├── select │ ├── columns: k:1!null i:2 f:3!null s:4 j:5 + │ ├── immutable │ ├── key: (1) │ ├── fd: 
(1)-->(2-5) │ ├── scan a @@ -357,9 +359,10 @@ inner-join (hash) │ │ ├── key: (1) │ │ └── fd: (1)-->(2-5) │ └── filters - │ └── (k:1 * i:2) = 3 [outer=(1,2)] + │ └── (k:1 * i:2) = 3 [outer=(1,2), immutable] ├── select │ ├── columns: x:6!null y:7 + │ ├── immutable │ ├── key: (6) │ ├── fd: (6)-->(7) │ ├── scan b @@ -367,7 +370,7 @@ inner-join (hash) │ │ ├── key: (6) │ │ └── fd: (6)-->(7) │ └── filters - │ └── (x:6 + y:7) > 5 [outer=(6,7)] + │ └── (x:6 + y:7) > 5 [outer=(6,7), immutable] └── filters └── k:1 = x:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] @@ -415,10 +418,12 @@ SELECT * FROM a WHERE EXISTS( ---- semi-join (hash) ├── columns: k:1!null i:2 f:3!null s:4 j:5 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-5) ├── select │ ├── columns: k:1!null i:2 f:3!null s:4 j:5 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2-5) │ ├── scan a @@ -426,9 +431,10 @@ semi-join (hash) │ │ ├── key: (1) │ │ └── fd: (1)-->(2-5) │ └── filters - │ └── (k:1 * i:2) = 3 [outer=(1,2)] + │ └── (k:1 * i:2) = 3 [outer=(1,2), immutable] ├── select │ ├── columns: x:6!null y:7 + │ ├── immutable │ ├── key: (6) │ ├── fd: (6)-->(7) │ ├── scan b @@ -436,7 +442,7 @@ semi-join (hash) │ │ ├── key: (6) │ │ └── fd: (6)-->(7) │ └── filters - │ └── (x:6 + y:7) > 5 [outer=(6,7)] + │ └── (x:6 + y:7) > 5 [outer=(6,7), immutable] └── filters └── k:1 = x:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] @@ -480,6 +486,7 @@ SELECT * FROM a LEFT JOIN b ON a.k=b.x AND a.k + b.y > 5 AND b.x * a.i = 3 left-join (hash) ├── columns: k:1!null i:2 f:3!null s:4 j:5 x:6 y:7 ├── multiplicity: left-rows(exactly-one), right-rows(zero-or-one) + ├── immutable ├── key: (1) ├── fd: (1)-->(2-7), (6)-->(7) ├── scan a @@ -488,6 +495,7 @@ left-join (hash) │ └── fd: (1)-->(2-5) ├── select │ ├── columns: x:6!null y:7 + │ ├── immutable │ ├── key: (6) │ ├── fd: (6)-->(7) │ ├── scan b @@ -495,10 +503,10 @@ left-join (hash) │ │ ├── key: (6) │ │ └── fd: 
(6)-->(7) │ └── filters - │ └── (x:6 + y:7) > 5 [outer=(6,7)] + │ └── (x:6 + y:7) > 5 [outer=(6,7), immutable] └── filters ├── k:1 = x:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] - └── (x:6 * i:2) = 3 [outer=(2,6)] + └── (x:6 * i:2) = 3 [outer=(2,6), immutable] norm expect=MapFilterIntoJoinRight expect-not=PushFilterIntoJoinLeftAndRight SELECT * FROM a LEFT JOIN b ON a.k=b.x AND a.k > 5 AND b.x IN (3, 7, 10) @@ -533,6 +541,7 @@ SELECT * FROM a FULL JOIN b ON a.k=b.x AND a.k + b.y > 5 AND b.x * a.i = 3 full-join (hash) ├── columns: k:1 i:2 f:3 s:4 j:5 x:6 y:7 ├── multiplicity: left-rows(exactly-one), right-rows(exactly-one) + ├── immutable ├── key: (1,6) ├── fd: (1)-->(2-5), (6)-->(7) ├── scan a @@ -545,8 +554,8 @@ full-join (hash) │ └── fd: (6)-->(7) └── filters ├── k:1 = x:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] - ├── (k:1 + y:7) > 5 [outer=(1,7)] - └── (x:6 * i:2) = 3 [outer=(2,6)] + ├── (k:1 + y:7) > 5 [outer=(1,7), immutable] + └── (x:6 * i:2) = 3 [outer=(2,6), immutable] norm expect-not=(PushFilterIntoJoinLeftAndRight,MapFilterIntoJoinLeft,MapFilterIntoJoinRight) SELECT * FROM a FULL JOIN b ON a.k=b.x AND a.k > 5 AND b.x IN (3, 7, 10) @@ -577,6 +586,7 @@ SELECT * FROM a WHERE NOT EXISTS( ---- anti-join (hash) ├── columns: k:1!null i:2 f:3!null s:4 j:5 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-5) ├── scan a @@ -585,6 +595,7 @@ anti-join (hash) │ └── fd: (1)-->(2-5) ├── select │ ├── columns: x:6!null y:7 + │ ├── immutable │ ├── key: (6) │ ├── fd: (6)-->(7) │ ├── scan b @@ -592,10 +603,10 @@ anti-join (hash) │ │ ├── key: (6) │ │ └── fd: (6)-->(7) │ └── filters - │ └── (x:6 + y:7) > 5 [outer=(6,7)] + │ └── (x:6 + y:7) > 5 [outer=(6,7), immutable] └── filters ├── k:1 = x:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] - └── (x:6 * i:2) = 3 [outer=(2,6)] + └── (x:6 * i:2) = 3 [outer=(2,6), immutable] norm expect=MapFilterIntoJoinRight 
expect-not=PushFilterIntoJoinLeftAndRight SELECT * FROM a WHERE NOT EXISTS( @@ -629,10 +640,12 @@ SELECT * FROM a JOIN b ON a.k = b.x AND b.x * a.i = (SELECT min(b.x) FROM b) inner-join (hash) ├── columns: k:1!null i:2 f:3!null s:4 j:5 x:6!null y:7 ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-one) + ├── immutable ├── key: (6) ├── fd: (1)-->(2-5), (6)-->(7), (1)==(6), (6)==(1) ├── select │ ├── columns: k:1!null i:2 f:3!null s:4 j:5 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2-5) │ ├── scan a @@ -640,7 +653,7 @@ inner-join (hash) │ │ ├── key: (1) │ │ └── fd: (1)-->(2-5) │ └── filters - │ └── eq [outer=(1,2), subquery] + │ └── eq [outer=(1,2), immutable, subquery] │ ├── k:1 * i:2 │ └── subquery │ └── scalar-group-by @@ -667,10 +680,12 @@ SELECT * FROM a JOIN b ON a.k = b.x AND b.x * a.i = (SELECT a.k * b.y FROM b) ---- project ├── columns: k:1!null i:2 f:3!null s:4 j:5 x:6!null y:7 + ├── immutable ├── key: (6) ├── fd: (1)-->(2-5), (1,6)-->(7), (1)==(6), (6)==(1) └── inner-join-apply ├── columns: k:1!null i:2 f:3!null s:4 j:5 x:6!null y:7 "?column?":10 + ├── immutable ├── key: (6) ├── fd: (1)-->(2-5), (1,6)-->(7,10), (1)==(6), (6)==(1) ├── scan a @@ -682,12 +697,14 @@ project │ ├── grouping columns: x:6!null │ ├── error: "more than one row returned by a subquery used as an expression" │ ├── outer: (1) + │ ├── immutable │ ├── key: (6) │ ├── fd: (6)-->(7,10) │ ├── left-join (cross) │ │ ├── columns: x:6!null y:7 "?column?":10 │ │ ├── outer: (1) │ │ ├── multiplicity: left-rows(one-or-more), right-rows(zero-or-more) + │ │ ├── immutable │ │ ├── fd: (6)-->(7) │ │ ├── scan b │ │ │ ├── columns: x:6!null y:7 @@ -696,10 +713,11 @@ project │ │ ├── project │ │ │ ├── columns: "?column?":10 │ │ │ ├── outer: (1) + │ │ │ ├── immutable │ │ │ ├── scan b │ │ │ │ └── columns: y:9 │ │ │ └── projections - │ │ │ └── k:1 * y:9 [as="?column?":10, outer=(1,9)] + │ │ │ └── k:1 * y:9 [as="?column?":10, outer=(1,9), immutable] │ │ └── filters (true) │ └── aggregations │ ├── 
const-agg [as=y:7, outer=(7)] @@ -708,7 +726,7 @@ project │ └── "?column?":10 └── filters ├── k:1 = x:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] - └── "?column?":10 = (x:6 * i:2) [outer=(2,6,10), constraints=(/10: (/NULL - ])] + └── "?column?":10 = (x:6 * i:2) [outer=(2,6,10), immutable, constraints=(/10: (/NULL - ])] # Ensure that we do not map filters for types with composite key encoding. norm expect-not=(PushFilterIntoJoinLeftAndRight,MapFilterIntoJoinLeft,MapFilterIntoJoinRight) @@ -737,7 +755,7 @@ inner-join (hash) │ ├── (1.00,) │ └── (2.00,) └── filters - └── column1:1 = column1:2 [outer=(1,2), constraints=(/1: (/NULL - ]; /2: (/NULL - ]), fd=(1)==(2), (2)==(1)] + └── column1:1 = column1:2 [outer=(1,2), immutable, constraints=(/1: (/NULL - ]; /2: (/NULL - ]), fd=(1)==(2), (2)==(1)] # Optimization does not apply if equality is only on one side. norm expect-not=(PushFilterIntoJoinLeftAndRight,MapFilterIntoJoinLeft,MapFilterIntoJoinRight) @@ -745,6 +763,7 @@ SELECT * FROM a INNER JOIN b ON b.y=b.x AND a.k=a.i AND a.k + b.y > 5 AND b.x * ---- inner-join (cross) ├── columns: k:1!null i:2!null f:3!null s:4 j:5 x:6!null y:7!null + ├── immutable ├── key: (1,6) ├── fd: (1)-->(3-5), (1)==(2), (2)==(1), (6)==(7), (7)==(6) ├── select @@ -768,8 +787,8 @@ inner-join (cross) │ └── filters │ └── y:7 = x:6 [outer=(6,7), constraints=(/6: (/NULL - ]; /7: (/NULL - ]), fd=(6)==(7), (7)==(6)] └── filters - ├── (k:1 + y:7) > 5 [outer=(1,7)] - └── (x:6 * i:2) = 3 [outer=(2,6)] + ├── (k:1 + y:7) > 5 [outer=(1,7), immutable] + └── (x:6 * i:2) = 3 [outer=(2,6), immutable] # Ensure that MapFilterIntoJoinRight doesn't cause cycle with decorrelation. 
norm expect=MapFilterIntoJoinRight @@ -783,14 +802,17 @@ FROM c ---- project ├── columns: x:13 + ├── immutable ├── ensure-distinct-on │ ├── columns: c.x:1!null b.x:4 │ ├── grouping columns: c.x:1!null │ ├── error: "more than one row returned by a subquery used as an expression" + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(4) │ ├── left-join-apply │ │ ├── columns: c.x:1!null b.x:4 k:8 + │ │ ├── immutable │ │ ├── fd: (4)==(8), (8)==(4) │ │ ├── scan c │ │ │ ├── columns: c.x:1!null @@ -799,6 +821,7 @@ project │ │ │ ├── columns: b.x:4!null k:8!null │ │ │ ├── outer: (1) │ │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + │ │ │ ├── immutable │ │ │ ├── fd: (4)==(8), (8)==(4) │ │ │ ├── full-join (cross) │ │ │ │ ├── columns: b.x:4 @@ -812,12 +835,13 @@ project │ │ │ │ └── c.x:1 = 5 [outer=(1), constraints=(/1: [/5 - /5]; tight), fd=()-->(1)] │ │ │ ├── select │ │ │ │ ├── columns: k:8!null + │ │ │ │ ├── immutable │ │ │ │ ├── key: (8) │ │ │ │ ├── scan a │ │ │ │ │ ├── columns: k:8!null │ │ │ │ │ └── key: (8) │ │ │ │ └── filters - │ │ │ │ └── (k:8 + k:8) < 5 [outer=(8)] + │ │ │ │ └── (k:8 + k:8) < 5 [outer=(8), immutable] │ │ │ └── filters │ │ │ └── k:8 = b.x:4 [outer=(4,8), constraints=(/4: (/NULL - ]; /8: (/NULL - ]), fd=(4)==(8), (8)==(4)] │ │ └── filters (true) @@ -838,14 +862,17 @@ FROM c ---- project ├── columns: x:13 + ├── immutable ├── ensure-distinct-on │ ├── columns: c.x:1!null b.x:9 │ ├── grouping columns: c.x:1!null │ ├── error: "more than one row returned by a subquery used as an expression" + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(9) │ ├── left-join-apply │ │ ├── columns: c.x:1!null k:4 b.x:9 + │ │ ├── immutable │ │ ├── fd: (4)==(9), (9)==(4) │ │ ├── scan c │ │ │ ├── columns: c.x:1!null @@ -854,15 +881,17 @@ project │ │ │ ├── columns: k:4!null b.x:9!null │ │ │ ├── outer: (1) │ │ │ ├── multiplicity: left-rows(zero-or-more), right-rows(zero-or-one) + │ │ │ ├── immutable │ │ │ ├── fd: (4)==(9), (9)==(4) │ │ │ ├── select │ │ │ │ ├── 
columns: k:4!null + │ │ │ │ ├── immutable │ │ │ │ ├── key: (4) │ │ │ │ ├── scan a │ │ │ │ │ ├── columns: k:4!null │ │ │ │ │ └── key: (4) │ │ │ │ └── filters - │ │ │ │ └── (k:4 + k:4) < 5 [outer=(4)] + │ │ │ │ └── (k:4 + k:4) < 5 [outer=(4), immutable] │ │ │ ├── full-join (cross) │ │ │ │ ├── columns: b.x:9 │ │ │ │ ├── outer: (1) @@ -897,7 +926,7 @@ SELECT * FROM t1, t2 WHERE a = b AND age(b, TIMESTAMPTZ '2017-01-01') > INTERVAL ---- inner-join (cross) ├── columns: a:1!null b:3!null - ├── immutable, side-effects + ├── stable, side-effects ├── fd: (1)==(3), (3)==(1) ├── scan t1 │ └── columns: a:1 @@ -909,7 +938,7 @@ inner-join (cross) │ └── filters │ └── age(b:3, '2017-01-01 00:00:00+00:00') > '1 day' [outer=(3), immutable, side-effects] └── filters - └── a:1 = b:3 [outer=(1,3), constraints=(/1: (/NULL - ]; /3: (/NULL - ]), fd=(1)==(3), (3)==(1)] + └── a:1 = b:3 [outer=(1,3), stable, constraints=(/1: (/NULL - ]; /3: (/NULL - ]), fd=(1)==(3), (3)==(1)] # Regression for issue 28818. Try to trigger undetectable cycle between the # PushFilterIntoJoinLeftAndRight and TryDecorrelateSelect rules. 
@@ -1891,10 +1920,12 @@ SELECT * FROM a WHERE (SELECT sum(column1) FROM (VALUES (k), (1))) = 1 ---- project ├── columns: k:1!null i:2 f:3!null s:4 j:5 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-5) └── select ├── columns: k:1!null i:2 f:3!null s:4 j:5 sum:7!null + ├── immutable ├── key: (1) ├── fd: ()-->(7), (1)-->(2-5) ├── group-by @@ -1928,7 +1959,7 @@ project │ └── const-agg [as=j:5, outer=(5)] │ └── j:5 └── filters - └── sum:7 = 1 [outer=(7), constraints=(/7: [/1 - /1]; tight), fd=()-->(7)] + └── sum:7 = 1 [outer=(7), immutable, constraints=(/7: [/1 - /1]; tight), fd=()-->(7)] # Don't simplify left join norm expect-not=SimplifyRightJoin @@ -2092,6 +2123,7 @@ SELECT * FROM a FULL JOIN (SELECT k+1 AS k FROM a) AS a2 ON a.k=a2.k full-join (hash) ├── columns: k:1 i:2 f:3 s:4 j:5 k:11 ├── multiplicity: left-rows(one-or-more), right-rows(exactly-one) + ├── immutable ├── fd: (1)-->(2-5) ├── scan a │ ├── columns: a.k:1!null i:2 f:3!null s:4 j:5 @@ -2099,11 +2131,12 @@ full-join (hash) │ └── fd: (1)-->(2-5) ├── project │ ├── columns: k:11!null + │ ├── immutable │ ├── scan a │ │ ├── columns: a.k:6!null │ │ └── key: (6) │ └── projections - │ └── a.k:6 + 1 [as=k:11, outer=(6)] + │ └── a.k:6 + 1 [as=k:11, outer=(6), immutable] └── filters └── a.k:1 = k:11 [outer=(1,11), constraints=(/1: (/NULL - ]; /11: (/NULL - ]), fd=(1)==(11), (11)==(1)] @@ -2591,6 +2624,7 @@ inner-join (hash) │ └── i:2 > 0 [outer=(2), constraints=(/2: [/1 - ]; tight)] ├── project │ ├── columns: z:8!null x:6!null y:7!null + │ ├── immutable │ ├── key: (6) │ ├── fd: (6)-->(7), (7)-->(8) │ ├── select @@ -2604,7 +2638,7 @@ inner-join (hash) │ │ └── filters │ │ └── y:7 > 10 [outer=(7), constraints=(/7: [/11 - ]; tight)] │ └── projections - │ └── y:7 + 1 [as=z:8, outer=(7)] + │ └── y:7 + 1 [as=z:8, outer=(7), immutable] └── filters ├── f:3 >= z:8::FLOAT8 [outer=(3,8), immutable, constraints=(/3: (/NULL - ])] ├── f:3 >= z:8::FLOAT8 [outer=(3,8), immutable, constraints=(/3: (/NULL - ])] @@ -2640,15 +2674,18 @@ 
SELECT * FROM xy JOIN uv ON x+y=u ---- project ├── columns: x:1!null y:2 u:3!null v:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2), (1,2)-->(3,4), (3)-->(4) └── inner-join (hash) ├── columns: x:1!null y:2 u:3!null v:4 column5:5!null ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + ├── immutable ├── key: (1) ├── fd: (1)-->(2), (1,2)-->(5), (3)-->(4), (3)==(5), (5)==(3) ├── project │ ├── columns: column5:5 x:1!null y:2 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2), (1,2)-->(5) │ ├── scan xy @@ -2656,7 +2693,7 @@ project │ │ ├── key: (1) │ │ └── fd: (1)-->(2) │ └── projections - │ └── x:1 + y:2 [as=column5:5, outer=(1,2)] + │ └── x:1 + y:2 [as=column5:5, outer=(1,2), immutable] ├── scan uv │ ├── columns: u:3!null v:4 │ ├── key: (3) @@ -2669,15 +2706,18 @@ SELECT * FROM xy JOIN uv ON u=x+y ---- project ├── columns: x:1!null y:2 u:3!null v:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2), (1,2)-->(3,4), (3)-->(4) └── inner-join (hash) ├── columns: x:1!null y:2 u:3!null v:4 column5:5!null ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + ├── immutable ├── key: (1) ├── fd: (1)-->(2), (1,2)-->(5), (3)-->(4), (3)==(5), (5)==(3) ├── project │ ├── columns: column5:5 x:1!null y:2 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2), (1,2)-->(5) │ ├── scan xy @@ -2685,7 +2725,7 @@ project │ │ ├── key: (1) │ │ └── fd: (1)-->(2) │ └── projections - │ └── x:1 + y:2 [as=column5:5, outer=(1,2)] + │ └── x:1 + y:2 [as=column5:5, outer=(1,2), immutable] ├── scan uv │ ├── columns: u:3!null v:4 │ ├── key: (3) @@ -2698,11 +2738,13 @@ SELECT * FROM xy JOIN uv ON x=u+v ---- project ├── columns: x:1!null y:2 u:3!null v:4 + ├── immutable ├── key: (3) ├── fd: (1)-->(2), (3)-->(4), (3,4)-->(1,2) └── inner-join (hash) ├── columns: x:1!null y:2 u:3!null v:4 column5:5!null ├── multiplicity: left-rows(zero-or-more), right-rows(zero-or-one) + ├── immutable ├── key: (3) ├── fd: (1)-->(2), (3)-->(4), (3,4)-->(5), (1)==(5), (5)==(1) ├── scan xy @@ -2711,6 
+2753,7 @@ project │ └── fd: (1)-->(2) ├── project │ ├── columns: column5:5 u:3!null v:4 + │ ├── immutable │ ├── key: (3) │ ├── fd: (3)-->(4), (3,4)-->(5) │ ├── scan uv @@ -2718,7 +2761,7 @@ project │ │ ├── key: (3) │ │ └── fd: (3)-->(4) │ └── projections - │ └── u:3 + v:4 [as=column5:5, outer=(3,4)] + │ └── u:3 + v:4 [as=column5:5, outer=(3,4), immutable] └── filters └── x:1 = column5:5 [outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] @@ -2727,11 +2770,13 @@ SELECT * FROM xy JOIN uv ON u+v=x ---- project ├── columns: x:1!null y:2 u:3!null v:4 + ├── immutable ├── key: (3) ├── fd: (1)-->(2), (3)-->(4), (3,4)-->(1,2) └── inner-join (hash) ├── columns: x:1!null y:2 u:3!null v:4 column5:5!null ├── multiplicity: left-rows(zero-or-more), right-rows(zero-or-one) + ├── immutable ├── key: (3) ├── fd: (1)-->(2), (3)-->(4), (3,4)-->(5), (1)==(5), (5)==(1) ├── scan xy @@ -2740,6 +2785,7 @@ project │ └── fd: (1)-->(2) ├── project │ ├── columns: column5:5 u:3!null v:4 + │ ├── immutable │ ├── key: (3) │ ├── fd: (3)-->(4), (3,4)-->(5) │ ├── scan uv @@ -2747,7 +2793,7 @@ project │ │ ├── key: (3) │ │ └── fd: (3)-->(4) │ └── projections - │ └── u:3 + v:4 [as=column5:5, outer=(3,4)] + │ └── u:3 + v:4 [as=column5:5, outer=(3,4), immutable] └── filters └── x:1 = column5:5 [outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] @@ -2756,14 +2802,17 @@ SELECT * FROM xy JOIN uv ON x+y=u+v ---- project ├── columns: x:1!null y:2 u:3!null v:4 + ├── immutable ├── key: (1,3) ├── fd: (1)-->(2), (3)-->(4) └── inner-join (hash) ├── columns: x:1!null y:2 u:3!null v:4 column5:5!null column6:6!null + ├── immutable ├── key: (1,3) ├── fd: (1)-->(2), (1,2)-->(5), (3)-->(4), (3,4)-->(6), (5)==(6), (6)==(5) ├── project │ ├── columns: column5:5 x:1!null y:2 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2), (1,2)-->(5) │ ├── scan xy @@ -2771,9 +2820,10 @@ project │ │ ├── key: (1) │ │ └── fd: (1)-->(2) │ └── projections - │ └── x:1 + y:2 
[as=column5:5, outer=(1,2)] + │ └── x:1 + y:2 [as=column5:5, outer=(1,2), immutable] ├── project │ ├── columns: column6:6 u:3!null v:4 + │ ├── immutable │ ├── key: (3) │ ├── fd: (3)-->(4), (3,4)-->(6) │ ├── scan uv @@ -2781,7 +2831,7 @@ project │ │ ├── key: (3) │ │ └── fd: (3)-->(4) │ └── projections - │ └── u:3 + v:4 [as=column6:6, outer=(3,4)] + │ └── u:3 + v:4 [as=column6:6, outer=(3,4), immutable] └── filters └── column5:5 = column6:6 [outer=(5,6), constraints=(/5: (/NULL - ]; /6: (/NULL - ]), fd=(5)==(6), (6)==(5)] @@ -2791,15 +2841,18 @@ SELECT * FROM xy JOIN uv ON x+y=u AND x=u+v AND x*y+1=u*v+2 ---- project ├── columns: x:1!null y:2 u:3!null v:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2), (1,2)-->(3,4), (3)-->(4), (3,4)-->(1,2) └── inner-join (hash) ├── columns: x:1!null y:2 u:3!null v:4 column5:5!null column6:6!null column7:7!null column8:8!null ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-one) + ├── immutable ├── key: (1) ├── fd: (1)-->(2), (1,2)-->(5,7), (3)-->(4), (3,4)-->(6,8), (3)==(5), (5)==(3), (1)==(6), (6)==(1), (7)==(8), (8)==(7) ├── project │ ├── columns: column7:7 column5:5 x:1!null y:2 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2), (1,2)-->(5,7) │ ├── scan xy @@ -2807,10 +2860,11 @@ project │ │ ├── key: (1) │ │ └── fd: (1)-->(2) │ └── projections - │ ├── (x:1 * y:2) + 1 [as=column7:7, outer=(1,2)] - │ └── x:1 + y:2 [as=column5:5, outer=(1,2)] + │ ├── (x:1 * y:2) + 1 [as=column7:7, outer=(1,2), immutable] + │ └── x:1 + y:2 [as=column5:5, outer=(1,2), immutable] ├── project │ ├── columns: column8:8 column6:6 u:3!null v:4 + │ ├── immutable │ ├── key: (3) │ ├── fd: (3)-->(4), (3,4)-->(6,8) │ ├── scan uv @@ -2818,8 +2872,8 @@ project │ │ ├── key: (3) │ │ └── fd: (3)-->(4) │ └── projections - │ ├── (u:3 * v:4) + 2 [as=column8:8, outer=(3,4)] - │ └── u:3 + v:4 [as=column6:6, outer=(3,4)] + │ ├── (u:3 * v:4) + 2 [as=column8:8, outer=(3,4), immutable] + │ └── u:3 + v:4 [as=column6:6, outer=(3,4), immutable] └── filters ├── 
column5:5 = u:3 [outer=(3,5), constraints=(/3: (/NULL - ]; /5: (/NULL - ]), fd=(3)==(5), (5)==(3)] ├── x:1 = column6:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] @@ -2831,15 +2885,18 @@ SELECT * FROM xy JOIN uv ON x+y=u AND x+u=v ---- project ├── columns: x:1!null y:2 u:3!null v:4!null + ├── immutable ├── key: (1) ├── fd: (1)-->(2), (1,2)-->(3,4), (3)-->(4) └── inner-join (hash) ├── columns: x:1!null y:2 u:3!null v:4!null column5:5!null ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + ├── immutable ├── key: (1) ├── fd: (1)-->(2), (1,2)-->(5), (3)-->(4), (3)==(5), (5)==(3) ├── project │ ├── columns: column5:5 x:1!null y:2 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2), (1,2)-->(5) │ ├── scan xy @@ -2847,13 +2904,13 @@ project │ │ ├── key: (1) │ │ └── fd: (1)-->(2) │ └── projections - │ └── x:1 + y:2 [as=column5:5, outer=(1,2)] + │ └── x:1 + y:2 [as=column5:5, outer=(1,2), immutable] ├── scan uv │ ├── columns: u:3!null v:4 │ ├── key: (3) │ └── fd: (3)-->(4) └── filters - ├── v:4 = (x:1 + u:3) [outer=(1,3,4), constraints=(/4: (/NULL - ])] + ├── v:4 = (x:1 + u:3) [outer=(1,3,4), immutable, constraints=(/4: (/NULL - ])] └── column5:5 = u:3 [outer=(3,5), constraints=(/3: (/NULL - ]; /5: (/NULL - ]), fd=(3)==(5), (5)==(3)] # Cases with non-extractable equality. 
@@ -2882,6 +2939,7 @@ SELECT * FROM xy FULL OUTER JOIN uv ON x+y=1 full-join (cross) ├── columns: x:1 y:2 u:3 v:4 ├── multiplicity: left-rows(one-or-more), right-rows(one-or-more) + ├── immutable ├── key: (1,3) ├── fd: (1)-->(2), (3)-->(4) ├── scan xy @@ -2893,7 +2951,7 @@ full-join (cross) │ ├── key: (3) │ └── fd: (3)-->(4) └── filters - └── (x:1 + y:2) = 1 [outer=(1,2)] + └── (x:1 + y:2) = 1 [outer=(1,2), immutable] norm expect-not=ExtractJoinEqualities SELECT * FROM xy FULL OUTER JOIN uv ON 1=u+v @@ -2901,6 +2959,7 @@ SELECT * FROM xy FULL OUTER JOIN uv ON 1=u+v full-join (cross) ├── columns: x:1 y:2 u:3 v:4 ├── multiplicity: left-rows(one-or-more), right-rows(one-or-more) + ├── immutable ├── key: (1,3) ├── fd: (1)-->(2), (3)-->(4) ├── scan xy @@ -2912,7 +2971,7 @@ full-join (cross) │ ├── key: (3) │ └── fd: (3)-->(4) └── filters - └── (u:3 + v:4) = 1 [outer=(3,4)] + └── (u:3 + v:4) = 1 [outer=(3,4), immutable] norm expect-not=ExtractJoinEqualities SELECT * FROM xy INNER JOIN uv ON (SELECT k FROM a WHERE i=x)=u diff --git a/pkg/sql/opt/norm/testdata/rules/limit b/pkg/sql/opt/norm/testdata/rules/limit index 13976acf9325..bdedbdeee47e 100644 --- a/pkg/sql/opt/norm/testdata/rules/limit +++ b/pkg/sql/opt/norm/testdata/rules/limit @@ -51,8 +51,9 @@ limit │ └── limit hint: 100.00 └── 100 -# Don't eliminate the outer limit if it's less than the inner. -norm +# Don't eliminate the outer limit if it's less than the inner (the limit is +# instead removed by FoldLimits). 
+norm expect-not=EliminateLimit SELECT * FROM (SELECT * FROM a LIMIT 100) LIMIT 99 ---- limit @@ -60,22 +61,15 @@ limit ├── cardinality: [0 - 99] ├── key: (1) ├── fd: (1)-->(2-5) - ├── limit + ├── scan a │ ├── columns: k:1!null i:2 f:3 s:4 j:5 - │ ├── cardinality: [0 - 100] │ ├── key: (1) │ ├── fd: (1)-->(2-5) - │ ├── limit hint: 99.00 - │ ├── scan a - │ │ ├── columns: k:1!null i:2 f:3 s:4 j:5 - │ │ ├── key: (1) - │ │ ├── fd: (1)-->(2-5) - │ │ └── limit hint: 100.00 - │ └── 100 + │ └── limit hint: 99.00 └── 99 # High limits (> max uint32), can't eliminate in this case. -norm +norm expect-not=EliminateLimit SELECT * FROM (SELECT * FROM a LIMIT 5000000000) LIMIT 5100000000 ---- limit @@ -95,8 +89,9 @@ limit │ └── 5000000000 └── 5100000000 -# Don't eliminate in case of negative limit. -norm +# Don't eliminate in case of negative limit (the limit is instead removed by +# FoldLimits). +norm expect-not=EliminateLimit SELECT * FROM (SELECT * FROM a LIMIT 0) LIMIT -1 ---- limit @@ -169,6 +164,7 @@ SELECT k, f*2.0 AS r FROM a LIMIT 5 project ├── columns: k:1!null r:6 ├── cardinality: [0 - 5] + ├── immutable ├── key: (1) ├── fd: (1)-->(6) ├── limit @@ -183,7 +179,7 @@ project │ │ └── limit hint: 5.00 │ └── 5 └── projections - └── f:3 * 2.0 [as=r:6, outer=(3)] + └── f:3 * 2.0 [as=r:6, outer=(3), immutable] norm expect=PushLimitIntoProject SELECT k, f*2.0 AS r FROM a ORDER BY k LIMIT 5 @@ -191,6 +187,7 @@ SELECT k, f*2.0 AS r FROM a ORDER BY k LIMIT 5 project ├── columns: k:1!null r:6 ├── cardinality: [0 - 5] + ├── immutable ├── key: (1) ├── fd: (1)-->(6) ├── ordering: +1 @@ -209,7 +206,7 @@ project │ │ └── limit hint: 5.00 │ └── 5 └── projections - └── f:3 * 2.0 [as=r:6, outer=(3)] + └── f:3 * 2.0 [as=r:6, outer=(3), immutable] # Don't push the limit through project when the ordering is on a # synthesized column. 
@@ -220,17 +217,20 @@ limit ├── columns: k:1!null r:6 ├── internal-ordering: +6 ├── cardinality: [0 - 5] + ├── immutable ├── key: (1) ├── fd: (1)-->(6) ├── ordering: +6 ├── sort │ ├── columns: k:1!null r:6 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(6) │ ├── ordering: +6 │ ├── limit hint: 5.00 │ └── project │ ├── columns: r:6 k:1!null + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(6) │ ├── scan a @@ -238,7 +238,7 @@ limit │ │ ├── key: (1) │ │ └── fd: (1)-->(3) │ └── projections - │ └── f:3 * 2.0 [as=r:6, outer=(3)] + │ └── f:3 * 2.0 [as=r:6, outer=(3), immutable] └── 5 @@ -249,6 +249,7 @@ SELECT f, f+1.1 AS r FROM (SELECT f, i FROM a GROUP BY f, i) a ORDER BY f LIMIT project ├── columns: f:3 r:6 ├── cardinality: [0 - 5] + ├── immutable ├── ordering: +3 ├── limit │ ├── columns: i:2 f:3 @@ -270,7 +271,7 @@ project │ │ └── columns: i:2 f:3 │ └── 5 └── projections - └── f:3 + 1.1 [as=r:6, outer=(3)] + └── f:3 + 1.1 [as=r:6, outer=(3), immutable] # Don't push negative limit into Scan. norm @@ -297,6 +298,7 @@ SELECT k, f*2.0 AS r FROM a OFFSET 5 ---- project ├── columns: k:1!null r:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(6) ├── offset @@ -309,13 +311,14 @@ project │ │ └── fd: (1)-->(3) │ └── 5 └── projections - └── f:3 * 2.0 [as=r:6, outer=(3)] + └── f:3 * 2.0 [as=r:6, outer=(3), immutable] norm expect=PushOffsetIntoProject SELECT k, f*2.0 AS r FROM a ORDER BY k OFFSET 5 ---- project ├── columns: k:1!null r:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(6) ├── ordering: +1 @@ -332,7 +335,7 @@ project │ │ └── ordering: +1 │ └── 5 └── projections - └── f:3 * 2.0 [as=r:6, outer=(3)] + └── f:3 * 2.0 [as=r:6, outer=(3), immutable] # Don't push the offset through project when the ordering is on a # synthesized column. 
@@ -342,16 +345,19 @@ SELECT k, f*2.0 AS r FROM a ORDER BY r OFFSET 5 offset ├── columns: k:1!null r:6 ├── internal-ordering: +6 + ├── immutable ├── key: (1) ├── fd: (1)-->(6) ├── ordering: +6 ├── sort │ ├── columns: k:1!null r:6 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(6) │ ├── ordering: +6 │ └── project │ ├── columns: r:6 k:1!null + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(6) │ ├── scan a @@ -359,7 +365,7 @@ offset │ │ ├── key: (1) │ │ └── fd: (1)-->(3) │ └── projections - │ └── f:3 * 2.0 [as=r:6, outer=(3)] + │ └── f:3 * 2.0 [as=r:6, outer=(3), immutable] └── 5 # Detect PushOffsetIntoProject and FilterUnusedOffsetCols dependency cycle. @@ -368,6 +374,7 @@ SELECT f, f+1.1 AS r FROM (SELECT f, i FROM a GROUP BY f, i) a ORDER BY f OFFSET ---- project ├── columns: f:3 r:6 + ├── immutable ├── ordering: +3 ├── offset │ ├── columns: i:2 f:3 @@ -386,7 +393,7 @@ project │ │ └── columns: i:2 f:3 │ └── 5 └── projections - └── f:3 + 1.1 [as=r:6, outer=(3)] + └── f:3 + 1.1 [as=r:6, outer=(3), immutable] # -------------------------------------------------- # PushLimitIntoProject + PushOffsetIntoProject @@ -397,6 +404,7 @@ SELECT k, f*2.0 AS r FROM a OFFSET 5 LIMIT 10 project ├── columns: k:1!null r:6 ├── cardinality: [0 - 10] + ├── immutable ├── key: (1) ├── fd: (1)-->(6) ├── offset @@ -417,7 +425,7 @@ project │ │ └── 15 │ └── 5 └── projections - └── f:3 * 2.0 [as=r:6, outer=(3)] + └── f:3 * 2.0 [as=r:6, outer=(3), immutable] norm expect=(PushLimitIntoProject,PushOffsetIntoProject) SELECT f, f+1.1 AS r FROM (SELECT f, i FROM a GROUP BY f, i) a ORDER BY f OFFSET 5 LIMIT 10 @@ -425,6 +433,7 @@ SELECT f, f+1.1 AS r FROM (SELECT f, i FROM a GROUP BY f, i) a ORDER BY f OFFSET project ├── columns: f:3 r:6 ├── cardinality: [0 - 10] + ├── immutable ├── ordering: +3 ├── offset │ ├── columns: i:2 f:3 @@ -453,7 +462,7 @@ project │ │ └── 15 │ └── 5 └── projections - └── f:3 + 1.1 [as=r:6, outer=(3)] + └── f:3 + 1.1 [as=r:6, outer=(3), immutable] # 
-------------------------------------------------- # PushLimitIntoOffset @@ -1109,18 +1118,11 @@ limit │ │ ├── cardinality: [0 - 10] │ │ ├── key: (1) │ │ ├── fd: (1)-->(2) - │ │ ├── limit + │ │ ├── scan ab │ │ │ ├── columns: a:1!null b:2 - │ │ │ ├── cardinality: [0 - 20] │ │ │ ├── key: (1) │ │ │ ├── fd: (1)-->(2) - │ │ │ ├── limit hint: 10.00 - │ │ │ ├── scan ab - │ │ │ │ ├── columns: a:1!null b:2 - │ │ │ │ ├── key: (1) - │ │ │ │ ├── fd: (1)-->(2) - │ │ │ │ └── limit hint: 20.00 - │ │ │ └── 20 + │ │ │ └── limit hint: 10.00 │ │ └── 10 │ ├── scan uv │ │ ├── columns: u:3!null v:4 @@ -1245,3 +1247,158 @@ limit │ └── filters │ └── a:1 = u:3 [outer=(1,3), constraints=(/1: (/NULL - ]; /3: (/NULL - ]), fd=(1)==(3), (3)==(1)] └── 10 + +# ---------- +# FoldLimits +# ---------- + +# Basic case with no orderings. +norm expect=FoldLimits +SELECT * FROM (SELECT * FROM ab LIMIT 10) LIMIT 5 +---- +limit + ├── columns: a:1!null b:2 + ├── cardinality: [0 - 5] + ├── key: (1) + ├── fd: (1)-->(2) + ├── scan ab + │ ├── columns: a:1!null b:2 + │ ├── key: (1) + │ ├── fd: (1)-->(2) + │ └── limit hint: 5.00 + └── 5 + +# Case where the inner limit has an ordering and the outer limit is unordered. +norm expect=FoldLimits +SELECT * FROM (SELECT * FROM ab ORDER BY a LIMIT 10) LIMIT 5 +---- +limit + ├── columns: a:1!null b:2 + ├── internal-ordering: +1 + ├── cardinality: [0 - 5] + ├── key: (1) + ├── fd: (1)-->(2) + ├── scan ab + │ ├── columns: a:1!null b:2 + │ ├── key: (1) + │ ├── fd: (1)-->(2) + │ ├── ordering: +1 + │ └── limit hint: 5.00 + └── 5 + +# Case where the inner limit ordering implies the outer ordering. 
+norm expect=FoldLimits +SELECT * FROM (SELECT * FROM a ORDER BY i, f LIMIT 10) ORDER BY i LIMIT 5 +---- +limit + ├── columns: k:1!null i:2 f:3 s:4 j:5 + ├── internal-ordering: +2,+3 + ├── cardinality: [0 - 5] + ├── key: (1) + ├── fd: (1)-->(2-5) + ├── ordering: +2 + ├── sort + │ ├── columns: k:1!null i:2 f:3 s:4 j:5 + │ ├── key: (1) + │ ├── fd: (1)-->(2-5) + │ ├── ordering: +2,+3 + │ ├── limit hint: 5.00 + │ └── scan a + │ ├── columns: k:1!null i:2 f:3 s:4 j:5 + │ ├── key: (1) + │ └── fd: (1)-->(2-5) + └── 5 + +# No-op case where the outer limit is larger than the inner limit. (The limit is +# instead removed by EliminateLimit). +norm expect-not=FoldLimits +SELECT * FROM (SELECT * FROM ab LIMIT 5) LIMIT 10 +---- +limit + ├── columns: a:1!null b:2 + ├── cardinality: [0 - 5] + ├── key: (1) + ├── fd: (1)-->(2) + ├── scan ab + │ ├── columns: a:1!null b:2 + │ ├── key: (1) + │ ├── fd: (1)-->(2) + │ └── limit hint: 5.00 + └── 5 + +# No-op case where the inner limit ordering does not imply the outer limit +# ordering. +norm expect-not=FoldLimits +SELECT * FROM (SELECT * FROM ab ORDER BY b LIMIT 10) ORDER BY a LIMIT 5 +---- +limit + ├── columns: a:1!null b:2 + ├── internal-ordering: +1 + ├── cardinality: [0 - 5] + ├── key: (1) + ├── fd: (1)-->(2) + ├── ordering: +1 + ├── sort + │ ├── columns: a:1!null b:2 + │ ├── cardinality: [0 - 10] + │ ├── key: (1) + │ ├── fd: (1)-->(2) + │ ├── ordering: +1 + │ ├── limit hint: 5.00 + │ └── limit + │ ├── columns: a:1!null b:2 + │ ├── internal-ordering: +2 + │ ├── cardinality: [0 - 10] + │ ├── key: (1) + │ ├── fd: (1)-->(2) + │ ├── sort + │ │ ├── columns: a:1!null b:2 + │ │ ├── key: (1) + │ │ ├── fd: (1)-->(2) + │ │ ├── ordering: +2 + │ │ ├── limit hint: 10.00 + │ │ └── scan ab + │ │ ├── columns: a:1!null b:2 + │ │ ├── key: (1) + │ │ └── fd: (1)-->(2) + │ └── 10 + └── 5 + +# No-op case where the outer ordering implies the inner, but the inner doesn't +# imply the outer. 
+norm expect-not=FoldLimits +SELECT * FROM (SELECT * FROM a ORDER BY i LIMIT 10) ORDER BY i, f LIMIT 5 +---- +limit + ├── columns: k:1!null i:2 f:3 s:4 j:5 + ├── internal-ordering: +2,+3 + ├── cardinality: [0 - 5] + ├── key: (1) + ├── fd: (1)-->(2-5) + ├── ordering: +2,+3 + ├── sort (segmented) + │ ├── columns: k:1!null i:2 f:3 s:4 j:5 + │ ├── cardinality: [0 - 10] + │ ├── key: (1) + │ ├── fd: (1)-->(2-5) + │ ├── ordering: +2,+3 + │ ├── limit hint: 5.00 + │ └── limit + │ ├── columns: k:1!null i:2 f:3 s:4 j:5 + │ ├── internal-ordering: +2 + │ ├── cardinality: [0 - 10] + │ ├── key: (1) + │ ├── fd: (1)-->(2-5) + │ ├── ordering: +2 + │ ├── sort + │ │ ├── columns: k:1!null i:2 f:3 s:4 j:5 + │ │ ├── key: (1) + │ │ ├── fd: (1)-->(2-5) + │ │ ├── ordering: +2 + │ │ ├── limit hint: 10.00 + │ │ └── scan a + │ │ ├── columns: k:1!null i:2 f:3 s:4 j:5 + │ │ ├── key: (1) + │ │ └── fd: (1)-->(2-5) + │ └── 10 + └── 5 diff --git a/pkg/sql/opt/norm/testdata/rules/numeric b/pkg/sql/opt/norm/testdata/rules/numeric index c95cd3ba267e..c4a57aeca72b 100644 --- a/pkg/sql/opt/norm/testdata/rules/numeric +++ b/pkg/sql/opt/norm/testdata/rules/numeric @@ -16,15 +16,16 @@ FROM a ---- project ├── columns: r:6 s:7 t:8 u:9 v:10 w:11 + ├── immutable ├── scan a │ └── columns: i:2 f:3 d:4 └── projections - ├── i:2 + i:2 [as=r:6, outer=(2)] - ├── i:2 + i:2 [as=s:7, outer=(2)] - ├── f:3 + f:3 [as=t:8, outer=(3)] - ├── f:3 + f:3 [as=u:9, outer=(3)] - ├── d:4 + d:4 [as=v:10, outer=(4)] - └── d:4 + d:4 [as=w:11, outer=(4)] + ├── i:2 + i:2 [as=r:6, outer=(2), immutable] + ├── i:2 + i:2 [as=s:7, outer=(2), immutable] + ├── f:3 + f:3 [as=t:8, outer=(3), immutable] + ├── f:3 + f:3 [as=u:9, outer=(3), immutable] + ├── d:4 + d:4 [as=v:10, outer=(4), immutable] + └── d:4 + d:4 [as=w:11, outer=(4), immutable] # Regression test for #35113. 
@@ -64,12 +65,13 @@ FROM a ---- project ├── columns: r:6 s:7 t:8 + ├── immutable ├── scan a │ └── columns: i:2 f:3 d:4 └── projections - ├── i:2 + i:2 [as=r:6, outer=(2)] - ├── f:3 + f:3 [as=s:7, outer=(3)] - └── d:4 + d:4 [as=t:8, outer=(4)] + ├── i:2 + i:2 [as=r:6, outer=(2), immutable] + ├── f:3 + f:3 [as=s:7, outer=(3), immutable] + └── d:4 + d:4 [as=t:8, outer=(4), immutable] # Regression test for #35113. norm expect=FoldMinusZero @@ -108,15 +110,16 @@ FROM a ---- project ├── columns: r:6 s:7 t:8 u:9 v:10 w:11 + ├── immutable ├── scan a │ └── columns: i:2 f:3 d:4 └── projections - ├── i:2 + i:2 [as=r:6, outer=(2)] - ├── i:2 + i:2 [as=s:7, outer=(2)] - ├── f:3 + f:3 [as=t:8, outer=(3)] - ├── f:3 + f:3 [as=u:9, outer=(3)] - ├── d:4 + d:4 [as=v:10, outer=(4)] - └── d:4 + d:4 [as=w:11, outer=(4)] + ├── i:2 + i:2 [as=r:6, outer=(2), immutable] + ├── i:2 + i:2 [as=s:7, outer=(2), immutable] + ├── f:3 + f:3 [as=t:8, outer=(3), immutable] + ├── f:3 + f:3 [as=u:9, outer=(3), immutable] + ├── d:4 + d:4 [as=v:10, outer=(4), immutable] + └── d:4 + d:4 [as=w:11, outer=(4), immutable] # Regression test for #35113. 
norm expect=FoldMultOne @@ -197,12 +200,13 @@ FROM a ---- project ├── columns: r:6 s:7 t:8 + ├── immutable ├── scan a │ └── columns: i:2 f:3 d:4 a.t:5 └── projections - ├── f:3 - f:3 [as=r:6, outer=(3)] - ├── i:2 - d:4 [as=s:7, outer=(2,4)] - └── a.t:5 - a.t:5 [as=t:8, outer=(5)] + ├── f:3 - f:3 [as=r:6, outer=(3), immutable] + ├── i:2 - d:4 [as=s:7, outer=(2,4), immutable] + └── a.t:5 - a.t:5 [as=t:8, outer=(5), immutable] # -------------------------------------------------- # EliminateUnaryMinus diff --git a/pkg/sql/opt/norm/testdata/rules/ordering b/pkg/sql/opt/norm/testdata/rules/ordering index 0240a6d35021..8ad1113dfab5 100644 --- a/pkg/sql/opt/norm/testdata/rules/ordering +++ b/pkg/sql/opt/norm/testdata/rules/ordering @@ -209,20 +209,23 @@ EXPLAIN SELECT b, b+1 AS plus, c FROM abcde ORDER BY b, plus, c ---- explain ├── columns: tree:7 field:8 description:9 + ├── immutable └── sort ├── columns: b:2 plus:6 c:3 + ├── immutable ├── lax-key: (2,3) ├── fd: (2)-->(6) ├── ordering: +2,+3 └── project ├── columns: plus:6 b:2 c:3 + ├── immutable ├── lax-key: (2,3) ├── fd: (2)-->(6) ├── scan abcde │ ├── columns: b:2 c:3 │ └── lax-key: (2,3) └── projections - └── b:2 + 1 [as=plus:6, outer=(2)] + └── b:2 + 1 [as=plus:6, outer=(2), immutable] # Regression: Explain a statement having constant column, but with no ordering. norm diff --git a/pkg/sql/opt/norm/testdata/rules/project b/pkg/sql/opt/norm/testdata/rules/project index f180b75d1886..6724fe951e74 100644 --- a/pkg/sql/opt/norm/testdata/rules/project +++ b/pkg/sql/opt/norm/testdata/rules/project @@ -86,11 +86,12 @@ SELECT 1+b.x FROM b LEFT JOIN a ON b.x = a.x ---- project ├── columns: "?column?":7!null + ├── immutable ├── scan b │ ├── columns: b.x:1!null │ └── key: (1) └── projections - └── b.x:1 + 1 [as="?column?":7, outer=(1)] + └── b.x:1 + 1 [as="?column?":7, outer=(1), immutable] # Case with no references to the left side. 
norm expect=EliminateJoinUnderProjectLeft @@ -124,6 +125,7 @@ SELECT b.x, b.z, 1+a.x FROM b LEFT JOIN a ON b.x = a.x ---- project ├── columns: x:1!null z:2 "?column?":7 + ├── immutable ├── key: (1) ├── fd: (1)-->(2,7) ├── left-join (hash) @@ -141,7 +143,7 @@ project │ └── filters │ └── b.x:1 = a.x:3 [outer=(1,3), constraints=(/1: (/NULL - ]; /3: (/NULL - ]), fd=(1)==(3), (3)==(1)] └── projections - └── a.x:3 + 1 [as="?column?":7, outer=(3)] + └── a.x:3 + 1 [as="?column?":7, outer=(3), immutable] # No-op case because r2 is nullable, and therefore rows may not match despite # the fact that it is a foreign key. @@ -306,6 +308,7 @@ SELECT y+1 AS r FROM (SELECT a.y FROM a, b WHERE a.x=b.x) a ---- project ├── columns: r:7 + ├── immutable ├── inner-join (hash) │ ├── columns: a.x:1!null y:2 b.x:5!null │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-one) @@ -321,7 +324,7 @@ project │ └── filters │ └── a.x:1 = b.x:5 [outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] └── projections - └── y:2 + 1 [as=r:7, outer=(2)] + └── y:2 + 1 [as=r:7, outer=(2), immutable] # Outer and inner projections have synthesized columns. norm expect=MergeProjects @@ -329,11 +332,12 @@ SELECT y1, f+1 FROM (SELECT y+1 AS y1, f FROM a) ---- project ├── columns: y1:5 "?column?":6 + ├── immutable ├── scan a │ └── columns: y:2 f:3 └── projections - ├── f:3 + 1.0 [as="?column?":6, outer=(3)] - └── y:2 + 1 [as=y1:5, outer=(2)] + ├── f:3 + 1.0 [as="?column?":6, outer=(3), immutable] + └── y:2 + 1 [as=y1:5, outer=(2), immutable] # Multiple synthesized columns in both outer and inner projections. 
norm expect=MergeProjects @@ -341,15 +345,16 @@ SELECT y1, f+1, x2, s||'foo' FROM (SELECT y+1 AS y1, f, s, x*2 AS x2 FROM a) ---- project ├── columns: y1:5 "?column?":7 x2:6!null "?column?":8 + ├── immutable ├── scan a │ ├── columns: x:1!null y:2 f:3 s:4 │ ├── key: (1) │ └── fd: (1)-->(2-4) └── projections - ├── f:3 + 1.0 [as="?column?":7, outer=(3)] - ├── s:4 || 'foo' [as="?column?":8, outer=(4)] - ├── y:2 + 1 [as=y1:5, outer=(2)] - └── x:1 * 2 [as=x2:6, outer=(1)] + ├── f:3 + 1.0 [as="?column?":7, outer=(3), immutable] + ├── s:4 || 'foo' [as="?column?":8, outer=(4), immutable] + ├── y:2 + 1 [as=y1:5, outer=(2), immutable] + └── x:1 * 2 [as=x2:6, outer=(1), immutable] # Outer project selects subset of inner columns. norm expect=MergeProjects @@ -357,10 +362,11 @@ SELECT y1 FROM (SELECT y+1 AS y1, f*2 AS f2 FROM a) ---- project ├── columns: y1:5 + ├── immutable ├── scan a │ └── columns: y:2 └── projections - └── y:2 + 1 [as=y1:5, outer=(2)] + └── y:2 + 1 [as=y1:5, outer=(2), immutable] # Don't merge, since outer depends on inner. norm expect-not=MergeProjects @@ -368,14 +374,16 @@ SELECT y1*2, y1/2 FROM (SELECT y+1 AS y1 FROM a) ---- project ├── columns: "?column?":6 "?column?":7 + ├── immutable ├── project │ ├── columns: y1:5 + │ ├── immutable │ ├── scan a │ │ └── columns: y:2 │ └── projections - │ └── y:2 + 1 [as=y1:5, outer=(2)] + │ └── y:2 + 1 [as=y1:5, outer=(2), immutable] └── projections - ├── y1:5 * 2 [as="?column?":6, outer=(5)] + ├── y1:5 * 2 [as="?column?":6, outer=(5), immutable] └── y1:5 / 2 [as="?column?":7, outer=(5)] # Discard all inner columns. 
@@ -459,7 +467,7 @@ project │ ├── fd: ()-->(1) │ └── ($1::INT8,) └── projections - ├── column1:1 + 1 [as="?column?":3, outer=(1)] + ├── column1:1 + 1 [as="?column?":3, outer=(1), immutable] └── 3 [as="?column?":4] # -------------------------------------------------- @@ -525,16 +533,19 @@ SELECT (SELECT (tup).@1 * x FROM b) FROM (VALUES ((1,2)), ((3,4))) AS v(tup) project ├── columns: "?column?":5 ├── cardinality: [1 - ] + ├── immutable ├── ensure-distinct-on │ ├── columns: "?column?":4 rownum:8!null │ ├── grouping columns: rownum:8!null │ ├── error: "more than one row returned by a subquery used as an expression" │ ├── cardinality: [1 - ] + │ ├── immutable │ ├── key: (8) │ ├── fd: (8)-->(4) │ ├── left-join-apply │ │ ├── columns: "?column?":4 column1_1:6!null rownum:8!null │ │ ├── cardinality: [2 - ] + │ │ ├── immutable │ │ ├── fd: (8)-->(6) │ │ ├── ordinality │ │ │ ├── columns: column1_1:6!null rownum:8!null @@ -549,11 +560,12 @@ project │ │ ├── project │ │ │ ├── columns: "?column?":4 │ │ │ ├── outer: (6) + │ │ │ ├── immutable │ │ │ ├── scan b │ │ │ │ ├── columns: x:2!null │ │ │ │ └── key: (2) │ │ │ └── projections - │ │ │ └── x:2 * column1_1:6 [as="?column?":4, outer=(2,6)] + │ │ │ └── x:2 * column1_1:6 [as="?column?":4, outer=(2,6), immutable] │ │ └── filters (true) │ └── aggregations │ └── const-agg [as="?column?":4, outer=(4)] @@ -838,6 +850,7 @@ SELECT x*1, x+1 FROM (VALUES (1), (2)) f(x) project ├── columns: "?column?":2!null "?column?":3!null ├── cardinality: [2 - 2] + ├── immutable ├── fd: (2)-->(3) ├── values │ ├── columns: "?column?":2!null @@ -845,7 +858,7 @@ project │ ├── (1,) │ └── (2,) └── projections - └── "?column?":2 + 1 [as="?column?":3, outer=(2)] + └── "?column?":2 + 1 [as="?column?":3, outer=(2), immutable] # Case with a subquery reference to a remapped column. 
norm expect=PushColumnRemappingIntoValues diff --git a/pkg/sql/opt/norm/testdata/rules/project_set b/pkg/sql/opt/norm/testdata/rules/project_set index 805bce7ba816..970727a0c8d7 100644 --- a/pkg/sql/opt/norm/testdata/rules/project_set +++ b/pkg/sql/opt/norm/testdata/rules/project_set @@ -192,8 +192,10 @@ SELECT unnest(ARRAY[x,y]), unnest(ARRAY[1,x*100]) FROM xy ---- project ├── columns: unnest:4 unnest:5 + ├── immutable └── inner-join-apply ├── columns: x:1!null y:2 unnest:4 unnest:5 + ├── immutable ├── fd: (1)-->(2) ├── scan xy │ ├── columns: x:1!null y:2 @@ -203,6 +205,7 @@ project │ ├── columns: unnest:4 unnest:5 │ ├── outer: (1,2) │ ├── cardinality: [2 - 2] + │ ├── immutable │ ├── (x:1, 1) │ └── (y:2, x:1 * 100) └── filters (true) diff --git a/pkg/sql/opt/norm/testdata/rules/prune_cols b/pkg/sql/opt/norm/testdata/rules/prune_cols index 51fd55beee7a..ac78d128c501 100644 --- a/pkg/sql/opt/norm/testdata/rules/prune_cols +++ b/pkg/sql/opt/norm/testdata/rules/prune_cols @@ -53,11 +53,12 @@ SELECT k1*2 FROM (SELECT k+1 AS k1, i+1 FROM a) a ---- project ├── columns: "?column?":7!null + ├── immutable ├── scan a │ ├── columns: k:1!null │ └── key: (1) └── projections - └── (k:1 + 1) * 2 [as="?column?":7, outer=(1)] + └── (k:1 + 1) * 2 [as="?column?":7, outer=(1), immutable] # Use column values within computed column. norm expect=PruneProjectCols @@ -94,7 +95,7 @@ project │ └── projections │ └── length(s:4) [as=l:5, outer=(4), immutable] └── projections - └── l:5 * 2 [as="?column?":6, outer=(5)] + └── l:5 * 2 [as="?column?":6, outer=(5), immutable] # Compute column based on another computed column. 
norm expect=PruneProjectCols @@ -117,7 +118,7 @@ project │ └── projections │ └── length(s:4) [as=l:5, outer=(4), immutable] └── projections - └── l:5 * l:5 [as=r:6, outer=(5)] + └── l:5 * l:5 [as=r:6, outer=(5), immutable] # -------------------------------------------------- # PruneScanCols @@ -137,6 +138,7 @@ SELECT k, k+1 AS r, i+1 AS s FROM a ---- project ├── columns: k:1!null r:5!null s:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(5,6) ├── scan a @@ -144,8 +146,8 @@ project │ ├── key: (1) │ └── fd: (1)-->(2) └── projections - ├── k:1 + 1 [as=r:5, outer=(1)] - └── i:2 + 1 [as=s:6, outer=(2)] + ├── k:1 + 1 [as=r:5, outer=(1), immutable] + └── i:2 + 1 [as=s:6, outer=(2), immutable] # Use columns only in computed columns. norm expect=PruneScanCols @@ -153,12 +155,13 @@ SELECT k+i AS r FROM a ---- project ├── columns: r:5 + ├── immutable ├── scan a │ ├── columns: k:1!null i:2 │ ├── key: (1) │ └── fd: (1)-->(2) └── projections - └── k:1 + i:2 [as=r:5, outer=(1,2)] + └── k:1 + i:2 [as=r:5, outer=(1,2), immutable] # Use no scan columns. norm expect=PruneScanCols @@ -232,8 +235,10 @@ SELECT i-1 AS r, k*k AS t FROM a WHERE k+1<5 AND s||'o'='foo' ---- project ├── columns: r:5 t:6!null + ├── immutable ├── select │ ├── columns: k:1!null i:2 s:4 + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2,4) │ ├── scan a @@ -242,10 +247,10 @@ project │ │ └── fd: (1)-->(2,4) │ └── filters │ ├── k:1 < 4 [outer=(1), constraints=(/1: (/NULL - /3]; tight)] - │ └── (s:4 || 'o') = 'foo' [outer=(4)] + │ └── (s:4 || 'o') = 'foo' [outer=(4), immutable] └── projections - ├── i:2 - 1 [as=r:5, outer=(2)] - └── k:1 * k:1 [as=t:6, outer=(1)] + ├── i:2 - 1 [as=r:5, outer=(2), immutable] + └── k:1 * k:1 [as=t:6, outer=(1), immutable] # Select nested in select. 
norm expect=PruneSelectCols @@ -290,8 +295,10 @@ SELECT f, f+1.1 AS r FROM (SELECT f, k FROM a GROUP BY f, k HAVING sum(k)=100) a ---- project ├── columns: f:3 r:6 + ├── immutable ├── select │ ├── columns: k:1!null f:3 sum:5!null + │ ├── immutable │ ├── key: (1) │ ├── fd: ()-->(5), (1)-->(3) │ ├── group-by @@ -309,9 +316,9 @@ project │ │ └── const-agg [as=f:3, outer=(3)] │ │ └── f:3 │ └── filters - │ └── sum:5 = 100 [outer=(5), constraints=(/5: [/100 - /100]; tight), fd=()-->(5)] + │ └── sum:5 = 100 [outer=(5), immutable, constraints=(/5: [/100 - /100]; tight), fd=()-->(5)] └── projections - └── f:3 + 1.1 [as=r:6, outer=(3)] + └── f:3 + 1.1 [as=r:6, outer=(3), immutable] # -------------------------------------------------- # PruneLimitCols @@ -416,6 +423,7 @@ SELECT f, f*2.0 AS r FROM (SELECT f, s FROM a GROUP BY f, s LIMIT 5) a project ├── columns: f:3 r:5 ├── cardinality: [0 - 5] + ├── immutable ├── limit │ ├── columns: f:3 s:4 │ ├── cardinality: [0 - 5] @@ -430,7 +438,7 @@ project │ │ └── limit hint: 6.02 │ └── 5 └── projections - └── f:3 * 2.0 [as=r:5, outer=(3)] + └── f:3 * 2.0 [as=r:5, outer=(3), immutable] # -------------------------------------------------- # PruneOffsetCols @@ -657,6 +665,7 @@ SELECT f, f*2.0 AS r FROM (SELECT f, s FROM a GROUP BY f, s OFFSET 5 LIMIT 5) a project ├── columns: f:3 r:5 ├── cardinality: [0 - 5] + ├── immutable ├── offset │ ├── columns: f:3 s:4 │ ├── cardinality: [0 - 5] @@ -676,7 +685,7 @@ project │ │ └── 10 │ └── 5 └── projections - └── f:3 * 2.0 [as=r:5, outer=(3)] + └── f:3 * 2.0 [as=r:5, outer=(3), immutable] # -------------------------------------------------- # PruneJoinLeftCols @@ -733,6 +742,7 @@ SELECT a.k+1 AS r, xy.* FROM a FULL JOIN xy ON True ---- project ├── columns: r:7 x:5 y:6 + ├── immutable ├── fd: (5)-->(6) ├── full-join (cross) │ ├── columns: k:1 x:5 y:6 @@ -748,7 +758,7 @@ project │ │ └── fd: (5)-->(6) │ └── filters (true) └── projections - └── k:1 + 1 [as=r:7, outer=(1)] + └── k:1 + 1 [as=r:7, outer=(1), 
immutable] # No columns needed from left side of join. norm expect=PruneJoinLeftCols @@ -770,18 +780,22 @@ SELECT a.k+1 AS r, a.i/2 AS s, xy.* FROM a INNER JOIN xy ON a.k*a.k=xy.x AND a.s ---- project ├── columns: r:8!null s:9 x:5!null y:6 + ├── immutable ├── fd: (5)-->(6) ├── inner-join (hash) │ ├── columns: k:1!null i:2 x:5!null y:6 column7:7!null │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2,7), (5)-->(6), (5)==(7), (7)==(5) │ ├── project │ │ ├── columns: column7:7!null k:1!null i:2 + │ │ ├── immutable │ │ ├── key: (1) │ │ ├── fd: (1)-->(2,7) │ │ ├── select │ │ │ ├── columns: k:1!null i:2 a.s:4 + │ │ │ ├── immutable │ │ │ ├── key: (1) │ │ │ ├── fd: (1)-->(2,4) │ │ │ ├── scan a @@ -789,9 +803,9 @@ project │ │ │ │ ├── key: (1) │ │ │ │ └── fd: (1)-->(2,4) │ │ │ └── filters - │ │ │ └── (a.s:4 || 'o') = 'foo' [outer=(4)] + │ │ │ └── (a.s:4 || 'o') = 'foo' [outer=(4), immutable] │ │ └── projections - │ │ └── k:1 * k:1 [as=column7:7, outer=(1)] + │ │ └── k:1 * k:1 [as=column7:7, outer=(1), immutable] │ ├── scan xy │ │ ├── columns: x:5!null y:6 │ │ ├── key: (5) @@ -799,7 +813,7 @@ project │ └── filters │ └── column7:7 = x:5 [outer=(5,7), constraints=(/5: (/NULL - ]; /7: (/NULL - ]), fd=(5)==(7), (7)==(5)] └── projections - ├── k:1 + 1 [as=r:8, outer=(1)] + ├── k:1 + 1 [as=r:8, outer=(1), immutable] └── i:2 / 2 [as=s:9, outer=(2)] # Join that is nested in another join. 
@@ -849,20 +863,24 @@ WHERE (SELECT k+1 AS r FROM xy WHERE y=k) = 1 ---- project ├── columns: k:1!null i:2 + ├── immutable ├── key: (1) ├── fd: (1)-->(2) └── select ├── columns: k:1!null i:2 r:7!null + ├── immutable ├── key: (1) ├── fd: ()-->(7), (1)-->(2) ├── ensure-distinct-on │ ├── columns: k:1!null i:2 r:7 │ ├── grouping columns: k:1!null │ ├── error: "more than one row returned by a subquery used as an expression" + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2,7) │ ├── left-join-apply │ │ ├── columns: k:1!null i:2 y:6 r:7 + │ │ ├── immutable │ │ ├── fd: (1)-->(2) │ │ ├── scan a │ │ │ ├── columns: k:1!null i:2 @@ -871,11 +889,12 @@ project │ │ ├── project │ │ │ ├── columns: r:7 y:6 │ │ │ ├── outer: (1) + │ │ │ ├── immutable │ │ │ ├── fd: ()-->(7) │ │ │ ├── scan xy │ │ │ │ └── columns: y:6 │ │ │ └── projections - │ │ │ └── k:1 + 1 [as=r:7, outer=(1)] + │ │ │ └── k:1 + 1 [as=r:7, outer=(1), immutable] │ │ └── filters │ │ └── y:6 = k:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] │ └── aggregations @@ -1012,6 +1031,7 @@ SELECT xy.*, a.k+1 AS r FROM xy FULL JOIN a ON True ---- project ├── columns: x:1 y:2 r:7 + ├── immutable ├── fd: (1)-->(2) ├── full-join (cross) │ ├── columns: x:1 y:2 k:3 @@ -1027,7 +1047,7 @@ project │ │ └── key: (3) │ └── filters (true) └── projections - └── k:3 + 1 [as=r:7, outer=(3)] + └── k:3 + 1 [as=r:7, outer=(3), immutable] # No columns needed from right side of join. 
norm expect=PruneJoinRightCols @@ -1049,10 +1069,12 @@ SELECT xy.*, a.k+1 AS r, a.i/2 AS s FROM xy INNER JOIN a ON xy.x=a.k*a.k AND a.s ---- project ├── columns: x:1!null y:2 r:8!null s:9 + ├── immutable ├── fd: (1)-->(2) ├── inner-join (hash) │ ├── columns: x:1!null y:2 k:3!null i:4 column7:7!null │ ├── multiplicity: left-rows(zero-or-more), right-rows(zero-or-one) + │ ├── immutable │ ├── key: (3) │ ├── fd: (1)-->(2), (3)-->(4,7), (1)==(7), (7)==(1) │ ├── scan xy @@ -1061,10 +1083,12 @@ project │ │ └── fd: (1)-->(2) │ ├── project │ │ ├── columns: column7:7!null k:3!null i:4 + │ │ ├── immutable │ │ ├── key: (3) │ │ ├── fd: (3)-->(4,7) │ │ ├── select │ │ │ ├── columns: k:3!null i:4 a.s:6 + │ │ │ ├── immutable │ │ │ ├── key: (3) │ │ │ ├── fd: (3)-->(4,6) │ │ │ ├── scan a @@ -1072,13 +1096,13 @@ project │ │ │ │ ├── key: (3) │ │ │ │ └── fd: (3)-->(4,6) │ │ │ └── filters - │ │ │ └── (a.s:6 || 'o') = 'foo' [outer=(6)] + │ │ │ └── (a.s:6 || 'o') = 'foo' [outer=(6), immutable] │ │ └── projections - │ │ └── k:3 * k:3 [as=column7:7, outer=(3)] + │ │ └── k:3 * k:3 [as=column7:7, outer=(3), immutable] │ └── filters │ └── x:1 = column7:7 [outer=(1,7), constraints=(/1: (/NULL - ]; /7: (/NULL - ]), fd=(1)==(7), (7)==(1)] └── projections - ├── k:3 + 1 [as=r:8, outer=(3)] + ├── k:3 + 1 [as=r:8, outer=(3), immutable] └── i:4 / 2 [as=s:9, outer=(4)] # Join that is nested in another join. 
@@ -1144,6 +1168,7 @@ SELECT a.k, xy.x, a.k+xy.x AS r FROM a LEFT JOIN xy ON a.k=xy.x ---- project ├── columns: k:1!null x:5 r:7 + ├── immutable ├── key: (1) ├── fd: (1)-->(5), (1,5)-->(7) ├── left-join (hash) @@ -1160,7 +1185,7 @@ project │ └── filters │ └── k:1 = x:5 [outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] └── projections - └── k:1 + x:5 [as=r:7, outer=(1,5)] + └── k:1 + x:5 [as=r:7, outer=(1,5), immutable] # -------------------------------------------------- # PruneAggCols @@ -1344,6 +1369,7 @@ SELECT s, i+1 AS r FROM a GROUP BY i, s, s||'foo' ---- project ├── columns: s:4 r:6 + ├── immutable ├── distinct-on │ ├── columns: i:2 s:4 │ ├── grouping columns: i:2 s:4 @@ -1351,7 +1377,7 @@ project │ └── scan a │ └── columns: i:2 s:4 └── projections - └── i:2 + 1 [as=r:6, outer=(2)] + └── i:2 + 1 [as=r:6, outer=(2), immutable] # Groupby a groupby. norm expect=PruneGroupByCols @@ -1400,14 +1426,16 @@ SELECT icnt FROM (SELECT count(i+1) AS icnt, count(k+1) FROM a); scalar-group-by ├── columns: icnt:6!null ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(6) ├── project │ ├── columns: column5:5 + │ ├── immutable │ ├── scan a │ │ └── columns: i:2 │ └── projections - │ └── i:2 + 1 [as=column5:5, outer=(2)] + │ └── i:2 + 1 [as=column5:5, outer=(2), immutable] └── aggregations └── count [as=count:6, outer=(5)] └── column5:5 @@ -1499,9 +1527,11 @@ SELECT k FROM (SELECT k, min(s) FROM a GROUP BY k HAVING sum(i) > 5) ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── select ├── columns: k:1!null sum:6!null + ├── immutable ├── key: (1) ├── fd: (1)-->(6) ├── group-by @@ -1517,7 +1547,7 @@ project │ └── sum [as=sum:6, outer=(2)] │ └── i:2 └── filters - └── sum:6 > 5 [outer=(6), constraints=(/6: (/5 - ]; tight)] + └── sum:6 > 5 [outer=(6), immutable, constraints=(/6: (/5 - ]; tight)] # -------------------------------------------------- # PruneOrdinalityCols @@ -1822,16 +1852,19 @@ delete a ├── columns: 
k:5!null column9:9!null ├── internal-ordering: +9 ├── cardinality: [0 - 10] + ├── immutable ├── key: (5) ├── fd: (5)-->(9) ├── sort │ ├── columns: k:5!null column9:9!null + │ ├── immutable │ ├── key: (5) │ ├── fd: (5)-->(9) │ ├── ordering: +9 │ ├── limit hint: 10.00 │ └── project │ ├── columns: column9:9!null k:5!null + │ ├── immutable │ ├── key: (5) │ ├── fd: (5)-->(9) │ ├── select @@ -1845,7 +1878,7 @@ delete a │ │ └── filters │ │ └── i:6 > 0 [outer=(6), constraints=(/6: [/1 - ]; tight)] │ └── projections - │ └── i:6 * 2 [as=column9:9, outer=(6)] + │ └── i:6 * 2 [as=column9:9, outer=(6), immutable] └── 10 # Prune when a secondary index is present on the table. @@ -1931,6 +1964,7 @@ update "family" ├── volatile, side-effects, mutations └── project ├── columns: a_new:11!null a:6!null b:7 c:8 d:9 e:10 + ├── immutable ├── key: (6) ├── fd: (6)-->(7-11) ├── select @@ -1944,7 +1978,7 @@ update "family" │ └── filters │ └── a:6 > 100 [outer=(6), constraints=(/6: [/101 - ]; tight)] └── projections - └── a:6 + 1 [as=a_new:11, outer=(6)] + └── a:6 + 1 [as=a_new:11, outer=(6), immutable] # Do not prune columns that must be returned. norm expect=(PruneMutationFetchCols, PruneMutationReturnCols) @@ -1963,6 +1997,7 @@ project ├── fd: (1)-->(2) └── project ├── columns: c_new:11 a:6!null b:7 c:8 d:9 + ├── immutable ├── key: (6) ├── fd: (6)-->(7-9), (8)-->(11) ├── scan "family" @@ -1970,7 +2005,7 @@ project │ ├── key: (6) │ └── fd: (6)-->(7-9) └── projections - └── c:8 + 1 [as=c_new:11, outer=(8)] + └── c:8 + 1 [as=c_new:11, outer=(8), immutable] # Prune unused upsert columns. 
norm expect=PruneMutationInputCols @@ -1992,6 +2027,7 @@ upsert a └── project ├── columns: upsert_i:15 column1:5!null column2:6!null column7:7 column8:8 k:9 i:10 f:11 s:12 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(5-12,15) ├── left-join (cross) @@ -2019,7 +2055,7 @@ upsert a │ │ └── k:9 = 1 [outer=(9), constraints=(/9: [/1 - /1]; tight), fd=()-->(9)] │ └── filters (true) └── projections - └── CASE WHEN k:9 IS NULL THEN column7:7 ELSE i:10 + 1 END [as=upsert_i:15, outer=(7,9,10)] + └── CASE WHEN k:9 IS NULL THEN column7:7 ELSE i:10 + 1 END [as=upsert_i:15, outer=(7,9,10), immutable] # Prune update columns replaced by upsert columns. # TODO(andyk): Need to also prune output columns. @@ -2049,6 +2085,7 @@ upsert a └── project ├── columns: upsert_k:14 upsert_i:15 upsert_f:16 upsert_s:17 column1:5!null column2:6!null column7:7 column8:8 k:9 i:10 f:11 s:12 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(5-12,14-17) ├── left-join (cross) @@ -2077,7 +2114,7 @@ upsert a │ └── filters (true) └── projections ├── CASE WHEN k:9 IS NULL THEN column1:5 ELSE k:9 END [as=upsert_k:14, outer=(5,9)] - ├── CASE WHEN k:9 IS NULL THEN column7:7 ELSE i:10 + 1 END [as=upsert_i:15, outer=(7,9,10)] + ├── CASE WHEN k:9 IS NULL THEN column7:7 ELSE i:10 + 1 END [as=upsert_i:15, outer=(7,9,10), immutable] ├── CASE WHEN k:9 IS NULL THEN column8:8 ELSE f:11 END [as=upsert_f:16, outer=(8,9,11)] └── CASE WHEN k:9 IS NULL THEN column2:6 ELSE s:12 END [as=upsert_s:17, outer=(6,9,12)] @@ -2326,6 +2363,7 @@ project ├── fd: (8)-->(1-7) └── project ├── columns: a_new:17 a:9 b:10 c:11 d:12 e:13 f:14 g:15 rowid:16!null + ├── immutable ├── key: (16) ├── fd: (16)-->(9-15), (9)~~>(10-16), (9)-->(17) ├── scan returning_test @@ -2333,7 +2371,7 @@ project │ ├── key: (16) │ └── fd: (16)-->(9-15), (9)~~>(10-16) └── projections - └── a:9 + 1 [as=a_new:17, outer=(9)] + └── a:9 + 1 [as=a_new:17, outer=(9), immutable] # Fetch all the columns in the (d, e, f, g) family as d is 
being set. @@ -2355,6 +2393,7 @@ project ├── fd: (8)-->(1,4), (1)~~>(4,8) └── project ├── columns: d_new:17 a:9 d:12 e:13 f:14 g:15 rowid:16!null + ├── immutable ├── key: (16) ├── fd: (16)-->(9,12-15), (9)~~>(12-16), (9,12)-->(17) ├── scan returning_test @@ -2362,7 +2401,7 @@ project │ ├── key: (16) │ └── fd: (16)-->(9,12-15), (9)~~>(12-16) └── projections - └── a:9 + d:12 [as=d_new:17, outer=(9,12)] + └── a:9 + d:12 [as=d_new:17, outer=(9,12), immutable] # Fetch only whats being updated (not the (d, e, f, g) family). norm @@ -2381,6 +2420,7 @@ project ├── fd: (8)-->(1) └── project ├── columns: a_new:17 a:9 rowid:16!null + ├── immutable ├── key: (16) ├── fd: (16)-->(9,17), (9)~~>(16,17) ├── scan returning_test @@ -2388,7 +2428,7 @@ project │ ├── key: (16) │ └── fd: (16)-->(9,12), (9)~~>(12,16) └── projections - └── a:9 + d:12 [as=a_new:17, outer=(9,12)] + └── a:9 + d:12 [as=a_new:17, outer=(9,12), immutable] # We only fetch the minimal set of columns which is (a, b, c, rowid). norm @@ -2410,6 +2450,7 @@ project ├── fd: (8)-->(1-3), (2)~~>(1,3,8) └── project ├── columns: a_new:17 a:9 b:10 c:11 rowid:16!null + ├── immutable ├── key: (16) ├── fd: (16)-->(9-11), (9)~~>(10,11,16), (9,10)-->(17) ├── scan returning_test @@ -2417,7 +2458,7 @@ project │ ├── key: (16) │ └── fd: (16)-->(9-11), (9)~~>(10,11,16) └── projections - └── a:9 + b:10 [as=a_new:17, outer=(9,10)] + └── a:9 + b:10 [as=a_new:17, outer=(9,10), immutable] # We apply the PruneMutationReturnCols rule multiple times, to get @@ -2442,6 +2483,7 @@ with &1 │ ├── fd: (8)-->(1-3) │ └── project │ ├── columns: a_new:17 returning_test.a:9 returning_test.b:10 returning_test.c:11 rowid:16!null + │ ├── immutable │ ├── key: (16) │ ├── fd: (16)-->(9-11), (9)~~>(10,11,16), (9)-->(17) │ ├── scan returning_test @@ -2449,7 +2491,7 @@ with &1 │ │ ├── key: (16) │ │ └── fd: (16)-->(9-11), (9)~~>(10,11,16) │ └── projections - │ └── returning_test.a:9 + 1 [as=a_new:17, outer=(9)] + │ └── returning_test.a:9 + 1 [as=a_new:17, 
outer=(9), immutable] └── project ├── columns: a:21 ├── with-scan &1 @@ -2482,6 +2524,7 @@ with &1 │ ├── fd: (8)-->(1-3) │ └── project │ ├── columns: a_new:17 returning_test.a:9 returning_test.b:10 returning_test.c:11 rowid:16!null + │ ├── immutable │ ├── key: (16) │ ├── fd: (16)-->(9-11), (9)~~>(10,11,16), (9)-->(17) │ ├── scan returning_test @@ -2489,7 +2532,7 @@ with &1 │ │ ├── key: (16) │ │ └── fd: (16)-->(9-11), (9)~~>(10,11,16) │ └── projections - │ └── returning_test.a:9 + 1 [as=a_new:17, outer=(9)] + │ └── returning_test.a:9 + 1 [as=a_new:17, outer=(9), immutable] └── project ├── columns: a:21!null ├── select @@ -2528,6 +2571,7 @@ with &2 │ ├── fd: (18)-->(11-13) │ └── project │ ├── columns: a_new:27 returning_test.a:19 returning_test.b:20 returning_test.c:21 rowid:26!null + │ ├── immutable │ ├── key: (26) │ ├── fd: (26)-->(19-21), (19)~~>(20,21,26), (19)-->(27) │ ├── scan returning_test @@ -2535,7 +2579,7 @@ with &2 │ │ ├── key: (26) │ │ └── fd: (26)-->(19-21), (19)~~>(20,21,26) │ └── projections - │ └── returning_test.a:19 + 1 [as=a_new:27, outer=(19)] + │ └── returning_test.a:19 + 1 [as=a_new:27, outer=(19), immutable] └── inner-join (cross) ├── columns: a:9 b:10 a:31!null b:32 ├── fd: (9)~~>(10) @@ -2633,7 +2677,7 @@ project │ │ └── a:14 = 1 [outer=(14), constraints=(/14: [/1 - /1]; tight), fd=()-->(14)] │ └── filters (true) └── projections - ├── CASE WHEN rowid:21 IS NULL THEN column1:9 ELSE column1:9 + a:14 END [as=upsert_a:23, outer=(9,14,21)] + ├── CASE WHEN rowid:21 IS NULL THEN column1:9 ELSE column1:9 + a:14 END [as=upsert_a:23, outer=(9,14,21), immutable] ├── CASE WHEN rowid:21 IS NULL THEN column2:10 ELSE b:15 END [as=upsert_b:24, outer=(10,15,21)] ├── CASE WHEN rowid:21 IS NULL THEN column3:11 ELSE c:16 END [as=upsert_c:25, outer=(11,16,21)] └── CASE WHEN rowid:21 IS NULL THEN column13:13 ELSE rowid:21 END [as=upsert_rowid:30, outer=(13,21)] @@ -2654,6 +2698,7 @@ project ├── fd: (8)-->(1,2,4), (1)-->(2,4,8) └── select ├── columns: a:9!null 
b:10 d:12 rowid:16!null + ├── immutable ├── key: (16) ├── fd: (16)-->(9,10,12), (9)-->(10,12,16) ├── scan returning_test @@ -2661,7 +2706,7 @@ project │ ├── key: (16) │ └── fd: (16)-->(9,10,12), (9)~~>(10,12,16) └── filters - └── a:9 < (b:10 + d:12) [outer=(9,10,12), constraints=(/9: (/NULL - ])] + └── a:9 < (b:10 + d:12) [outer=(9,10,12), immutable, constraints=(/9: (/NULL - ])] norm UPSERT INTO returning_test (a, b, c) VALUES (1, 2, 'c') RETURNING a, b, c, d @@ -2811,6 +2856,7 @@ SELECT a, b, c FROM abcde WHERE EXISTS (SELECT * FROM family WHERE abcde.a=famil ---- semi-join (hash) ├── columns: a:1!null b:2 c:3 + ├── immutable ├── key: (1) ├── fd: (1)-->(2,3), (2,3)~~>(1) ├── scan abcde @@ -2823,7 +2869,7 @@ semi-join (hash) │ └── fd: (6)-->(7,8) └── filters ├── abcde.a:1 = "family".a:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] - └── abcde.b:2 > ("family".b:7 + "family".c:8) [outer=(2,7,8), constraints=(/2: (/NULL - ])] + └── abcde.b:2 > ("family".b:7 + "family".c:8) [outer=(2,7,8), immutable, constraints=(/2: (/NULL - ])] norm expect=PruneSemiAntiJoinRightCols SELECT a, b, c FROM abcde WHERE NOT EXISTS (SELECT * FROM family WHERE abcde.a=family.a) diff --git a/pkg/sql/opt/norm/testdata/rules/reject_nulls b/pkg/sql/opt/norm/testdata/rules/reject_nulls index de7cbf3b88cf..a32bbef2785d 100644 --- a/pkg/sql/opt/norm/testdata/rules/reject_nulls +++ b/pkg/sql/opt/norm/testdata/rules/reject_nulls @@ -256,9 +256,11 @@ HAVING sum(DISTINCT y)=1 ---- project ├── columns: sum:7!null + ├── immutable ├── fd: ()-->(7) └── select ├── columns: k:1!null sum:7!null + ├── immutable ├── key: (1) ├── fd: ()-->(7) ├── group-by @@ -283,7 +285,7 @@ project │ └── sum │ └── y:6 └── filters - └── sum:7 = 1 [outer=(7), constraints=(/7: [/1 - /1]; tight), fd=()-->(7)] + └── sum:7 = 1 [outer=(7), immutable, constraints=(/7: [/1 - /1]; tight), fd=()-->(7)] # Single max aggregate function without grouping columns. 
norm expect=RejectNullsGroupBy @@ -651,18 +653,22 @@ HAVING string_agg(s || 'bar', ',')='foo' ---- project ├── columns: string_agg:9!null + ├── immutable ├── fd: ()-->(9) └── select ├── columns: k:3 string_agg:9!null + ├── immutable ├── key: (3) ├── fd: ()-->(9) ├── group-by │ ├── columns: k:3 string_agg:9 │ ├── grouping columns: k:3 + │ ├── immutable │ ├── key: (3) │ ├── fd: (3)-->(9) │ ├── project │ │ ├── columns: column7:7 column8:8!null k:3 + │ │ ├── immutable │ │ ├── fd: ()-->(8), (3)-->(7) │ │ ├── left-join (cross) │ │ │ ├── columns: k:3 s:6 @@ -675,7 +681,7 @@ project │ │ │ │ └── fd: (3)-->(6) │ │ │ └── filters (true) │ │ └── projections - │ │ ├── s:6 || 'bar' [as=column7:7, outer=(6)] + │ │ ├── s:6 || 'bar' [as=column7:7, outer=(6), immutable] │ │ └── ',' [as=column8:8] │ └── aggregations │ └── string-agg [as=string_agg:9, outer=(7,8)] @@ -716,15 +722,18 @@ exprnorm select ├── columns: sum:6!null ├── cardinality: [0 - 1] + ├── immutable ├── key: () ├── fd: ()-->(6) ├── scalar-group-by │ ├── columns: sum:6 │ ├── cardinality: [1 - 1] + │ ├── immutable │ ├── key: () │ ├── fd: ()-->(6) │ ├── inner-join-apply │ │ ├── columns: x:1!null u:3!null z:5 + │ │ ├── immutable │ │ ├── key: (1,3) │ │ ├── fd: (1,3)-->(5) │ │ ├── scan xy @@ -733,6 +742,7 @@ select │ │ ├── left-join-apply │ │ │ ├── columns: u:3!null z:5 │ │ │ ├── outer: (1) + │ │ │ ├── immutable │ │ │ ├── key: (3) │ │ │ ├── fd: (3)-->(5) │ │ │ ├── scan uv @@ -742,6 +752,7 @@ select │ │ │ │ ├── columns: z:5 │ │ │ │ ├── outer: (1,3) │ │ │ │ ├── cardinality: [1 - 1] + │ │ │ │ ├── immutable │ │ │ │ ├── key: () │ │ │ │ ├── fd: ()-->(5) │ │ │ │ └── (x:1 + u:3,) diff --git a/pkg/sql/opt/norm/testdata/rules/scalar b/pkg/sql/opt/norm/testdata/rules/scalar index 724008efdfc5..67b7f53cf36b 100644 --- a/pkg/sql/opt/norm/testdata/rules/scalar +++ b/pkg/sql/opt/norm/testdata/rules/scalar @@ -28,20 +28,21 @@ FROM a ---- project ├── columns: r:7 s:8 t:9!null u:10!null v:11 w:12 x:13 y:14 z:15 + ├── immutable ├── scan a │ ├── 
columns: k:1!null i:2 │ ├── key: (1) │ └── fd: (1)-->(2) └── projections - ├── k:1 = (i:2 + 1) [as=r:7, outer=(1,2)] - ├── i:2 != (2 - k:1) [as=s:8, outer=(1,2)] - ├── k:1 IS NOT DISTINCT FROM (i:2 + 1) [as=t:9, outer=(1,2)] - ├── k:1 IS DISTINCT FROM (i:2 - 1) [as=u:10, outer=(1,2)] - ├── k:1 + (i:2 * 2) [as=v:11, outer=(1,2)] - ├── k:1 * (i:2 + 2) [as=w:12, outer=(1,2)] - ├── k:1 & (i:2 ^ 2) [as=x:13, outer=(1,2)] - ├── k:1 | (i:2 ^ 2) [as=y:14, outer=(1,2)] - └── k:1 # (i:2 * i:2) [as=z:15, outer=(1,2)] + ├── k:1 = (i:2 + 1) [as=r:7, outer=(1,2), immutable] + ├── i:2 != (2 - k:1) [as=s:8, outer=(1,2), immutable] + ├── k:1 IS NOT DISTINCT FROM (i:2 + 1) [as=t:9, outer=(1,2), immutable] + ├── k:1 IS DISTINCT FROM (i:2 - 1) [as=u:10, outer=(1,2), immutable] + ├── k:1 + (i:2 * 2) [as=v:11, outer=(1,2), immutable] + ├── k:1 * (i:2 + 2) [as=w:12, outer=(1,2), immutable] + ├── k:1 & (i:2 ^ 2) [as=x:13, outer=(1,2), immutable] + ├── k:1 | (i:2 ^ 2) [as=y:14, outer=(1,2), immutable] + └── k:1 # (i:2 * i:2) [as=z:15, outer=(1,2), immutable] # -------------------------------------------------- # CommuteConst @@ -62,20 +63,21 @@ FROM a ---- project ├── columns: r:7 s:8 t:9!null u:10!null v:11 w:12 x:13 y:14 z:15!null + ├── immutable ├── scan a │ ├── columns: k:1!null i:2 f:3 │ ├── key: (1) │ └── fd: (1)-->(2,3) └── projections - ├── (i:2 + k:1) = 4 [as=r:7, outer=(1,2)] - ├── (i:2 * 2) != 3 [as=s:8, outer=(2)] - ├── (1 - k:1) IS NOT DISTINCT FROM 5 [as=t:9, outer=(1)] + ├── (i:2 + k:1) = 4 [as=r:7, outer=(1,2), immutable] + ├── (i:2 * 2) != 3 [as=s:8, outer=(2), immutable] + ├── (1 - k:1) IS NOT DISTINCT FROM 5 [as=t:9, outer=(1), immutable] ├── k:1 IS DISTINCT FROM 11 [as=u:10, outer=(1)] - ├── f:3 + 1.0 [as=v:11, outer=(3)] - ├── (i:2 * i:2) * 15 [as=w:12, outer=(2)] - ├── (i:2 + i:2) & 10000 [as=x:13, outer=(2)] - ├── (i:2 + i:2) | 4 [as=y:14, outer=(2)] - └── (k:1 ^ 2) # -2 [as=z:15, outer=(1)] + ├── f:3 + 1.0 [as=v:11, outer=(3), immutable] + ├── (i:2 * i:2) * 15 
[as=w:12, outer=(2), immutable] + ├── (i:2 + i:2) & 10000 [as=x:13, outer=(2), immutable] + ├── (i:2 + i:2) | 4 [as=y:14, outer=(2), immutable] + └── (k:1 ^ 2) # -2 [as=z:15, outer=(1), immutable] # -------------------------------------------------- # EliminateCoalesce @@ -119,10 +121,11 @@ SELECT COALESCE(NULL, NULL, s, s || 'foo') FROM a ---- project ├── columns: coalesce:7 + ├── immutable ├── scan a │ └── columns: s:4 └── projections - └── COALESCE(s:4, s:4 || 'foo') [as=coalesce:7, outer=(4)] + └── COALESCE(s:4, s:4 || 'foo') [as=coalesce:7, outer=(4), immutable] # Trailing null can't be removed. norm @@ -247,6 +250,7 @@ SELECT values ├── columns: "?column?":1 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(1) └── (true IN (NULL, NULL, ('201.249.149.90/18' & '97a7:3650:3dd8:d4e9:35fe:6cfb:a714:1c17/61') << 'e22f:2067:2ed2:7b07:b167:206f:f17b:5b7d/82'),) @@ -581,6 +585,7 @@ SELECT * FROM a WHERE j->'a' = '"b"'::JSON ---- select ├── columns: k:1!null i:2 f:3 s:4 j:5 arr:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-6) ├── scan a @@ -588,13 +593,14 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-6) └── filters - └── j:5 @> '{"a": "b"}' [outer=(5)] + └── j:5 @> '{"a": "b"}' [outer=(5), immutable] norm expect=NormalizeJSONFieldAccess SELECT * FROM a WHERE j->'a'->'x' = '"b"'::JSON ---- select ├── columns: k:1!null i:2 f:3 s:4 j:5 arr:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-6) ├── scan a @@ -602,7 +608,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-6) └── filters - └── j:5 @> '{"a": {"x": "b"}}' [outer=(5)] + └── j:5 @> '{"a": {"x": "b"}}' [outer=(5), immutable] # The transformation is not valid in this case. 
norm expect-not=NormalizeJSONFieldAccess @@ -610,6 +616,7 @@ SELECT * FROM a WHERE j->2 = '"b"'::JSON ---- select ├── columns: k:1!null i:2 f:3 s:4 j:5 arr:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-6) ├── scan a @@ -617,7 +624,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-6) └── filters - └── (j:5->2) = '"b"' [outer=(5)] + └── (j:5->2) = '"b"' [outer=(5), immutable] # The transformation is not valid in this case, since j->'a' could be an array. norm expect-not=NormalizeJSONFieldAccess @@ -625,6 +632,7 @@ SELECT * FROM a WHERE j->'a' @> '"b"'::JSON ---- select ├── columns: k:1!null i:2 f:3 s:4 j:5 arr:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-6) ├── scan a @@ -632,7 +640,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-6) └── filters - └── (j:5->'a') @> '"b"' [outer=(5)] + └── (j:5->'a') @> '"b"' [outer=(5), immutable] # The transformation is not valid in this case, since containment doesn't imply # equality for non-scalars. @@ -641,11 +649,12 @@ SELECT j->'a' = '["b"]'::JSON, j->'a' = '{"b": "c"}'::JSON FROM a ---- project ├── columns: "?column?":7 "?column?":8 + ├── immutable ├── scan a │ └── columns: j:5 └── projections - ├── (j:5->'a') = '["b"]' [as="?column?":7, outer=(5)] - └── (j:5->'a') = '{"b": "c"}' [as="?column?":8, outer=(5)] + ├── (j:5->'a') = '["b"]' [as="?column?":7, outer=(5), immutable] + └── (j:5->'a') = '{"b": "c"}' [as="?column?":8, outer=(5), immutable] # -------------------------------------------------- # NormalizeJSONContains @@ -656,6 +665,7 @@ SELECT * FROM a WHERE j->'a' @> '{"x": "b"}'::JSON ---- select ├── columns: k:1!null i:2 f:3 s:4 j:5 arr:6 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-6) ├── scan a @@ -663,7 +673,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-6) └── filters - └── j:5 @> '{"a": {"x": "b"}}' [outer=(5)] + └── j:5 @> '{"a": {"x": "b"}}' [outer=(5), immutable] # -------------------------------------------------- # SimplifyCaseWhenConstValue @@ -1072,9 +1082,11 @@ SELECT k FROM e WHERE tz > '2017-11-12 
07:35:01+00:00'::TIMESTAMP ---- project ├── columns: k:1!null + ├── stable ├── key: (1) └── select ├── columns: k:1!null tz:4!null + ├── stable ├── key: (1) ├── fd: (1)-->(4) ├── scan e @@ -1082,7 +1094,7 @@ project │ ├── key: (1) │ └── fd: (1)-->(4) └── filters - └── tz:4 > '2017-11-12 07:35:01+00:00' [outer=(4), constraints=(/4: (/NULL - ])] + └── tz:4 > '2017-11-12 07:35:01+00:00' [outer=(4), stable, constraints=(/4: (/NULL - ])] norm expect=UnifyComparisonTypes SELECT k FROM e WHERE tz > '2017-11-12 07:35:01+00:00'::TIMESTAMP @@ -1127,9 +1139,11 @@ SELECT k FROM e WHERE d > '2018-07-01' AND d < '2018-07-01'::DATE + '1w1s'::INTE ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── select ├── columns: k:1!null d:5!null + ├── immutable ├── key: (1) ├── fd: (1)-->(5) ├── scan e @@ -1138,7 +1152,7 @@ project │ └── fd: (1)-->(5) └── filters ├── d:5 > '2018-07-01' [outer=(5), constraints=(/5: [/'2018-07-02' - ]; tight)] - └── d:5 < '2018-07-08 00:00:01+00:00' [outer=(5), constraints=(/5: (/NULL - ])] + └── d:5 < '2018-07-08 00:00:01+00:00' [outer=(5), immutable, constraints=(/5: (/NULL - ])] # NULL value. 
norm @@ -1190,12 +1204,13 @@ SELECT k FROM a WHERE k IN (VALUES ((SELECT k*i FROM a)), (2), (3)) ---- select ├── columns: k:1!null + ├── immutable ├── key: (1) ├── scan a │ ├── columns: k:1!null │ └── key: (1) └── filters - └── in [outer=(1), subquery] + └── in [outer=(1), immutable, subquery] ├── k:1 └── tuple ├── subquery @@ -1203,16 +1218,18 @@ select │ ├── columns: "?column?":13 │ ├── error: "more than one row returned by a subquery used as an expression" │ ├── cardinality: [0 - 1] + │ ├── immutable │ ├── key: () │ ├── fd: ()-->(13) │ └── project │ ├── columns: "?column?":13 + │ ├── immutable │ ├── scan a │ │ ├── columns: k:7!null i:8 │ │ ├── key: (7) │ │ └── fd: (7)-->(8) │ └── projections - │ └── k:7 * i:8 [as="?column?":13, outer=(7,8)] + │ └── k:7 * i:8 [as="?column?":13, outer=(7,8), immutable] ├── 2 └── 3 @@ -1245,9 +1262,11 @@ SELECT k FROM a WHERE (k, i) IN (SELECT b, a FROM (VALUES (1, 1), (2, 2), (3, 3) ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── semi-join (hash) ├── columns: k:1!null column10:10 + ├── immutable ├── key: (1) ├── fd: (1)-->(10) ├── project @@ -1272,7 +1291,7 @@ project │ └── projections │ └── (column2:8, column1:7) [as=column9:9, outer=(7,8)] └── filters - └── column10:10 = column9:9 [outer=(9,10), constraints=(/9: (/NULL - ]; /10: (/NULL - ]), fd=(9)==(10), (10)==(9)] + └── column10:10 = column9:9 [outer=(9,10), immutable, constraints=(/9: (/NULL - ]; /10: (/NULL - ]), fd=(9)==(10), (10)==(9)] # -------------------------------------------------- # SimplifyEqualsAnyTuple diff --git a/pkg/sql/opt/norm/testdata/rules/select b/pkg/sql/opt/norm/testdata/rules/select index bc00ba6b0167..a113435cc086 100644 --- a/pkg/sql/opt/norm/testdata/rules/select +++ b/pkg/sql/opt/norm/testdata/rules/select @@ -270,9 +270,11 @@ SELECT k FROM e WHERE d > '2018-07-01' AND d < '2018-07-01'::DATE + '1w1s'::INTE ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── select ├── columns: k:1!null d:5!null + ├── immutable 
├── key: (1) ├── fd: (1)-->(5) ├── scan e @@ -281,7 +283,7 @@ project │ └── fd: (1)-->(5) └── filters ├── d:5 > '2018-07-01' [outer=(5), constraints=(/5: [/'2018-07-02' - ]; tight)] - └── d:5 < '2018-07-08 00:00:01+00:00' [outer=(5), constraints=(/5: (/NULL - ])] + └── d:5 < '2018-07-08 00:00:01+00:00' [outer=(5), immutable, constraints=(/5: (/NULL - ])] # Ranges can be merged with other filters to create new ranges. norm expect=ConsolidateSelectFilters disable=InlineConstVar @@ -533,6 +535,7 @@ SELECT * FROM (SELECT i, i+1 AS r, f FROM a) a WHERE f=10.0 ---- project ├── columns: i:2 r:6 f:3!null + ├── immutable ├── fd: ()-->(3), (2)-->(6) ├── select │ ├── columns: i:2 f:3!null @@ -542,7 +545,7 @@ project │ └── filters │ └── f:3 = 10.0 [outer=(3), constraints=(/3: [/10.0 - /10.0]; tight), fd=()-->(3)] └── projections - └── i:2 + 1 [as=r:6, outer=(2)] + └── i:2 + 1 [as=r:6, outer=(2), immutable] # Don't push down select if it depends on computed column that can't be inlined. norm expect-not=PushSelectIntoProject @@ -550,6 +553,7 @@ SELECT * FROM (SELECT i, i/2 div, f FROM a) a WHERE div=2 ---- select ├── columns: i:2 div:6!null f:3 + ├── immutable ├── fd: ()-->(6) ├── project │ ├── columns: div:6 i:2 f:3 @@ -559,7 +563,7 @@ select │ └── projections │ └── i:2 / 2 [as=div:6, outer=(2)] └── filters - └── div:6 = 2 [outer=(6), constraints=(/6: [/2 - /2]; tight), fd=()-->(6)] + └── div:6 = 2 [outer=(6), immutable, constraints=(/6: [/2 - /2]; tight), fd=()-->(6)] # Push down some conjuncts, but not others. 
norm expect=PushSelectIntoProject @@ -567,6 +571,7 @@ SELECT * FROM (SELECT i, i/2 div, f FROM a) a WHERE 10.0=f AND 2=div AND i=1 ---- select ├── columns: i:2!null div:6!null f:3!null + ├── immutable ├── fd: ()-->(2,3,6) ├── project │ ├── columns: div:6!null i:2!null f:3!null @@ -582,7 +587,7 @@ select │ └── projections │ └── i:2 / 2 [as=div:6, outer=(2)] └── filters - └── div:6 = 2 [outer=(6), constraints=(/6: [/2 - /2]; tight), fd=()-->(6)] + └── div:6 = 2 [outer=(6), immutable, constraints=(/6: [/2 - /2]; tight), fd=()-->(6)] # Detect PushSelectIntoProject and FilterUnusedSelectCols dependency cycle. norm @@ -590,6 +595,7 @@ SELECT f, f+1.1 AS r FROM (SELECT f, i FROM a GROUP BY f, i HAVING sum(f)=10.0) ---- project ├── columns: f:3 r:7 + ├── immutable ├── select │ ├── columns: i:2 f:3 sum:6!null │ ├── key: (2,3) @@ -607,7 +613,7 @@ project │ └── filters │ └── sum:6 = 10.0 [outer=(6), constraints=(/6: [/10.0 - /10.0]; tight), fd=()-->(6)] └── projections - └── f:3 + 1.1 [as=r:7, outer=(3)] + └── f:3 + 1.1 [as=r:7, outer=(3), immutable] # -------------------------------------- # PushSelectCondLeftIntoJoinLeftAndRight @@ -1395,9 +1401,11 @@ SELECT k FROM b WHERE i+k IS NOT NULL ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── select ├── columns: k:1!null i:2 + ├── immutable ├── key: (1) ├── fd: (1)-->(2) ├── scan b @@ -1405,7 +1413,7 @@ project │ ├── key: (1) │ └── fd: (1)-->(2) └── filters - └── (i:2 + k:1) IS NOT NULL [outer=(1,2)] + └── (i:2 + k:1) IS NOT NULL [outer=(1,2), immutable] # -------------------------------------------------- # DetectSelectContradiction diff --git a/pkg/sql/opt/norm/testdata/rules/with b/pkg/sql/opt/norm/testdata/rules/with index 378ec83f101a..9b61b82b4fad 100644 --- a/pkg/sql/opt/norm/testdata/rules/with +++ b/pkg/sql/opt/norm/testdata/rules/with @@ -137,6 +137,7 @@ WITH foo AS (SELECT 1), bar AS (SELECT 2) SELECT (SELECT * FROM foo) + (SELECT * values ├── columns: "?column?":5 ├── cardinality: [1 - 1] + ├── 
immutable ├── key: () ├── fd: ()-->(5) └── tuple @@ -162,6 +163,7 @@ WITH foo AS (SELECT 1), bar AS (SELECT 2) SELECT (SELECT * FROM foo) + (SELECT * with &2 (bar) ├── columns: "?column?":6 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(6) ├── values @@ -173,6 +175,7 @@ with &2 (bar) └── values ├── columns: "?column?":6 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(6) └── tuple @@ -647,6 +650,7 @@ with &2 (cte) ├── left columns: c:7(int) ├── right columns: "?column?":9(int) ├── cardinality: [1 - 2] + ├── immutable ├── stats: [rows=2, distinct(10)=2, null(10)=0] ├── cost: 0.1 ├── key: (10) @@ -663,6 +667,7 @@ with &2 (cte) └── project ├── columns: "?column?":9(int!null) ├── cardinality: [1 - 1] + ├── immutable ├── stats: [rows=1, distinct(9)=1, null(9)=0] ├── cost: 0.04 ├── key: () @@ -679,7 +684,7 @@ with &2 (cte) │ ├── fd: ()-->(8) │ └── prune: (8) └── projections - └── plus [as="?column?":9, type=int, outer=(8)] + └── plus [as="?column?":9, type=int, outer=(8), immutable] ├── variable: c:8 [type=int] └── const: 1 [type=int] @@ -800,6 +805,7 @@ with &2 (t) ├── columns: sum:6 ├── materialized ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(6) ├── recursive-c-t-e @@ -808,6 +814,7 @@ with &2 (t) │ ├── initial columns: column1:1 │ ├── recursive columns: "?column?":4 │ ├── cardinality: [1 - ] + │ ├── immutable │ ├── values │ │ ├── columns: column1:1!null │ │ ├── cardinality: [1 - 1] @@ -816,6 +823,7 @@ with &2 (t) │ │ └── (1,) │ └── project │ ├── columns: "?column?":4!null + │ ├── immutable │ ├── select │ │ ├── columns: n:3!null │ │ ├── with-scan &1 (t) @@ -826,7 +834,7 @@ with &2 (t) │ │ └── filters │ │ └── n:3 < 100 [outer=(3), constraints=(/3: (/NULL - /99]; tight)] │ └── projections - │ └── n:3 + 1 [as="?column?":4, outer=(3)] + │ └── n:3 + 1 [as="?column?":4, outer=(3), immutable] └── scalar-group-by ├── columns: sum:6 ├── cardinality: [1 - 1] @@ -848,17 +856,20 @@ WITH RECURSIVE t(n) AS NOT MATERIALIZED 
(VALUES (1) UNION ALL SELECT n+1 FROM t scalar-group-by ├── columns: sum:6 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(6) ├── project │ ├── columns: n:5 │ ├── cardinality: [1 - ] + │ ├── immutable │ ├── recursive-c-t-e │ │ ├── columns: n:2 │ │ ├── working table binding: &1 │ │ ├── initial columns: column1:1 │ │ ├── recursive columns: "?column?":4 │ │ ├── cardinality: [1 - ] + │ │ ├── immutable │ │ ├── values │ │ │ ├── columns: column1:1!null │ │ │ ├── cardinality: [1 - 1] @@ -867,6 +878,7 @@ scalar-group-by │ │ │ └── (1,) │ │ └── project │ │ ├── columns: "?column?":4!null + │ │ ├── immutable │ │ ├── select │ │ │ ├── columns: n:3!null │ │ │ ├── with-scan &1 (t) @@ -877,7 +889,7 @@ scalar-group-by │ │ │ └── filters │ │ │ └── n:3 < 100 [outer=(3), constraints=(/3: (/NULL - /99]; tight)] │ │ └── projections - │ │ └── n:3 + 1 [as="?column?":4, outer=(3)] + │ │ └── n:3 + 1 [as="?column?":4, outer=(3), immutable] │ └── projections │ └── n:2 [as=n:5, outer=(2)] └── aggregations diff --git a/pkg/sql/opt/optbuilder/builder.go b/pkg/sql/opt/optbuilder/builder.go index 137066619bd4..bd4f8f7e4298 100644 --- a/pkg/sql/opt/optbuilder/builder.go +++ b/pkg/sql/opt/optbuilder/builder.go @@ -231,7 +231,7 @@ func (b *Builder) buildStmt( stmt tree.Statement, desiredTypes []*types.T, inScope *scope, ) (outScope *scope) { if b.insideViewDef { - // A black list of statements that can't be used from inside a view. + // A blocklist of statements that can't be used from inside a view. switch stmt := stmt.(type) { case *tree.Delete, *tree.Insert, *tree.Update, *tree.CreateTable, *tree.CreateView, *tree.Split, *tree.Unsplit, *tree.Relocate, @@ -338,3 +338,18 @@ func (b *Builder) allocScope() *scope { r.builder = b return r } + +// trackReferencedColumnForViews is used to add a column to the view's +// dependencies. This should be called whenever a column reference is made in a +// view query. 
+func (b *Builder) trackReferencedColumnForViews(col *scopeColumn) { + if b.trackViewDeps { + for i := range b.viewDeps { + dep := b.viewDeps[i] + if ord, ok := dep.ColumnIDToOrd[col.id]; ok { + dep.ColumnOrdinals.Add(ord) + } + b.viewDeps[i] = dep + } + } +} diff --git a/pkg/sql/opt/optbuilder/join.go b/pkg/sql/opt/optbuilder/join.go index 1db24fca4c7e..901dfb1065a3 100644 --- a/pkg/sql/opt/optbuilder/join.go +++ b/pkg/sql/opt/optbuilder/join.go @@ -340,6 +340,8 @@ func (jb *usingJoinBuilder) buildUsingJoin(using *tree.UsingJoinCond) { jb.raiseUndefinedColError(name, "right") } + jb.b.trackReferencedColumnForViews(leftCol) + jb.b.trackReferencedColumnForViews(rightCol) jb.addEqualityCondition(leftCol, rightCol) } @@ -370,6 +372,8 @@ func (jb *usingJoinBuilder) buildNaturalJoin(natural tree.NaturalJoinCond) { rightCol := jb.findUsingColumn(jb.rightScope.cols, leftCol.name, "right table") if rightCol != nil { + jb.b.trackReferencedColumnForViews(leftCol) + jb.b.trackReferencedColumnForViews(rightCol) jb.addEqualityCondition(leftCol, rightCol) } } diff --git a/pkg/sql/opt/optbuilder/project.go b/pkg/sql/opt/optbuilder/project.go index 73e7e021d72e..4fc872d8d4cd 100644 --- a/pkg/sql/opt/optbuilder/project.go +++ b/pkg/sql/opt/optbuilder/project.go @@ -225,7 +225,9 @@ func (b *Builder) finishBuildScalar( } // Avoid synthesizing a new column if possible. - if col := outScope.findExistingCol(texpr, false /* allowSideEffects */); col != nil && col != outCol { + if col := outScope.findExistingCol( + texpr, false, /* allowSideEffects */ + ); col != nil && col != outCol { outCol.id = col.id outCol.scalar = scalar return scalar @@ -253,6 +255,8 @@ func (b *Builder) finishBuildScalar( func (b *Builder) finishBuildScalarRef( col *scopeColumn, inScope, outScope *scope, outCol *scopeColumn, colRefs *opt.ColSet, ) (out opt.ScalarExpr) { + + b.trackReferencedColumnForViews(col) // Update the sets of column references and outer columns if needed. 
if colRefs != nil { colRefs.Add(col.id) diff --git a/pkg/sql/opt/optbuilder/scope.go b/pkg/sql/opt/optbuilder/scope.go index c275c9282298..bd0340f7d076 100644 --- a/pkg/sql/opt/optbuilder/scope.go +++ b/pkg/sql/opt/optbuilder/scope.go @@ -574,8 +574,14 @@ func findExistingColInList( // findExistingCol finds the given expression among the bound variables in this // scope. Returns nil if the expression is not found (or an expression is found // but it has side-effects and allowSideEffects is false). +// If a column is found and we are tracking view dependencies, we add the column +// to the view dependencies since it means this column is being referenced. func (s *scope) findExistingCol(expr tree.TypedExpr, allowSideEffects bool) *scopeColumn { - return findExistingColInList(expr, s.cols, allowSideEffects) + col := findExistingColInList(expr, s.cols, allowSideEffects) + if col != nil { + s.builder.trackReferencedColumnForViews(col) + } + return col } // startAggFunc is called when the builder starts building an aggregate diff --git a/pkg/sql/opt/optbuilder/select.go b/pkg/sql/opt/optbuilder/select.go index 8c33d2785267..164e6a016294 100644 --- a/pkg/sql/opt/optbuilder/select.go +++ b/pkg/sql/opt/optbuilder/select.go @@ -531,8 +531,11 @@ func (b *Builder) buildScan( if b.trackViewDeps { dep := opt.ViewDep{DataSource: tab} - for i := 0; i < colCount; i++ { - dep.ColumnOrdinals.Add(getOrdinal(i)) + dep.ColumnIDToOrd = make(map[opt.ColumnID]int) + // We will track the ColumnID to Ord mapping so Ords can be added + // when a column is referenced. 
+ for i, col := range outScope.cols { + dep.ColumnIDToOrd[col.id] = getOrdinal(i) } if private.Flags.ForceIndex { dep.SpecificIndex = true @@ -996,6 +999,7 @@ func (b *Builder) buildSelectClause( inScope *scope, ) (outScope *scope) { fromScope := b.buildFrom(sel.From, locking, inScope) + b.processWindowDefs(sel, fromScope) b.buildWhere(sel.Where, fromScope) diff --git a/pkg/sql/opt/optbuilder/testdata/create_view b/pkg/sql/opt/optbuilder/testdata/create_view index 6738aea2e07c..7ecd9e906a72 100644 --- a/pkg/sql/opt/optbuilder/testdata/create_view +++ b/pkg/sql/opt/optbuilder/testdata/create_view @@ -20,13 +20,13 @@ create-view t.public.v1 └── dependencies build -CREATE VIEW v1 AS SELECT a FROM ab +CREATE VIEW v1 AS SELECT a FROM ab ---- create-view t.public.v1 ├── SELECT a FROM t.public.ab ├── columns: a:1 └── dependencies - └── ab [columns: (0,1)] + └── ab [columns: a] # Test dependency on specific index. build @@ -36,7 +36,7 @@ create-view t.public.v1 ├── SELECT a FROM t.public.ab@idx ├── columns: a:1 └── dependencies - └── ab@idx [columns: (0,1)] + └── ab@idx [columns: a] build CREATE VIEW v1 AS SELECT a FROM ab@primary @@ -45,7 +45,7 @@ create-view t.public.v1 ├── SELECT a FROM t.public.ab@primary ├── columns: a:1 └── dependencies - └── ab@primary [columns: (0,1)] + └── ab@primary [columns: a] # Test dependency on view. exec-ddl @@ -53,13 +53,13 @@ CREATE VIEW av AS SELECT a FROM ab ---- build -CREATE VIEW v1 AS SELECT a FROM av +CREATE VIEW v1 AS SELECT a FROM av ---- create-view t.public.v1 ├── SELECT a FROM t.public.av ├── columns: a:1 └── dependencies - └── av [columns: (0)] + └── av build CREATE VIEW v1 AS SELECT av.a, ab.a FROM av, ab @@ -68,8 +68,8 @@ create-view t.public.v1 ├── SELECT av.a, ab.a FROM t.public.av, t.public.ab ├── columns: a:1 a:3 └── dependencies - ├── av [columns: (0)] - └── ab [columns: (0,1)] + ├── av + └── ab [columns: a] # Test that we don't report virtual table dependencies. 
build @@ -79,7 +79,7 @@ create-view t.public.v1 ├── SELECT a, table_schema FROM t.public.ab, "".information_schema.columns ├── columns: a:1 table_schema:5 └── dependencies - └── ab [columns: (0,1)] + └── ab [columns: a] # Test cases with specified column names. build @@ -89,9 +89,9 @@ create-view t.public.v2 ├── SELECT ab.a FROM t.public.ab, t.public.ab AS ab2, t.public.cd ├── columns: x:1 └── dependencies - ├── ab [columns: (0,1)] - ├── ab [columns: (0,1)] - └── cd [columns: (0,1)] + ├── ab [columns: a] + ├── ab [no columns] + └── cd [no columns] build CREATE VIEW v3 (x, y) AS SELECT a FROM ab @@ -116,7 +116,7 @@ create-view t.public.v5 ├── SELECT a FROM [53 AS t] ├── columns: a:1 └── dependencies - └── ab [columns: (0,1)] + └── ab [columns: a] # Verify that we only depend on the specified column. build @@ -126,7 +126,7 @@ create-view t.public.v6 ├── SELECT a FROM [53(1) AS t] ├── columns: a:1 └── dependencies - └── ab [columns: (0)] + └── ab [columns: a] # Verify dependency on sequence. build @@ -148,7 +148,7 @@ create-view t.public.v8 ├── WITH cd AS (SELECT a, b FROM t.public.ab) SELECT a + b FROM cd ├── columns: "?column?":5 └── dependencies - └── ab [columns: (0,1)] + └── ab [columns: a b] # Verify that we disallow mutation statements. build @@ -165,3 +165,161 @@ build CREATE VIEW v9 AS SELECT a,b FROM [DELETE FROM ab WHERE a>b RETURNING a, b] ---- error (42601): DELETE cannot be used inside a view definition + +# Regression 29021. + +# Dependencies should be tracked in the group by clause. +build +CREATE VIEW v10 AS SELECT a FROM ab GROUP BY a,b +---- +create-view t.public.v10 + ├── SELECT a FROM t.public.ab GROUP BY a, b + ├── columns: a:1 + └── dependencies + └── ab [columns: a b] + +# Dependencies should be tracked in the join on clause. 
+build +CREATE VIEW v10 as SELECT 1 FROM ab JOIN cd ON ab.a = cd.c +---- +create-view t.public.v10 + ├── SELECT 1 FROM t.public.ab JOIN t.public.cd ON ab.a = cd.c + ├── columns: "?column?":5 + └── dependencies + ├── ab [columns: a] + └── cd [columns: c] + +exec-ddl +CREATE TABLE ac (a INT, c INT) +---- + +# Dependencies should be tracked in a natural join clause. +build +CREATE VIEW v11 as SELECT 1 FROM ab NATURAL JOIN ac +---- +create-view t.public.v11 + ├── SELECT 1 FROM t.public.ab NATURAL JOIN t.public.ac + ├── columns: "?column?":6 + └── dependencies + ├── ab [columns: a] + └── ac [columns: a] + +# Dependencies should be tracked in a using join clause. +build +CREATE VIEW v12 as SELECT 1 FROM ab JOIN ac USING (a) +---- +create-view t.public.v12 + ├── SELECT 1 FROM t.public.ab JOIN t.public.ac USING (a) + ├── columns: "?column?":6 + └── dependencies + ├── ab [columns: a] + └── ac [columns: a] + +# Dependencies should be tracked in the where clause. +build +CREATE VIEW v13 AS SELECT a FROM ab WHERE b > 0 +---- +create-view t.public.v13 + ├── SELECT a FROM t.public.ab WHERE b > 0 + ├── columns: a:1 + └── dependencies + └── ab [columns: a b] + +# Dependencies should be tracked in aggregate / window functions. +build +CREATE VIEW v14 AS SELECT sum(a) FROM ab; +---- +create-view t.public.v14 + ├── SELECT sum(a) FROM t.public.ab + ├── columns: sum:3 + └── dependencies + └── ab [columns: a] + +# Dependencies should be tracked in partitions. +build +CREATE VIEW v15 AS SELECT sum(a) OVER (PARTITION by b) FROM ab; +---- +create-view t.public.v15 + ├── SELECT sum(a) OVER (PARTITION BY b) FROM t.public.ab + ├── columns: sum:3 + └── dependencies + └── ab [columns: a b] + +# Dependencies should be tracked in subqueries. 
+build +CREATE VIEW v16 AS SELECT a FROM (SELECT a,b FROM ab); +---- +create-view t.public.v16 + ├── SELECT a FROM (SELECT a, b FROM t.public.ab) + ├── columns: a:1 + └── dependencies + └── ab [columns: a b] + +# Dependencies should be tracked in the order by clause. +build +CREATE VIEW v16 AS SELECT a FROM ab ORDER BY b +---- +create-view t.public.v16 + ├── SELECT a FROM t.public.ab ORDER BY b + ├── columns: a:1 + └── dependencies + └── ab [columns: a b] + +exec-ddl +CREATE TABLE tf (f FLOAT) +---- + +# Dependencies should be tracked in ordered-set aggregate functions. +build +CREATE VIEW v17 AS SELECT percentile_cont(0.50) WITHIN GROUP (ORDER BY f) FROM tf +---- +create-view t.public.v17 + ├── SELECT percentile_cont(0.50) WITHIN GROUP (ORDER BY f) FROM t.public.tf + ├── columns: percentile_cont:4 + └── dependencies + └── tf [columns: f] + +# Dependencies should be tracked with multiple table statements. +build +CREATE VIEW v18 AS SELECT ab.a, ab2.b FROM ab, ab as ab2 +---- +create-view t.public.v18 + ├── SELECT ab.a, ab2.b FROM t.public.ab, t.public.ab AS ab2 + ├── columns: a:1 b:4 + └── dependencies + ├── ab [columns: a] + └── ab [columns: b] + +build +CREATE VIEW v19 AS SELECT 1 FROM (SELECT a FROM ab) t1 JOIN (SELECT b FROM AB) t2 on t1.a = t2.b +---- +create-view t.public.v19 + ├── SELECT 1 FROM (SELECT a FROM t.public.ab) AS t1 JOIN (SELECT b FROM t.public.ab) AS t2 ON t1.a = t2.b + ├── columns: "?column?":5 + └── dependencies + ├── ab [columns: a] + └── ab [columns: b] + +# Dependencies should be tracked if the column is used in a projection. +build +CREATE VIEW v20 AS SELECT a + b FROM ab +---- +create-view t.public.v20 + ├── SELECT a + b FROM t.public.ab + ├── columns: "?column?":3 + └── dependencies + └── ab [columns: a b] + +exec-ddl +CREATE TABLE abc (a INT, b INT, c INT) +---- + +# Dependencies should be tracked in an ORDER BY inside a partition. 
+build +CREATE VIEW v21 AS SELECT sum(a) OVER (PARTITION BY b ORDER BY c) FROM abc +---- +create-view t.public.v21 + ├── SELECT sum(a) OVER (PARTITION BY b ORDER BY c) FROM t.public.abc + ├── columns: sum:5 + └── dependencies + └── abc [columns: a b c] diff --git a/pkg/sql/opt/optbuilder/testdata/with b/pkg/sql/opt/optbuilder/testdata/with index aa378d742459..993def0ee9a9 100644 --- a/pkg/sql/opt/optbuilder/testdata/with +++ b/pkg/sql/opt/optbuilder/testdata/with @@ -93,8 +93,8 @@ create-view t.public.v1 ├── WITH t AS (SELECT a FROM t.public.y WHERE a < 3) SELECT 1 FROM t.public.x NATURAL JOIN t ├── columns: "?column?":7 └── dependencies - ├── y [columns: (0,1)] - └── x [columns: (0-2)] + ├── y [columns: a] + └── x [columns: a] build CREATE TABLE t1 AS diff --git a/pkg/sql/opt/optbuilder/with.go b/pkg/sql/opt/optbuilder/with.go index 025fc28a7170..2cb0956144fb 100644 --- a/pkg/sql/opt/optbuilder/with.go +++ b/pkg/sql/opt/optbuilder/with.go @@ -127,6 +127,11 @@ func (b *Builder) buildCTE( // We don't really know the input row count, except for the first time we run // the recursive query. We don't have anything better though. bindingProps.Stats.RowCount = initialScope.expr.Relational().Stats.RowCount + // Row count must be greater than 0 or the stats code will throw an error. + // Set it to 1 to match the cardinality. 
+ if bindingProps.Stats.RowCount < 1 { + bindingProps.Stats.RowCount = 1 + } cteSrc.bindingProps = bindingProps cteSrc.cols = b.getCTECols(initialScope, cte.Name) diff --git a/pkg/sql/opt/optgen/exprgen/testdata/join b/pkg/sql/opt/optgen/exprgen/testdata/join index b9c2696c26cc..cc7ef5cfb13d 100644 --- a/pkg/sql/opt/optgen/exprgen/testdata/join +++ b/pkg/sql/opt/optgen/exprgen/testdata/join @@ -115,6 +115,7 @@ expr ---- inner-join-apply ├── columns: t.public.abc.a:1(int) t.public.abc.b:2(int) t.public.abc.c:3(int) t.public.def.d:5(int) t.public.def.e:6(int) t.public.def.f:7(int) + ├── immutable ├── stats: [rows=333333.333] ├── cost: 5611.39451 ├── prune: (7) @@ -129,6 +130,7 @@ inner-join-apply ├── select │ ├── columns: t.public.def.d:5(int) t.public.def.e:6(int) t.public.def.f:7(int) │ ├── outer: (1) + │ ├── immutable │ ├── stats: [rows=333.333333, distinct(1)=1, null(1)=0] │ ├── cost: 1080.03 │ ├── prune: (7) @@ -138,7 +140,7 @@ inner-join-apply │ │ ├── cost: 1070.02 │ │ └── prune: (5-7) │ └── filters - │ └── eq [type=bool, outer=(1,5,6), constraints=(/1: (/NULL - ])] + │ └── eq [type=bool, outer=(1,5,6), immutable, constraints=(/1: (/NULL - ])] │ ├── variable: t.public.abc.a:1 [type=int] │ └── plus [type=int] │ ├── variable: t.public.def.d:5 [type=int] diff --git a/pkg/sql/opt/optgen/exprgen/testdata/values b/pkg/sql/opt/optgen/exprgen/testdata/values index dbb56ff93c9b..de5f30986491 100644 --- a/pkg/sql/opt/optgen/exprgen/testdata/values +++ b/pkg/sql/opt/optgen/exprgen/testdata/values @@ -33,6 +33,7 @@ expr project ├── columns: y:2(int!null) x:1(int!null) ├── cardinality: [1 - 1] + ├── immutable ├── stats: [rows=1] ├── cost: 0.05 ├── key: () @@ -49,6 +50,6 @@ project │ └── tuple [type=tuple{int}] │ └── const: 1 [type=int] └── projections - └── plus [as=y:2, type=int, outer=(1)] + └── plus [as=y:2, type=int, outer=(1), immutable] ├── variable: x:1 [type=int] └── const: 10 [type=int] diff --git a/pkg/sql/opt/view_dependencies.go 
b/pkg/sql/opt/view_dependencies.go index a3254de9201f..9003e9d57abd 100644 --- a/pkg/sql/opt/view_dependencies.go +++ b/pkg/sql/opt/view_dependencies.go @@ -11,6 +11,8 @@ package opt import ( + "sort" + "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/util" ) @@ -23,13 +25,35 @@ type ViewDep struct { DataSource cat.DataSource // ColumnOrdinals is the set of column ordinals that are referenced by the - // view for this table. In most cases, this consists of all "public" columns - // of the table; the only exception is when a table is referenced by table ID - // with a specific list of column IDs. + // view for this table. ColumnOrdinals util.FastIntSet + // ColumnIDToOrd maps a scopeColumn's ColumnID to its ColumnOrdinal. + // This helps us add only the columns that are actually referenced + // by the view's query into the view dependencies. We add a + // dependency on a column only when the column is referenced by the view + // and created as a scopeColumn. + ColumnIDToOrd map[ColumnID]int + // If an index is referenced specifically (via an index hint), SpecificIndex // is true and Index is the ordinal of that index. SpecificIndex bool Index cat.IndexOrdinal } + +// GetColumnNames returns a sorted list of the names of the column dependencies +// and a boolean to determine if the dependency was a table. +// We only track column dependencies on tables. 
+func (dep ViewDep) GetColumnNames() ([]string, bool) { + colNames := make([]string, 0) + if table, ok := dep.DataSource.(cat.Table); ok { + dep.ColumnOrdinals.ForEach(func(i int) { + name := table.Column(i).ColName() + colNames = append(colNames, name.String()) + }) + sort.Strings(colNames) + return colNames, ok + } + + return nil, false +} diff --git a/pkg/sql/opt/xform/testdata/coster/join b/pkg/sql/opt/xform/testdata/coster/join index 02549a112260..2e37722218d4 100644 --- a/pkg/sql/opt/xform/testdata/coster/join +++ b/pkg/sql/opt/xform/testdata/coster/join @@ -11,12 +11,14 @@ SELECT k, x FROM a INNER JOIN b ON k=x WHERE d=1.0 ---- project ├── columns: k:1!null x:5!null + ├── immutable ├── stats: [rows=99] ├── cost: 2124.725 ├── fd: (1)==(5), (5)==(1) └── inner-join (hash) ├── columns: k:1!null d:4!null x:5!null ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + ├── immutable ├── stats: [rows=99, distinct(1)=10, null(1)=0, distinct(5)=10, null(5)=0] ├── cost: 2123.725 ├── fd: ()-->(4), (1)==(5), (5)==(1) @@ -26,6 +28,7 @@ project │ └── cost: 1040.02 ├── select │ ├── columns: k:1!null d:4!null + │ ├── immutable │ ├── stats: [rows=10, distinct(1)=10, null(1)=0, distinct(4)=1, null(4)=0] │ ├── cost: 1070.03 │ ├── key: (1) @@ -37,7 +40,7 @@ project │ │ ├── key: (1) │ │ └── fd: (1)-->(4) │ └── filters - │ └── d:4 = 1.0 [outer=(4), constraints=(/4: [/1.0 - /1.0]; tight), fd=()-->(4)] + │ └── d:4 = 1.0 [outer=(4), immutable, constraints=(/4: [/1.0 - /1.0]; tight), fd=()-->(4)] └── filters └── k:1 = x:5 [outer=(1,5), constraints=(/1: (/NULL - ]; /5: (/NULL - ]), fd=(1)==(5), (5)==(1)] diff --git a/pkg/sql/opt/xform/testdata/coster/project b/pkg/sql/opt/xform/testdata/coster/project index 572b477cfab0..d3866dbe0eb2 100644 --- a/pkg/sql/opt/xform/testdata/coster/project +++ b/pkg/sql/opt/xform/testdata/coster/project @@ -7,6 +7,7 @@ SELECT k, i, s || 'foo' FROM a ---- project ├── columns: k:1!null i:2 "?column?":5 + ├── immutable ├── stats: [rows=1000] 
├── cost: 1090.03 ├── key: (1) @@ -18,13 +19,14 @@ project │ ├── key: (1) │ └── fd: (1)-->(2,3) └── projections - └── s:3 || 'foo' [as="?column?":5, outer=(3)] + └── s:3 || 'foo' [as="?column?":5, outer=(3), immutable] opt SELECT k, k+2, i*d FROM a ---- project ├── columns: k:1!null "?column?":5!null "?column?":6 + ├── immutable ├── stats: [rows=1000] ├── cost: 1100.03 ├── key: (1) @@ -36,5 +38,5 @@ project │ ├── key: (1) │ └── fd: (1)-->(2,4) └── projections - ├── k:1 + 2 [as="?column?":5, outer=(1)] - └── i:2 * d:4 [as="?column?":6, outer=(2,4)] + ├── k:1 + 2 [as="?column?":5, outer=(1), immutable] + └── i:2 * d:4 [as="?column?":6, outer=(2,4), immutable] diff --git a/pkg/sql/opt/xform/testdata/coster/scan b/pkg/sql/opt/xform/testdata/coster/scan index 3492d8f04382..12e5e1174630 100644 --- a/pkg/sql/opt/xform/testdata/coster/scan +++ b/pkg/sql/opt/xform/testdata/coster/scan @@ -104,6 +104,7 @@ SELECT id FROM speed_test@primary WHERE id BETWEEN 1 AND 1000 AND ((id % 16) = 0 select ├── columns: id:1!null ├── cardinality: [0 - 1000] + ├── immutable ├── stats: [rows=333.333333, distinct(1)=333.333333, null(1)=0] ├── cost: 1030.02 ├── key: (1) @@ -116,7 +117,7 @@ select │ ├── cost: 1020.01 │ └── key: (1) └── filters - └── (id:1 % 16) = 0 [outer=(1)] + └── (id:1 % 16) = 0 [outer=(1), immutable] opt SELECT id FROM speed_test@primary WHERE id BETWEEN 1 AND 2000 AND ((id % 16) = 0) @@ -124,6 +125,7 @@ SELECT id FROM speed_test@primary WHERE id BETWEEN 1 AND 2000 AND ((id % 16) = 0 select ├── columns: id:1!null ├── cardinality: [0 - 2000] + ├── immutable ├── stats: [rows=333.333333, distinct(1)=333.333333, null(1)=0] ├── cost: 1030.02 ├── key: (1) @@ -136,4 +138,4 @@ select │ ├── cost: 1020.01 │ └── key: (1) └── filters - └── (id:1 % 16) = 0 [outer=(1)] + └── (id:1 % 16) = 0 [outer=(1), immutable] diff --git a/pkg/sql/opt/xform/testdata/external/customer b/pkg/sql/opt/xform/testdata/external/customer index 3ad492ee5fd2..3de6b33116b1 100644 --- 
a/pkg/sql/opt/xform/testdata/external/customer +++ b/pkg/sql/opt/xform/testdata/external/customer @@ -232,12 +232,14 @@ LIMIT 50 project ├── columns: score:9!null expires_at:15!null [hidden: updated_at_inverse:14!null] ├── cardinality: [0 - 50] + ├── immutable ├── fd: ()-->(15) ├── ordering: -9,-14 opt(15) [actual: -9,-14] └── scan leaderboard_record@test_idx,rev ├── columns: id:1!null leaderboard_id:2!null score:9!null updated_at_inverse:14!null expires_at:15!null ├── constraint: /2/15/9/14/1/3: [/'\x74657374'/0 - /'\x74657374'/0/100/500/'\x736f6d655f6964') ├── limit: 50(rev) + ├── immutable ├── key: (1) ├── fd: ()-->(2,15), (1)-->(9,14) └── ordering: -9,-14 opt(2,15) [actual: -9,-14] @@ -569,5 +571,5 @@ project │ │ └── filters (true) │ └── filters (true) └── projections - ├── value:4->>'secondary_id' [as=secondary_id:6, outer=(4)] + ├── value:4->>'secondary_id' [as=secondary_id:6, outer=(4), immutable] └── data:3 || jsonb_build_object('primary_id', primary_id:1) [as="?column?":7, outer=(1,3), stable] diff --git a/pkg/sql/opt/xform/testdata/external/hibernate b/pkg/sql/opt/xform/testdata/external/hibernate index 9ba9b07f084c..49161434f34e 100644 --- a/pkg/sql/opt/xform/testdata/external/hibernate +++ b/pkg/sql/opt/xform/testdata/external/hibernate @@ -1988,18 +1988,22 @@ WHERE ---- project ├── columns: customer1_1_0_:1!null ordernum2_1_0_:2!null orderdat3_1_0_:3!null formula101_0_:18 customer1_2_1_:4 ordernum2_2_1_:5 producti3_2_1_:6 customer1_2_2_:4 ordernum2_2_2_:5 producti3_2_2_:6 quantity4_2_2_:7 + ├── immutable ├── key: (6) ├── fd: ()-->(1-3), (6)-->(4,5,7,18) ├── group-by │ ├── columns: order0_.customerid:1!null order0_.ordernumber:2!null orderdate:3!null lineitems1_.customerid:4 lineitems1_.ordernumber:5 lineitems1_.productid:6 lineitems1_.quantity:7 sum:17 │ ├── grouping columns: lineitems1_.productid:6 + │ ├── immutable │ ├── key: (6) │ ├── fd: ()-->(1-3), (6)-->(1-5,7,17) │ ├── right-join (hash) │ │ ├── columns: order0_.customerid:1!null 
order0_.ordernumber:2!null orderdate:3!null lineitems1_.customerid:4 lineitems1_.ordernumber:5 lineitems1_.productid:6 lineitems1_.quantity:7 li.customerid:8 li.ordernumber:9 column16:16 + │ │ ├── immutable │ │ ├── fd: ()-->(1-3), (6)-->(4,5,7) │ │ ├── project │ │ │ ├── columns: column16:16 li.customerid:8!null li.ordernumber:9!null + │ │ │ ├── immutable │ │ │ ├── inner-join (hash) │ │ │ │ ├── columns: li.customerid:8!null li.ordernumber:9!null li.productid:10!null li.quantity:11 p.productid:12!null cost:14 │ │ │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) @@ -2016,7 +2020,7 @@ project │ │ │ │ └── filters │ │ │ │ └── li.productid:10 = p.productid:12 [outer=(10,12), constraints=(/10: (/NULL - ]; /12: (/NULL - ]), fd=(10)==(12), (12)==(10)] │ │ │ └── projections - │ │ │ └── li.quantity:11 * cost:14 [as=column16:16, outer=(11,14)] + │ │ │ └── li.quantity:11 * cost:14 [as=column16:16, outer=(11,14), immutable] │ │ ├── left-join (merge) │ │ │ ├── columns: order0_.customerid:1!null order0_.ordernumber:2!null orderdate:3!null lineitems1_.customerid:4 lineitems1_.ordernumber:5 lineitems1_.productid:6 lineitems1_.quantity:7 │ │ │ ├── left ordering: +1,+2 @@ -2110,25 +2114,30 @@ FROM ---- project ├── columns: customer1_0_0_:1!null customer1_1_1_:4 ordernum2_1_1_:5 customer1_2_2_:7 ordernum2_2_2_:8 producti3_2_2_:9 producti1_3_3_:11 name2_0_0_:2!null address3_0_0_:3!null orderdat3_1_1_:6 formula103_1_:30 customer1_1_0__:4 ordernum2_1_0__:5 ordernum2_0__:5 quantity4_2_2_:10 customer1_2_1__:7 ordernum2_2_1__:8 producti3_2_1__:9 descript2_3_3_:12 cost3_3_3_:13 numberav4_3_3_:14 formula104_3_:31 + ├── immutable ├── key: (1,4,5,7-9) ├── fd: (1)-->(2,3), (4,5)-->(6), (7-9)-->(10), (11)-->(12-14), (1,4,5,7-9)-->(2,3,6,10-14,30,31) ├── group-by │ ├── columns: customer0_.customerid:1!null name:2!null address:3!null orders1_.customerid:4 orders1_.ordernumber:5 orderdate:6 lineitems2_.customerid:7 lineitems2_.ordernumber:8 lineitems2_.productid:9 
lineitems2_.quantity:10 product3_.productid:11 product3_.description:12 product3_.cost:13 product3_.numberavailable:14 sum:24 sum:29 │ ├── grouping columns: customer0_.customerid:1!null orders1_.customerid:4 orders1_.ordernumber:5 lineitems2_.customerid:7 lineitems2_.ordernumber:8 lineitems2_.productid:9 + │ ├── immutable │ ├── key: (1,4,5,7-9) │ ├── fd: (1)-->(2,3), (4,5)-->(6), (7-9)-->(10), (11)-->(12-14), (1,4,5,7-9)-->(2,3,6,10-14,24,29) │ ├── left-join (hash) │ │ ├── columns: customer0_.customerid:1!null name:2!null address:3!null orders1_.customerid:4 orders1_.ordernumber:5 orderdate:6 lineitems2_.customerid:7 lineitems2_.ordernumber:8 lineitems2_.productid:9 lineitems2_.quantity:10 product3_.productid:11 product3_.description:12 product3_.cost:13 product3_.numberavailable:14 sum:24 li.productid:27 li.quantity:28 │ │ ├── multiplicity: left-rows(one-or-more), right-rows(zero-or-more) + │ │ ├── immutable │ │ ├── fd: (1)-->(2,3), (4,5)-->(6), (7-9)-->(10), (11)-->(12-14), (1,4,5,7-9)-->(2,3,6,10-14,24) │ │ ├── group-by │ │ │ ├── columns: customer0_.customerid:1!null name:2!null address:3!null orders1_.customerid:4 orders1_.ordernumber:5 orderdate:6 lineitems2_.customerid:7 lineitems2_.ordernumber:8 lineitems2_.productid:9 lineitems2_.quantity:10 product3_.productid:11 product3_.description:12 product3_.cost:13 product3_.numberavailable:14 sum:24 │ │ │ ├── grouping columns: customer0_.customerid:1!null orders1_.customerid:4 orders1_.ordernumber:5 lineitems2_.customerid:7 lineitems2_.ordernumber:8 lineitems2_.productid:9 + │ │ │ ├── immutable │ │ │ ├── key: (1,4,5,7-9) │ │ │ ├── fd: (1)-->(2,3), (4,5)-->(6), (7-9)-->(10), (11)-->(12-14), (1,4,5,7-9)-->(2,3,6,10-14,24) │ │ │ ├── left-join (hash) │ │ │ │ ├── columns: customer0_.customerid:1!null name:2!null address:3!null orders1_.customerid:4 orders1_.ordernumber:5 orderdate:6 lineitems2_.customerid:7 lineitems2_.ordernumber:8 lineitems2_.productid:9 lineitems2_.quantity:10 product3_.productid:11 
product3_.description:12 product3_.cost:13 product3_.numberavailable:14 li.customerid:15 li.ordernumber:16 column23:23 │ │ │ │ ├── multiplicity: left-rows(one-or-more), right-rows(zero-or-more) + │ │ │ │ ├── immutable │ │ │ │ ├── fd: (1)-->(2,3), (4,5)-->(6), (7-9)-->(10), (11)-->(12-14), (1,4,5,7-9)-->(11-14) │ │ │ │ ├── left-join (hash) │ │ │ │ │ ├── columns: customer0_.customerid:1!null name:2!null address:3!null orders1_.customerid:4 orders1_.ordernumber:5 orderdate:6 lineitems2_.customerid:7 lineitems2_.ordernumber:8 lineitems2_.productid:9 lineitems2_.quantity:10 product3_.productid:11 product3_.description:12 product3_.cost:13 product3_.numberavailable:14 @@ -2172,6 +2181,7 @@ project │ │ │ │ │ └── lineitems2_.productid:9 = product3_.productid:11 [outer=(9,11), constraints=(/9: (/NULL - ]; /11: (/NULL - ]), fd=(9)==(11), (11)==(9)] │ │ │ │ ├── project │ │ │ │ │ ├── columns: column23:23 li.customerid:15!null li.ordernumber:16!null + │ │ │ │ │ ├── immutable │ │ │ │ │ ├── inner-join (hash) │ │ │ │ │ │ ├── columns: li.customerid:15!null li.ordernumber:16!null li.productid:17!null li.quantity:18 p.productid:19!null p.cost:21 │ │ │ │ │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) @@ -2188,7 +2198,7 @@ project │ │ │ │ │ │ └── filters │ │ │ │ │ │ └── li.productid:17 = p.productid:19 [outer=(17,19), constraints=(/17: (/NULL - ]; /19: (/NULL - ]), fd=(17)==(19), (19)==(17)] │ │ │ │ │ └── projections - │ │ │ │ │ └── li.quantity:18 * p.cost:21 [as=column23:23, outer=(18,21)] + │ │ │ │ │ └── li.quantity:18 * p.cost:21 [as=column23:23, outer=(18,21), immutable] │ │ │ │ └── filters │ │ │ │ ├── li.customerid:15 = orders1_.customerid:4 [outer=(4,15), constraints=(/4: (/NULL - ]; /15: (/NULL - ]), fd=(4)==(15), (15)==(4)] │ │ │ │ └── li.ordernumber:16 = orders1_.ordernumber:5 [outer=(5,16), constraints=(/5: (/NULL - ]; /16: (/NULL - ]), fd=(5)==(16), (16)==(5)] @@ -2274,18 +2284,22 @@ WHERE ---- project ├── columns: customer1_1_0_:1!null 
ordernum2_1_0_:2!null orderdat3_1_0_:3!null formula105_0_:18 customer1_2_1_:4 ordernum2_2_1_:5 producti3_2_1_:6 customer1_2_2_:4 ordernum2_2_2_:5 producti3_2_2_:6 quantity4_2_2_:7 + ├── immutable ├── key: (6) ├── fd: ()-->(1-3), (6)-->(4,5,7,18) ├── group-by │ ├── columns: order0_.customerid:1!null order0_.ordernumber:2!null orderdate:3!null lineitems1_.customerid:4 lineitems1_.ordernumber:5 lineitems1_.productid:6 lineitems1_.quantity:7 sum:17 │ ├── grouping columns: lineitems1_.productid:6 + │ ├── immutable │ ├── key: (6) │ ├── fd: ()-->(1-3), (6)-->(1-5,7,17) │ ├── right-join (hash) │ │ ├── columns: order0_.customerid:1!null order0_.ordernumber:2!null orderdate:3!null lineitems1_.customerid:4 lineitems1_.ordernumber:5 lineitems1_.productid:6 lineitems1_.quantity:7 li.customerid:8 li.ordernumber:9 column16:16 + │ │ ├── immutable │ │ ├── fd: ()-->(1-3), (6)-->(4,5,7) │ │ ├── project │ │ │ ├── columns: column16:16 li.customerid:8!null li.ordernumber:9!null + │ │ │ ├── immutable │ │ │ ├── inner-join (hash) │ │ │ │ ├── columns: li.customerid:8!null li.ordernumber:9!null li.productid:10!null li.quantity:11 p.productid:12!null cost:14 │ │ │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) @@ -2302,7 +2316,7 @@ project │ │ │ │ └── filters │ │ │ │ └── li.productid:10 = p.productid:12 [outer=(10,12), constraints=(/10: (/NULL - ]; /12: (/NULL - ]), fd=(10)==(12), (12)==(10)] │ │ │ └── projections - │ │ │ └── li.quantity:11 * cost:14 [as=column16:16, outer=(11,14)] + │ │ │ └── li.quantity:11 * cost:14 [as=column16:16, outer=(11,14), immutable] │ │ ├── left-join (merge) │ │ │ ├── columns: order0_.customerid:1!null order0_.ordernumber:2!null orderdate:3!null lineitems1_.customerid:4 lineitems1_.ordernumber:5 lineitems1_.productid:6 lineitems1_.quantity:7 │ │ │ ├── left ordering: +1,+2 @@ -2363,16 +2377,19 @@ FROM ---- project ├── columns: customer1_10_:1!null ordernum2_10_:2!null orderdat3_10_:3!null formula273_:14 + ├── immutable ├── key: (1,2) ├── fd: 
(1,2)-->(3,14) ├── group-by │ ├── columns: order0_.customerid:1!null order0_.ordernumber:2!null orderdate:3!null sum:13 │ ├── grouping columns: order0_.customerid:1!null order0_.ordernumber:2!null + │ ├── immutable │ ├── key: (1,2) │ ├── fd: (1,2)-->(3,13) │ ├── left-join (hash) │ │ ├── columns: order0_.customerid:1!null order0_.ordernumber:2!null orderdate:3!null li.customerid:4 li.ordernumber:5 column12:12 │ │ ├── multiplicity: left-rows(one-or-more), right-rows(zero-or-one) + │ │ ├── immutable │ │ ├── fd: (1,2)-->(3) │ │ ├── scan order0_ │ │ │ ├── columns: order0_.customerid:1!null order0_.ordernumber:2!null orderdate:3!null @@ -2380,6 +2397,7 @@ project │ │ │ └── fd: (1,2)-->(3) │ │ ├── project │ │ │ ├── columns: column12:12 li.customerid:4!null li.ordernumber:5!null + │ │ │ ├── immutable │ │ │ ├── inner-join (hash) │ │ │ │ ├── columns: li.customerid:4!null li.ordernumber:5!null li.productid:6!null quantity:7 p.productid:8!null cost:10 │ │ │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) @@ -2396,7 +2414,7 @@ project │ │ │ │ └── filters │ │ │ │ └── li.productid:6 = p.productid:8 [outer=(6,8), constraints=(/6: (/NULL - ]; /8: (/NULL - ]), fd=(6)==(8), (8)==(6)] │ │ │ └── projections - │ │ │ └── quantity:7 * cost:10 [as=column12:12, outer=(7,10)] + │ │ │ └── quantity:7 * cost:10 [as=column12:12, outer=(7,10), immutable] │ │ └── filters │ │ ├── li.customerid:4 = order0_.customerid:1 [outer=(1,4), constraints=(/1: (/NULL - ]; /4: (/NULL - ]), fd=(1)==(4), (4)==(1)] │ │ └── li.ordernumber:5 = order0_.ordernumber:2 [outer=(2,5), constraints=(/2: (/NULL - ]; /5: (/NULL - ]), fd=(2)==(5), (5)==(2)] diff --git a/pkg/sql/opt/xform/testdata/external/tpcc b/pkg/sql/opt/xform/testdata/external/tpcc index 619d2740fda0..20a5476544f1 100644 --- a/pkg/sql/opt/xform/testdata/external/tpcc +++ b/pkg/sql/opt/xform/testdata/external/tpcc @@ -39,6 +39,7 @@ project └── project ├── columns: d_next_o_id_new:23 d_id:12!null d_w_id:13!null d_name:14 d_street_1:15 
d_street_2:16 d_city:17 d_state:18 d_zip:19 d_tax:20 d_ytd:21 d_next_o_id:22 ├── cardinality: [0 - 1] + ├── immutable ├── key: () ├── fd: ()-->(12-23) ├── scan district @@ -48,7 +49,7 @@ project │ ├── key: () │ └── fd: ()-->(12-22) └── projections - └── d_next_o_id:22 + 1 [as=d_next_o_id_new:23, outer=(22)] + └── d_next_o_id:22 + 1 [as=d_next_o_id_new:23, outer=(22), immutable] opt format=hide-qual SELECT w_tax FROM warehouse WHERE w_id = 10 @@ -585,7 +586,7 @@ project │ └── projections │ ├── crdb_internal.round_decimal_values(customer.c_balance:38 - 3860.61, 2) [as=c_balance:47, outer=(38), immutable] │ ├── crdb_internal.round_decimal_values(customer.c_ytd_payment:39 + 3860.61, 2) [as=c_ytd_payment:48, outer=(39), immutable] - │ ├── c_payment_cnt:40 + 1 [as=c_payment_cnt_new:45, outer=(40)] + │ ├── c_payment_cnt:40 + 1 [as=c_payment_cnt_new:45, outer=(40), immutable] │ └── CASE c_credit:35 WHEN 'BC' THEN left((((((c_id:22::STRING || c_d_id:23::STRING) || c_w_id:24::STRING) || '5') || '10') || '3860.61') || c_data:42, 500) ELSE c_data:42 END [as=c_data_new:46, outer=(22-24,35,42), immutable] └── projections └── CASE c_credit:14 WHEN 'BC' THEN left(c_data:21, 200) ELSE '' END [as=case:49, outer=(14,21), immutable] @@ -906,7 +907,7 @@ update customer │ └── fd: ()-->(24), (22,23)-->(25-42) └── projections ├── crdb_internal.round_decimal_values(customer.c_balance:38 + CASE c_d_id:23 WHEN 6 THEN 57214.780000 WHEN 8 THEN 67755.430000 WHEN 1 THEN 51177.840000 WHEN 2 THEN 73840.700000 WHEN 4 THEN 45906.990000 WHEN 9 THEN 32523.760000 WHEN 10 THEN 20240.200000 WHEN 3 THEN 75299.790000 WHEN 5 THEN 56543.340000 WHEN 7 THEN 67157.940000 END, 2) [as=c_balance:45, outer=(23,38), immutable] - └── c_delivery_cnt:41 + 1 [as=c_delivery_cnt_new:43, outer=(41)] + └── c_delivery_cnt:41 + 1 [as=c_delivery_cnt_new:43, outer=(41), immutable] opt format=hide-qual DELETE FROM new_order @@ -1071,12 +1072,14 @@ WHERE w_ytd != sum_d_ytd scalar-group-by ├── columns: count:22!null ├── 
cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(22) ├── inner-join (merge) │ ├── columns: w_id:1!null w_ytd:9!null d_w_id:11!null sum:21!null │ ├── left ordering: +1 │ ├── right ordering: +11 + │ ├── immutable │ ├── key: (11) │ ├── fd: (1)-->(9), (11)-->(21), (1)==(11), (11)==(1) │ ├── scan warehouse @@ -1097,7 +1100,7 @@ scalar-group-by │ │ └── sum [as=sum:21, outer=(19)] │ │ └── d_ytd:19 │ └── filters - │ └── w_ytd:9 != sum:21 [outer=(9,21), constraints=(/9: (/NULL - ]; /21: (/NULL - ])] + │ └── w_ytd:9 != sum:21 [outer=(9,21), immutable, constraints=(/9: (/NULL - ]; /21: (/NULL - ])] └── aggregations └── count-rows [as=count_rows:22] @@ -1165,10 +1168,12 @@ WHERE nod != -1 scalar-group-by ├── columns: count:8!null ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(8) ├── select │ ├── columns: no_d_id:2!null no_w_id:3!null max:4!null min:5!null count_rows:6!null + │ ├── immutable │ ├── key: (2,3) │ ├── fd: (2,3)-->(4-6) │ ├── group-by @@ -1188,7 +1193,7 @@ scalar-group-by │ │ │ └── no_o_id:1 │ │ └── count-rows [as=count_rows:6] │ └── filters - │ └── ((max:4 - min:5) - count_rows:6) != -1 [outer=(4-6)] + │ └── ((max:4 - min:5) - count_rows:6) != -1 [outer=(4-6), immutable] └── aggregations └── count-rows [as=count_rows:8] diff --git a/pkg/sql/opt/xform/testdata/external/tpcc-later-stats b/pkg/sql/opt/xform/testdata/external/tpcc-later-stats index 5c7ee92c9925..cc9552e7230a 100644 --- a/pkg/sql/opt/xform/testdata/external/tpcc-later-stats +++ b/pkg/sql/opt/xform/testdata/external/tpcc-later-stats @@ -42,6 +42,7 @@ project └── project ├── columns: d_next_o_id_new:23 d_id:12!null d_w_id:13!null d_name:14 d_street_1:15 d_street_2:16 d_city:17 d_state:18 d_zip:19 d_tax:20 d_ytd:21 d_next_o_id:22 ├── cardinality: [0 - 1] + ├── immutable ├── key: () ├── fd: ()-->(12-23) ├── scan district @@ -51,7 +52,7 @@ project │ ├── key: () │ └── fd: ()-->(12-22) └── projections - └── d_next_o_id:22 + 1 [as=d_next_o_id_new:23, outer=(22)] + └── 
d_next_o_id:22 + 1 [as=d_next_o_id_new:23, outer=(22), immutable] opt format=hide-qual SELECT w_tax FROM warehouse WHERE w_id = 10 @@ -588,7 +589,7 @@ project │ └── projections │ ├── crdb_internal.round_decimal_values(customer.c_balance:38 - 3860.61, 2) [as=c_balance:47, outer=(38), immutable] │ ├── crdb_internal.round_decimal_values(customer.c_ytd_payment:39 + 3860.61, 2) [as=c_ytd_payment:48, outer=(39), immutable] - │ ├── c_payment_cnt:40 + 1 [as=c_payment_cnt_new:45, outer=(40)] + │ ├── c_payment_cnt:40 + 1 [as=c_payment_cnt_new:45, outer=(40), immutable] │ └── CASE c_credit:35 WHEN 'BC' THEN left((((((c_id:22::STRING || c_d_id:23::STRING) || c_w_id:24::STRING) || '5') || '10') || '3860.61') || c_data:42, 500) ELSE c_data:42 END [as=c_data_new:46, outer=(22-24,35,42), immutable] └── projections └── CASE c_credit:14 WHEN 'BC' THEN left(c_data:21, 200) ELSE '' END [as=case:49, outer=(14,21), immutable] @@ -909,7 +910,7 @@ update customer │ └── fd: ()-->(24), (22,23)-->(25-42) └── projections ├── crdb_internal.round_decimal_values(customer.c_balance:38 + CASE c_d_id:23 WHEN 6 THEN 57214.780000 WHEN 8 THEN 67755.430000 WHEN 1 THEN 51177.840000 WHEN 2 THEN 73840.700000 WHEN 4 THEN 45906.990000 WHEN 9 THEN 32523.760000 WHEN 10 THEN 20240.200000 WHEN 3 THEN 75299.790000 WHEN 5 THEN 56543.340000 WHEN 7 THEN 67157.940000 END, 2) [as=c_balance:45, outer=(23,38), immutable] - └── c_delivery_cnt:41 + 1 [as=c_delivery_cnt_new:43, outer=(41)] + └── c_delivery_cnt:41 + 1 [as=c_delivery_cnt_new:43, outer=(41), immutable] opt format=hide-qual DELETE FROM new_order @@ -1073,12 +1074,14 @@ WHERE w_ytd != sum_d_ytd scalar-group-by ├── columns: count:22!null ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(22) ├── inner-join (merge) │ ├── columns: w_id:1!null w_ytd:9!null d_w_id:11!null sum:21!null │ ├── left ordering: +1 │ ├── right ordering: +11 + │ ├── immutable │ ├── key: (11) │ ├── fd: (1)-->(9), (11)-->(21), (1)==(11), (11)==(1) │ ├── scan warehouse @@ 
-1099,7 +1102,7 @@ scalar-group-by │ │ └── sum [as=sum:21, outer=(19)] │ │ └── d_ytd:19 │ └── filters - │ └── w_ytd:9 != sum:21 [outer=(9,21), constraints=(/9: (/NULL - ]; /21: (/NULL - ])] + │ └── w_ytd:9 != sum:21 [outer=(9,21), immutable, constraints=(/9: (/NULL - ]; /21: (/NULL - ])] └── aggregations └── count-rows [as=count_rows:22] @@ -1167,10 +1170,12 @@ WHERE nod != -1 scalar-group-by ├── columns: count:8!null ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(8) ├── select │ ├── columns: no_d_id:2!null no_w_id:3!null max:4!null min:5!null count_rows:6!null + │ ├── immutable │ ├── key: (2,3) │ ├── fd: (2,3)-->(4-6) │ ├── group-by @@ -1190,7 +1195,7 @@ scalar-group-by │ │ │ └── no_o_id:1 │ │ └── count-rows [as=count_rows:6] │ └── filters - │ └── ((max:4 - min:5) - count_rows:6) != -1 [outer=(4-6)] + │ └── ((max:4 - min:5) - count_rows:6) != -1 [outer=(4-6), immutable] └── aggregations └── count-rows [as=count_rows:8] diff --git a/pkg/sql/opt/xform/testdata/external/tpcc-no-stats b/pkg/sql/opt/xform/testdata/external/tpcc-no-stats index 5e33f14a1419..50ac4ef9384f 100644 --- a/pkg/sql/opt/xform/testdata/external/tpcc-no-stats +++ b/pkg/sql/opt/xform/testdata/external/tpcc-no-stats @@ -36,6 +36,7 @@ project └── project ├── columns: d_next_o_id_new:23 d_id:12!null d_w_id:13!null d_name:14 d_street_1:15 d_street_2:16 d_city:17 d_state:18 d_zip:19 d_tax:20 d_ytd:21 d_next_o_id:22 ├── cardinality: [0 - 1] + ├── immutable ├── key: () ├── fd: ()-->(12-23) ├── scan district @@ -45,7 +46,7 @@ project │ ├── key: () │ └── fd: ()-->(12-22) └── projections - └── d_next_o_id:22 + 1 [as=d_next_o_id_new:23, outer=(22)] + └── d_next_o_id:22 + 1 [as=d_next_o_id_new:23, outer=(22), immutable] opt format=hide-qual SELECT w_tax FROM warehouse WHERE w_id = 10 @@ -582,7 +583,7 @@ project │ └── projections │ ├── crdb_internal.round_decimal_values(customer.c_balance:38 - 3860.61, 2) [as=c_balance:47, outer=(38), immutable] │ ├── 
crdb_internal.round_decimal_values(customer.c_ytd_payment:39 + 3860.61, 2) [as=c_ytd_payment:48, outer=(39), immutable] - │ ├── c_payment_cnt:40 + 1 [as=c_payment_cnt_new:45, outer=(40)] + │ ├── c_payment_cnt:40 + 1 [as=c_payment_cnt_new:45, outer=(40), immutable] │ └── CASE c_credit:35 WHEN 'BC' THEN left((((((c_id:22::STRING || c_d_id:23::STRING) || c_w_id:24::STRING) || '5') || '10') || '3860.61') || c_data:42, 500) ELSE c_data:42 END [as=c_data_new:46, outer=(22-24,35,42), immutable] └── projections └── CASE c_credit:14 WHEN 'BC' THEN left(c_data:21, 200) ELSE '' END [as=case:49, outer=(14,21), immutable] @@ -907,7 +908,7 @@ update customer │ └── fd: ()-->(24), (22,23)-->(25-42) └── projections ├── crdb_internal.round_decimal_values(customer.c_balance:38 + CASE c_d_id:23 WHEN 6 THEN 57214.780000 WHEN 8 THEN 67755.430000 WHEN 1 THEN 51177.840000 WHEN 2 THEN 73840.700000 WHEN 4 THEN 45906.990000 WHEN 9 THEN 32523.760000 WHEN 10 THEN 20240.200000 WHEN 3 THEN 75299.790000 WHEN 5 THEN 56543.340000 WHEN 7 THEN 67157.940000 END, 2) [as=c_balance:45, outer=(23,38), immutable] - └── c_delivery_cnt:41 + 1 [as=c_delivery_cnt_new:43, outer=(41)] + └── c_delivery_cnt:41 + 1 [as=c_delivery_cnt_new:43, outer=(41), immutable] opt format=hide-qual DELETE FROM new_order @@ -1071,12 +1072,14 @@ WHERE w_ytd != sum_d_ytd scalar-group-by ├── columns: count:22!null ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(22) ├── inner-join (lookup warehouse) │ ├── columns: w_id:1!null w_ytd:9!null d_w_id:11!null sum:21!null │ ├── key columns: [11] = [1] │ ├── lookup columns are key + │ ├── immutable │ ├── key: (11) │ ├── fd: (1)-->(9), (11)-->(21), (1)==(11), (11)==(1) │ ├── group-by @@ -1092,7 +1095,7 @@ scalar-group-by │ │ └── sum [as=sum:21, outer=(19)] │ │ └── d_ytd:19 │ └── filters - │ └── w_ytd:9 != sum:21 [outer=(9,21), constraints=(/9: (/NULL - ]; /21: (/NULL - ])] + │ └── w_ytd:9 != sum:21 [outer=(9,21), immutable, constraints=(/9: (/NULL - ]; /21: (/NULL - ])] └── 
aggregations └── count-rows [as=count_rows:22] @@ -1160,10 +1163,12 @@ WHERE nod != -1 scalar-group-by ├── columns: count:8!null ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(8) ├── select │ ├── columns: no_d_id:2!null no_w_id:3!null max:4!null min:5!null count_rows:6!null + │ ├── immutable │ ├── key: (2,3) │ ├── fd: (2,3)-->(4-6) │ ├── group-by @@ -1183,7 +1188,7 @@ scalar-group-by │ │ │ └── no_o_id:1 │ │ └── count-rows [as=count_rows:6] │ └── filters - │ └── ((max:4 - min:5) - count_rows:6) != -1 [outer=(4-6)] + │ └── ((max:4 - min:5) - count_rows:6) != -1 [outer=(4-6), immutable] └── aggregations └── count-rows [as=count_rows:8] diff --git a/pkg/sql/opt/xform/testdata/external/tpch b/pkg/sql/opt/xform/testdata/external/tpch index 5ed71ce79f0f..a985dbf3f03a 100644 --- a/pkg/sql/opt/xform/testdata/external/tpch +++ b/pkg/sql/opt/xform/testdata/external/tpch @@ -42,16 +42,19 @@ ORDER BY ---- sort ├── columns: l_returnflag:9!null l_linestatus:10!null sum_qty:17!null sum_base_price:18!null sum_disc_price:20!null sum_charge:22!null avg_qty:23!null avg_price:24!null avg_disc:25!null count_order:26!null + ├── immutable ├── key: (9,10) ├── fd: (9,10)-->(17,18,20,22-26) ├── ordering: +9,+10 └── group-by ├── columns: l_returnflag:9!null l_linestatus:10!null sum:17!null sum:18!null sum:20!null sum:22!null avg:23!null avg:24!null avg:25!null count_rows:26!null ├── grouping columns: l_returnflag:9!null l_linestatus:10!null + ├── immutable ├── key: (9,10) ├── fd: (9,10)-->(17,18,20,22-26) ├── project │ ├── columns: column19:19!null column21:21!null l_quantity:5!null l_extendedprice:6!null l_discount:7!null l_returnflag:9!null l_linestatus:10!null + │ ├── immutable │ ├── select │ │ ├── columns: l_quantity:5!null l_extendedprice:6!null l_discount:7!null l_tax:8!null l_returnflag:9!null l_linestatus:10!null l_shipdate:11!null │ │ ├── scan lineitem @@ -59,8 +62,8 @@ sort │ │ └── filters │ │ └── l_shipdate:11 <= '1998-09-02' [outer=(11), constraints=(/11: 
(/NULL - /'1998-09-02']; tight)] │ └── projections - │ ├── l_extendedprice:6 * (1.0 - l_discount:7) [as=column19:19, outer=(6,7)] - │ └── (l_extendedprice:6 * (1.0 - l_discount:7)) * (l_tax:8 + 1.0) [as=column21:21, outer=(6-8)] + │ ├── l_extendedprice:6 * (1.0 - l_discount:7) [as=column19:19, outer=(6,7), immutable] + │ └── (l_extendedprice:6 * (1.0 - l_discount:7)) * (l_tax:8 + 1.0) [as=column21:21, outer=(6-8), immutable] └── aggregations ├── sum [as=sum:17, outer=(5)] │ └── l_quantity:5 @@ -342,11 +345,13 @@ limit ├── columns: l_orderkey:18!null revenue:35!null o_orderdate:13!null o_shippriority:16!null ├── internal-ordering: -35,+13 ├── cardinality: [0 - 10] + ├── immutable ├── key: (18) ├── fd: (18)-->(13,16,35) ├── ordering: -35,+13 ├── sort │ ├── columns: o_orderdate:13!null o_shippriority:16!null l_orderkey:18!null sum:35!null + │ ├── immutable │ ├── key: (18) │ ├── fd: (18)-->(13,16,35) │ ├── ordering: -35,+13 @@ -354,10 +359,12 @@ limit │ └── group-by │ ├── columns: o_orderdate:13!null o_shippriority:16!null l_orderkey:18!null sum:35!null │ ├── grouping columns: l_orderkey:18!null + │ ├── immutable │ ├── key: (18) │ ├── fd: (18)-->(13,16,35) │ ├── project │ │ ├── columns: column34:34!null o_orderdate:13!null o_shippriority:16!null l_orderkey:18!null + │ │ ├── immutable │ │ ├── fd: (18)-->(13,16) │ │ ├── inner-join (lookup lineitem) │ │ │ ├── columns: c_custkey:1!null c_mktsegment:7!null o_orderkey:9!null o_custkey:10!null o_orderdate:13!null o_shippriority:16!null l_orderkey:18!null l_extendedprice:23!null l_discount:24!null l_shipdate:28!null @@ -393,7 +400,7 @@ limit │ │ │ └── filters │ │ │ └── l_shipdate:28 > '1995-03-15' [outer=(28), constraints=(/28: [/'1995-03-16' - ]; tight)] │ │ └── projections - │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column34:34, outer=(23,24)] + │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column34:34, outer=(23,24), immutable] │ └── aggregations │ ├── sum [as=sum:35, outer=(34)] │ │ └── column34:34 @@ 
-516,16 +523,19 @@ ORDER BY ---- sort ├── columns: n_name:42!null revenue:49!null + ├── immutable ├── key: (42) ├── fd: (42)-->(49) ├── ordering: -49 └── group-by ├── columns: n_name:42!null sum:49!null ├── grouping columns: n_name:42!null + ├── immutable ├── key: (42) ├── fd: (42)-->(49) ├── project │ ├── columns: column48:48!null n_name:42!null + │ ├── immutable │ ├── inner-join (hash) │ │ ├── columns: c_custkey:1!null c_nationkey:4!null o_orderkey:9!null o_custkey:10!null o_orderdate:13!null l_orderkey:18!null l_suppkey:20!null l_extendedprice:23!null l_discount:24!null s_suppkey:34!null s_nationkey:37!null n_nationkey:41!null n_name:42!null n_regionkey:43!null r_regionkey:45!null r_name:46!null │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) @@ -588,7 +598,7 @@ sort │ │ ├── c_custkey:1 = o_custkey:10 [outer=(1,10), constraints=(/1: (/NULL - ]; /10: (/NULL - ]), fd=(1)==(10), (10)==(1)] │ │ └── c_nationkey:4 = s_nationkey:37 [outer=(4,37), constraints=(/4: (/NULL - ]; /37: (/NULL - ]), fd=(4)==(37), (37)==(4)] │ └── projections - │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column48:48, outer=(23,24)] + │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column48:48, outer=(23,24), immutable] └── aggregations └── sum [as=sum:49, outer=(48)] └── column48:48 @@ -622,10 +632,12 @@ WHERE scalar-group-by ├── columns: revenue:18 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(18) ├── project │ ├── columns: column17:17!null + │ ├── immutable │ ├── select │ │ ├── columns: l_quantity:5!null l_extendedprice:6!null l_discount:7!null l_shipdate:11!null │ │ ├── index-join lineitem @@ -639,7 +651,7 @@ scalar-group-by │ │ ├── (l_discount:7 >= 0.05) AND (l_discount:7 <= 0.07) [outer=(7), constraints=(/7: [/0.05 - /0.07]; tight)] │ │ └── l_quantity:5 < 24.0 [outer=(5), constraints=(/5: (/NULL - /23.999999999999996]; tight)] │ └── projections - │ └── l_extendedprice:6 * l_discount:7 [as=column17:17, outer=(6,7)] + │ └── 
l_extendedprice:6 * l_discount:7 [as=column17:17, outer=(6,7), immutable] └── aggregations └── sum [as=sum:18, outer=(17)] └── column17:17 @@ -759,7 +771,7 @@ sort │ │ └── s_nationkey:4 = n1.n_nationkey:41 [outer=(4,41), constraints=(/4: (/NULL - ]; /41: (/NULL - ]), fd=(4)==(41), (41)==(4)] │ └── projections │ ├── extract('year', l_shipdate:18) [as=l_year:49, outer=(18), immutable] - │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=volume:50, outer=(13,14)] + │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=volume:50, outer=(13,14), immutable] └── aggregations └── sum [as=sum:51, outer=(50)] └── volume:50 @@ -934,7 +946,7 @@ sort │ │ │ │ └── p_partkey:1 = l_partkey:18 [outer=(1,18), constraints=(/1: (/NULL - ]; /18: (/NULL - ]), fd=(1)==(18), (18)==(1)] │ │ │ └── projections │ │ │ ├── extract('year', o_orderdate:37) [as=o_year:61, outer=(37), immutable] - │ │ │ └── l_extendedprice:22 * (1.0 - l_discount:23) [as=volume:62, outer=(22,23)] + │ │ │ └── l_extendedprice:22 * (1.0 - l_discount:23) [as=volume:62, outer=(22,23), immutable] │ │ └── projections │ │ └── CASE WHEN n2.n_name:55 = 'BRAZIL' THEN volume:62 ELSE 0.0 END [as=column63:63, outer=(55,62)] │ └── aggregations @@ -1060,7 +1072,7 @@ sort │ │ └── p_name:2 LIKE '%green%' [outer=(2), constraints=(/2: (/NULL - ])] │ └── projections │ ├── extract('year', o_orderdate:42) [as=o_year:51, outer=(42), immutable] - │ └── (l_extendedprice:22 * (1.0 - l_discount:23)) - (ps_supplycost:36 * l_quantity:21) [as=amount:52, outer=(21-23,36)] + │ └── (l_extendedprice:22 * (1.0 - l_discount:23)) - (ps_supplycost:36 * l_quantity:21) [as=amount:52, outer=(21-23,36), immutable] └── aggregations └── sum [as=sum:53, outer=(52)] └── amount:52 @@ -1117,11 +1129,13 @@ limit ├── columns: c_custkey:1!null c_name:2!null revenue:39!null c_acctbal:6!null n_name:35!null c_address:3!null c_phone:5!null c_comment:8!null ├── internal-ordering: -39 ├── cardinality: [0 - 20] + ├── immutable ├── key: (1) ├── fd: (1)-->(2,3,5,6,8,35,39) 
├── ordering: -39 ├── sort │ ├── columns: c_custkey:1!null c_name:2!null c_address:3!null c_phone:5!null c_acctbal:6!null c_comment:8!null n_name:35!null sum:39!null + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2,3,5,6,8,35,39) │ ├── ordering: -39 @@ -1129,10 +1143,12 @@ limit │ └── group-by │ ├── columns: c_custkey:1!null c_name:2!null c_address:3!null c_phone:5!null c_acctbal:6!null c_comment:8!null n_name:35!null sum:39!null │ ├── grouping columns: c_custkey:1!null + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2,3,5,6,8,35,39) │ ├── project │ │ ├── columns: column38:38!null c_custkey:1!null c_name:2!null c_address:3!null c_phone:5!null c_acctbal:6!null c_comment:8!null n_name:35!null + │ │ ├── immutable │ │ ├── fd: (1)-->(2,3,5,6,8,35) │ │ ├── inner-join (hash) │ │ │ ├── columns: c_custkey:1!null c_name:2!null c_address:3!null c_nationkey:4!null c_phone:5!null c_acctbal:6!null c_comment:8!null o_orderkey:9!null o_custkey:10!null o_orderdate:13!null l_orderkey:18!null l_extendedprice:23!null l_discount:24!null l_returnflag:26!null n_nationkey:34!null n_name:35!null @@ -1170,7 +1186,7 @@ limit │ │ │ └── filters │ │ │ └── c_nationkey:4 = n_nationkey:34 [outer=(4,34), constraints=(/4: (/NULL - ]; /34: (/NULL - ]), fd=(4)==(34), (34)==(4)] │ │ └── projections - │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column38:38, outer=(23,24)] + │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column38:38, outer=(23,24), immutable] │ └── aggregations │ ├── sum [as=sum:39, outer=(38)] │ │ └── column38:38 @@ -1334,7 +1350,7 @@ sort │ └── sum [as=sum:36, outer=(35)] │ └── column35:35 └── projections - └── sum:36 * 0.0001 [as="?column?":37, outer=(36)] + └── sum:36 * 0.0001 [as="?column?":37, outer=(36), immutable] # -------------------------------------------------- # Q12 @@ -1527,10 +1543,12 @@ project ├── scalar-group-by │ ├── columns: sum:27 sum:29 │ ├── cardinality: [1 - 1] + │ ├── immutable │ ├── key: () │ ├── fd: ()-->(27,29) │ ├── project │ │ ├── 
columns: column26:26!null column28:28!null + │ │ ├── immutable │ │ ├── inner-join (hash) │ │ │ ├── columns: l_partkey:2!null l_extendedprice:6!null l_discount:7!null l_shipdate:11!null p_partkey:17!null p_type:21!null │ │ │ ├── multiplicity: left-rows(zero-or-more), right-rows(exactly-one) @@ -1549,8 +1567,8 @@ project │ │ │ └── filters │ │ │ └── l_partkey:2 = p_partkey:17 [outer=(2,17), constraints=(/2: (/NULL - ]; /17: (/NULL - ]), fd=(2)==(17), (17)==(2)] │ │ └── projections - │ │ ├── CASE WHEN p_type:21 LIKE 'PROMO%' THEN l_extendedprice:6 * (1.0 - l_discount:7) ELSE 0.0 END [as=column26:26, outer=(6,7,21)] - │ │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column28:28, outer=(6,7)] + │ │ ├── CASE WHEN p_type:21 LIKE 'PROMO%' THEN l_extendedprice:6 * (1.0 - l_discount:7) ELSE 0.0 END [as=column26:26, outer=(6,7,21), immutable] + │ │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column28:28, outer=(6,7), immutable] │ └── aggregations │ ├── sum [as=sum:27, outer=(26)] │ │ └── column26:26 @@ -1607,6 +1625,7 @@ ORDER BY ---- project ├── columns: s_suppkey:1!null s_name:2!null s_address:3!null s_phone:5!null total_revenue:25!null + ├── immutable ├── key: (1) ├── fd: (1)-->(2,3,5,25) ├── ordering: +1 @@ -1614,6 +1633,7 @@ project ├── columns: s_suppkey:1!null s_name:2!null s_address:3!null s_phone:5!null l_suppkey:10!null sum:25!null ├── left ordering: +1 ├── right ordering: +10 + ├── immutable ├── key: (10) ├── fd: (1)-->(2,3,5), (10)-->(25), (1)==(10), (10)==(1) ├── ordering: +(1|10) [actual: +1] @@ -1624,20 +1644,24 @@ project │ └── ordering: +1 ├── sort │ ├── columns: l_suppkey:10!null sum:25!null + │ ├── immutable │ ├── key: (10) │ ├── fd: (10)-->(25) │ ├── ordering: +10 │ └── select │ ├── columns: l_suppkey:10!null sum:25!null + │ ├── immutable │ ├── key: (10) │ ├── fd: (10)-->(25) │ ├── group-by │ │ ├── columns: l_suppkey:10!null sum:25!null │ │ ├── grouping columns: l_suppkey:10!null + │ │ ├── immutable │ │ ├── key: (10) │ │ ├── fd: (10)-->(25) │ │ ├── 
project │ │ │ ├── columns: column24:24!null l_suppkey:10!null + │ │ │ ├── immutable │ │ │ ├── index-join lineitem │ │ │ │ ├── columns: l_suppkey:10!null l_extendedprice:13!null l_discount:14!null l_shipdate:18!null │ │ │ │ └── scan lineitem@l_sd @@ -1646,26 +1670,29 @@ project │ │ │ │ ├── key: (8,11) │ │ │ │ └── fd: (8,11)-->(18) │ │ │ └── projections - │ │ │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=column24:24, outer=(13,14)] + │ │ │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=column24:24, outer=(13,14), immutable] │ │ └── aggregations │ │ └── sum [as=sum:25, outer=(24)] │ │ └── column24:24 │ └── filters - │ └── eq [outer=(25), subquery, constraints=(/25: (/NULL - ])] + │ └── eq [outer=(25), immutable, subquery, constraints=(/25: (/NULL - ])] │ ├── sum:25 │ └── subquery │ └── scalar-group-by │ ├── columns: max:44 │ ├── cardinality: [1 - 1] + │ ├── immutable │ ├── key: () │ ├── fd: ()-->(44) │ ├── group-by │ │ ├── columns: l_suppkey:28!null sum:43!null │ │ ├── grouping columns: l_suppkey:28!null + │ │ ├── immutable │ │ ├── key: (28) │ │ ├── fd: (28)-->(43) │ │ ├── project │ │ │ ├── columns: column42:42!null l_suppkey:28!null + │ │ │ ├── immutable │ │ │ ├── index-join lineitem │ │ │ │ ├── columns: l_suppkey:28!null l_extendedprice:31!null l_discount:32!null l_shipdate:36!null │ │ │ │ └── scan lineitem@l_sd @@ -1674,7 +1701,7 @@ project │ │ │ │ ├── key: (26,29) │ │ │ │ └── fd: (26,29)-->(36) │ │ │ └── projections - │ │ │ └── l_extendedprice:31 * (1.0 - l_discount:32) [as=column42:42, outer=(31,32)] + │ │ │ └── l_extendedprice:31 * (1.0 - l_discount:32) [as=column42:42, outer=(31,32), immutable] │ │ └── aggregations │ │ └── sum [as=sum:43, outer=(42)] │ │ └── column42:42 @@ -1820,25 +1847,30 @@ WHERE project ├── columns: avg_yearly:45 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(45) ├── scalar-group-by │ ├── columns: sum:44 │ ├── cardinality: [1 - 1] + │ ├── immutable │ ├── key: () │ ├── fd: ()-->(44) │ ├── inner-join (lookup 
lineitem) │ │ ├── columns: l_partkey:2!null l_quantity:5!null l_extendedprice:6!null p_partkey:17!null "?column?":43!null │ │ ├── key columns: [1 4] = [1 4] │ │ ├── lookup columns are key + │ │ ├── immutable │ │ ├── fd: (17)-->(43), (2)==(17), (17)==(2) │ │ ├── inner-join (lookup lineitem@l_pk) │ │ │ ├── columns: l_orderkey:1!null l_partkey:2!null l_linenumber:4!null p_partkey:17!null "?column?":43 │ │ │ ├── key columns: [17] = [2] + │ │ │ ├── immutable │ │ │ ├── key: (1,4) │ │ │ ├── fd: (17)-->(43), (1,4)-->(2), (2)==(17), (17)==(2) │ │ │ ├── project │ │ │ │ ├── columns: "?column?":43 p_partkey:17!null + │ │ │ │ ├── immutable │ │ │ │ ├── key: (17) │ │ │ │ ├── fd: (17)-->(43) │ │ │ │ ├── group-by @@ -1878,7 +1910,7 @@ project │ │ │ │ │ └── avg [as=avg:42, outer=(30)] │ │ │ │ │ └── l_quantity:30 │ │ │ │ └── projections - │ │ │ │ └── avg:42 * 0.2 [as="?column?":43, outer=(42)] + │ │ │ │ └── avg:42 * 0.2 [as="?column?":43, outer=(42), immutable] │ │ │ └── filters (true) │ │ └── filters │ │ └── l_quantity:5 < "?column?":43 [outer=(5,43), constraints=(/5: (/NULL - ]; /43: (/NULL - ])] @@ -2067,10 +2099,12 @@ WHERE scalar-group-by ├── columns: revenue:27 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(27) ├── project │ ├── columns: column26:26!null + │ ├── immutable │ ├── inner-join (hash) │ │ ├── columns: l_partkey:2!null l_quantity:5!null l_extendedprice:6!null l_discount:7!null l_shipinstruct:14!null l_shipmode:15!null p_partkey:17!null p_brand:20!null p_size:22!null p_container:23!null │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) @@ -2097,7 +2131,7 @@ scalar-group-by │ │ ├── p_partkey:17 = l_partkey:2 [outer=(2,17), constraints=(/2: (/NULL - ]; /17: (/NULL - ]), fd=(2)==(17), (17)==(2)] │ │ └── ((((((p_brand:20 = 'Brand#12') AND (p_container:23 IN ('SM BOX', 'SM CASE', 'SM PACK', 'SM PKG'))) AND (l_quantity:5 >= 1.0)) AND (l_quantity:5 <= 11.0)) AND (p_size:22 <= 5)) OR (((((p_brand:20 = 'Brand#23') AND (p_container:23 IN 
('MED BAG', 'MED BOX', 'MED PACK', 'MED PKG'))) AND (l_quantity:5 >= 10.0)) AND (l_quantity:5 <= 20.0)) AND (p_size:22 <= 10))) OR (((((p_brand:20 = 'Brand#34') AND (p_container:23 IN ('LG BOX', 'LG CASE', 'LG PACK', 'LG PKG'))) AND (l_quantity:5 >= 20.0)) AND (l_quantity:5 <= 30.0)) AND (p_size:22 <= 15)) [outer=(5,20,22,23), constraints=(/5: [/1.0 - /30.0]; /20: [/'Brand#12' - /'Brand#12'] [/'Brand#23' - /'Brand#23'] [/'Brand#34' - /'Brand#34']; /22: (/NULL - /15]; /23: [/'LG BOX' - /'LG BOX'] [/'LG CASE' - /'LG CASE'] [/'LG PACK' - /'LG PACK'] [/'LG PKG' - /'LG PKG'] [/'MED BAG' - /'MED BAG'] [/'MED BOX' - /'MED BOX'] [/'MED PACK' - /'MED PACK'] [/'MED PKG' - /'MED PKG'] [/'SM BOX' - /'SM BOX'] [/'SM CASE' - /'SM CASE'] [/'SM PACK' - /'SM PACK'] [/'SM PKG' - /'SM PKG'])] │ └── projections - │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column26:26, outer=(6,7)] + │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column26:26, outer=(6,7), immutable] └── aggregations └── sum [as=sum:27, outer=(26)] └── column26:26 @@ -2157,16 +2191,20 @@ ORDER BY ---- sort ├── columns: s_name:2!null s_address:3!null + ├── immutable ├── ordering: +2 └── project ├── columns: s_name:2!null s_address:3!null + ├── immutable └── inner-join (hash) ├── columns: s_suppkey:1!null s_name:2!null s_address:3!null s_nationkey:4!null n_nationkey:8!null n_name:9!null ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + ├── immutable ├── key: (1) ├── fd: ()-->(9), (1)-->(2-4), (4)==(8), (8)==(4) ├── semi-join (hash) │ ├── columns: s_suppkey:1!null s_name:2!null s_address:3!null s_nationkey:4!null + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2-4) │ ├── scan supplier @@ -2175,18 +2213,22 @@ sort │ │ └── fd: (1)-->(2-4) │ ├── project │ │ ├── columns: ps_partkey:12!null ps_suppkey:13!null + │ │ ├── immutable │ │ ├── key: (12,13) │ │ └── project │ │ ├── columns: ps_partkey:12!null ps_suppkey:13!null p_partkey:17!null + │ │ ├── immutable │ │ ├── key: (13,17) │ │ ├── fd: 
(12)==(17), (17)==(12) │ │ └── inner-join (hash) │ │ ├── columns: ps_partkey:12!null ps_suppkey:13!null ps_availqty:14!null p_partkey:17!null p_name:18!null sum:42 │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + │ │ ├── immutable │ │ ├── key: (13,17) │ │ ├── fd: (12,13)-->(14,42), (17)-->(18), (12)==(17), (17)==(12) │ │ ├── select │ │ │ ├── columns: ps_partkey:12!null ps_suppkey:13!null ps_availqty:14!null sum:42 + │ │ │ ├── immutable │ │ │ ├── key: (12,13) │ │ │ ├── fd: (12,13)-->(14,42) │ │ │ ├── group-by @@ -2217,7 +2259,7 @@ sort │ │ │ │ └── const-agg [as=ps_availqty:14, outer=(14)] │ │ │ │ └── ps_availqty:14 │ │ │ └── filters - │ │ │ └── ps_availqty:14 > (sum:42 * 0.5) [outer=(14,42), constraints=(/14: (/NULL - ])] + │ │ │ └── ps_availqty:14 > (sum:42 * 0.5) [outer=(14,42), immutable, constraints=(/14: (/NULL - ])] │ │ ├── select │ │ │ ├── columns: p_partkey:17!null p_name:18!null │ │ │ ├── key: (17) diff --git a/pkg/sql/opt/xform/testdata/external/tpch-no-stats b/pkg/sql/opt/xform/testdata/external/tpch-no-stats index afc1dfbeb09c..b248d7168fb7 100644 --- a/pkg/sql/opt/xform/testdata/external/tpch-no-stats +++ b/pkg/sql/opt/xform/testdata/external/tpch-no-stats @@ -40,14 +40,17 @@ ORDER BY group-by ├── columns: l_returnflag:9!null l_linestatus:10!null sum_qty:17!null sum_base_price:18!null sum_disc_price:20!null sum_charge:22!null avg_qty:23!null avg_price:24!null avg_disc:25!null count_order:26!null ├── grouping columns: l_returnflag:9!null l_linestatus:10!null + ├── immutable ├── key: (9,10) ├── fd: (9,10)-->(17,18,20,22-26) ├── ordering: +9,+10 ├── sort │ ├── columns: l_quantity:5!null l_extendedprice:6!null l_discount:7!null l_returnflag:9!null l_linestatus:10!null column19:19!null column21:21!null + │ ├── immutable │ ├── ordering: +9,+10 │ └── project │ ├── columns: column19:19!null column21:21!null l_quantity:5!null l_extendedprice:6!null l_discount:7!null l_returnflag:9!null l_linestatus:10!null + │ ├── immutable │ ├── select │ 
│ ├── columns: l_quantity:5!null l_extendedprice:6!null l_discount:7!null l_tax:8!null l_returnflag:9!null l_linestatus:10!null l_shipdate:11!null │ │ ├── scan lineitem @@ -55,8 +58,8 @@ group-by │ │ └── filters │ │ └── l_shipdate:11 <= '1998-09-02' [outer=(11), constraints=(/11: (/NULL - /'1998-09-02']; tight)] │ └── projections - │ ├── l_extendedprice:6 * (1.0 - l_discount:7) [as=column19:19, outer=(6,7)] - │ └── (l_extendedprice:6 * (1.0 - l_discount:7)) * (l_tax:8 + 1.0) [as=column21:21, outer=(6-8)] + │ ├── l_extendedprice:6 * (1.0 - l_discount:7) [as=column19:19, outer=(6,7), immutable] + │ └── (l_extendedprice:6 * (1.0 - l_discount:7)) * (l_tax:8 + 1.0) [as=column21:21, outer=(6-8), immutable] └── aggregations ├── sum [as=sum:17, outer=(5)] │ └── l_quantity:5 @@ -339,11 +342,13 @@ limit ├── columns: l_orderkey:18!null revenue:35!null o_orderdate:13!null o_shippriority:16!null ├── internal-ordering: -35,+13 ├── cardinality: [0 - 10] + ├── immutable ├── key: (18) ├── fd: (18)-->(13,16,35) ├── ordering: -35,+13 ├── sort │ ├── columns: o_orderdate:13!null o_shippriority:16!null l_orderkey:18!null sum:35!null + │ ├── immutable │ ├── key: (18) │ ├── fd: (18)-->(13,16,35) │ ├── ordering: -35,+13 @@ -351,10 +356,12 @@ limit │ └── group-by │ ├── columns: o_orderdate:13!null o_shippriority:16!null l_orderkey:18!null sum:35!null │ ├── grouping columns: l_orderkey:18!null + │ ├── immutable │ ├── key: (18) │ ├── fd: (18)-->(13,16,35) │ ├── project │ │ ├── columns: column34:34!null o_orderdate:13!null o_shippriority:16!null l_orderkey:18!null + │ │ ├── immutable │ │ ├── fd: (18)-->(13,16) │ │ ├── inner-join (hash) │ │ │ ├── columns: c_custkey:1!null c_mktsegment:7!null o_orderkey:9!null o_custkey:10!null o_orderdate:13!null o_shippriority:16!null l_orderkey:18!null l_extendedprice:23!null l_discount:24!null l_shipdate:28!null @@ -393,7 +400,7 @@ limit │ │ │ └── filters │ │ │ └── l_orderkey:18 = o_orderkey:9 [outer=(9,18), constraints=(/9: (/NULL - ]; /18: (/NULL - ]), 
fd=(9)==(18), (18)==(9)] │ │ └── projections - │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column34:34, outer=(23,24)] + │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column34:34, outer=(23,24), immutable] │ └── aggregations │ ├── sum [as=sum:35, outer=(34)] │ │ └── column34:34 @@ -520,16 +527,19 @@ ORDER BY ---- sort ├── columns: n_name:42!null revenue:49!null + ├── immutable ├── key: (42) ├── fd: (42)-->(49) ├── ordering: -49 └── group-by ├── columns: n_name:42!null sum:49!null ├── grouping columns: n_name:42!null + ├── immutable ├── key: (42) ├── fd: (42)-->(49) ├── project │ ├── columns: column48:48!null n_name:42!null + │ ├── immutable │ ├── inner-join (hash) │ │ ├── columns: c_custkey:1!null c_nationkey:4!null o_orderkey:9!null o_custkey:10!null o_orderdate:13!null l_orderkey:18!null l_suppkey:20!null l_extendedprice:23!null l_discount:24!null s_suppkey:34!null s_nationkey:37!null n_nationkey:41!null n_name:42!null n_regionkey:43!null r_regionkey:45!null r_name:46!null │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) @@ -600,7 +610,7 @@ sort │ │ ├── c_custkey:1 = o_custkey:10 [outer=(1,10), constraints=(/1: (/NULL - ]; /10: (/NULL - ]), fd=(1)==(10), (10)==(1)] │ │ └── c_nationkey:4 = s_nationkey:37 [outer=(4,37), constraints=(/4: (/NULL - ]; /37: (/NULL - ]), fd=(4)==(37), (37)==(4)] │ └── projections - │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column48:48, outer=(23,24)] + │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column48:48, outer=(23,24), immutable] └── aggregations └── sum [as=sum:49, outer=(48)] └── column48:48 @@ -634,10 +644,12 @@ WHERE scalar-group-by ├── columns: revenue:18 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(18) ├── project │ ├── columns: column17:17!null + │ ├── immutable │ ├── select │ │ ├── columns: l_quantity:5!null l_extendedprice:6!null l_discount:7!null l_shipdate:11!null │ │ ├── scan lineitem @@ -647,7 +659,7 @@ scalar-group-by │ │ ├── 
(l_shipdate:11 >= '1994-01-01') AND (l_shipdate:11 < '1995-01-01') [outer=(11), constraints=(/11: [/'1994-01-01' - /'1994-12-31']; tight)] │ │ └── l_quantity:5 < 24.0 [outer=(5), constraints=(/5: (/NULL - /23.999999999999996]; tight)] │ └── projections - │ └── l_extendedprice:6 * l_discount:7 [as=column17:17, outer=(6,7)] + │ └── l_extendedprice:6 * l_discount:7 [as=column17:17, outer=(6,7), immutable] └── aggregations └── sum [as=sum:18, outer=(17)] └── column17:17 @@ -780,7 +792,7 @@ group-by │ │ └── s_nationkey:4 = n1.n_nationkey:41 [outer=(4,41), constraints=(/4: (/NULL - ]; /41: (/NULL - ]), fd=(4)==(41), (41)==(4)] │ └── projections │ ├── extract('year', l_shipdate:18) [as=l_year:49, outer=(18), immutable] - │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=volume:50, outer=(13,14)] + │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=volume:50, outer=(13,14), immutable] └── aggregations └── sum [as=sum:51, outer=(50)] └── volume:50 @@ -943,7 +955,7 @@ sort │ │ │ │ └── p_type:5 = 'ECONOMY ANODIZED STEEL' [outer=(5), constraints=(/5: [/'ECONOMY ANODIZED STEEL' - /'ECONOMY ANODIZED STEEL']; tight), fd=()-->(5)] │ │ │ └── projections │ │ │ ├── extract('year', o_orderdate:37) [as=o_year:61, outer=(37), immutable] - │ │ │ └── l_extendedprice:22 * (1.0 - l_discount:23) [as=volume:62, outer=(22,23)] + │ │ │ └── l_extendedprice:22 * (1.0 - l_discount:23) [as=volume:62, outer=(22,23), immutable] │ │ └── projections │ │ └── CASE WHEN n2.n_name:55 = 'BRAZIL' THEN volume:62 ELSE 0.0 END [as=column63:63, outer=(55,62)] │ └── aggregations @@ -1069,7 +1081,7 @@ sort │ │ └── p_name:2 LIKE '%green%' [outer=(2), constraints=(/2: (/NULL - ])] │ └── projections │ ├── extract('year', o_orderdate:42) [as=o_year:51, outer=(42), immutable] - │ └── (l_extendedprice:22 * (1.0 - l_discount:23)) - (ps_supplycost:36 * l_quantity:21) [as=amount:52, outer=(21-23,36)] + │ └── (l_extendedprice:22 * (1.0 - l_discount:23)) - (ps_supplycost:36 * l_quantity:21) [as=amount:52, 
outer=(21-23,36), immutable] └── aggregations └── sum [as=sum:53, outer=(52)] └── amount:52 @@ -1126,11 +1138,13 @@ limit ├── columns: c_custkey:1!null c_name:2!null revenue:39!null c_acctbal:6!null n_name:35!null c_address:3!null c_phone:5!null c_comment:8!null ├── internal-ordering: -39 ├── cardinality: [0 - 20] + ├── immutable ├── key: (1) ├── fd: (1)-->(2,3,5,6,8,35,39) ├── ordering: -39 ├── sort │ ├── columns: c_custkey:1!null c_name:2!null c_address:3!null c_phone:5!null c_acctbal:6!null c_comment:8!null n_name:35!null sum:39!null + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2,3,5,6,8,35,39) │ ├── ordering: -39 @@ -1138,10 +1152,12 @@ limit │ └── group-by │ ├── columns: c_custkey:1!null c_name:2!null c_address:3!null c_phone:5!null c_acctbal:6!null c_comment:8!null n_name:35!null sum:39!null │ ├── grouping columns: c_custkey:1!null + │ ├── immutable │ ├── key: (1) │ ├── fd: (1)-->(2,3,5,6,8,35,39) │ ├── project │ │ ├── columns: column38:38!null c_custkey:1!null c_name:2!null c_address:3!null c_phone:5!null c_acctbal:6!null c_comment:8!null n_name:35!null + │ │ ├── immutable │ │ ├── fd: (1)-->(2,3,5,6,8,35) │ │ ├── inner-join (lookup nation) │ │ │ ├── columns: c_custkey:1!null c_name:2!null c_address:3!null c_nationkey:4!null c_phone:5!null c_acctbal:6!null c_comment:8!null o_orderkey:9!null o_custkey:10!null o_orderdate:13!null l_orderkey:18!null l_extendedprice:23!null l_discount:24!null l_returnflag:26!null n_nationkey:34!null n_name:35!null @@ -1170,7 +1186,7 @@ limit │ │ │ │ └── filters (true) │ │ │ └── filters (true) │ │ └── projections - │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column38:38, outer=(23,24)] + │ │ └── l_extendedprice:23 * (1.0 - l_discount:24) [as=column38:38, outer=(23,24), immutable] │ └── aggregations │ ├── sum [as=sum:39, outer=(38)] │ │ └── column38:38 @@ -1328,7 +1344,7 @@ sort │ └── sum [as=sum:36, outer=(35)] │ └── column35:35 └── projections - └── sum:36 * 0.0001 [as="?column?":37, outer=(36)] + └── sum:36 * 
0.0001 [as="?column?":37, outer=(36), immutable] # -------------------------------------------------- # Q12 @@ -1518,10 +1534,12 @@ project ├── scalar-group-by │ ├── columns: sum:27 sum:29 │ ├── cardinality: [1 - 1] + │ ├── immutable │ ├── key: () │ ├── fd: ()-->(27,29) │ ├── project │ │ ├── columns: column26:26!null column28:28!null + │ │ ├── immutable │ │ ├── inner-join (hash) │ │ │ ├── columns: l_partkey:2!null l_extendedprice:6!null l_discount:7!null l_shipdate:11!null p_partkey:17!null p_type:21!null │ │ │ ├── multiplicity: left-rows(zero-or-more), right-rows(exactly-one) @@ -1539,8 +1557,8 @@ project │ │ │ └── filters │ │ │ └── l_partkey:2 = p_partkey:17 [outer=(2,17), constraints=(/2: (/NULL - ]; /17: (/NULL - ]), fd=(2)==(17), (17)==(2)] │ │ └── projections - │ │ ├── CASE WHEN p_type:21 LIKE 'PROMO%' THEN l_extendedprice:6 * (1.0 - l_discount:7) ELSE 0.0 END [as=column26:26, outer=(6,7,21)] - │ │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column28:28, outer=(6,7)] + │ │ ├── CASE WHEN p_type:21 LIKE 'PROMO%' THEN l_extendedprice:6 * (1.0 - l_discount:7) ELSE 0.0 END [as=column26:26, outer=(6,7,21), immutable] + │ │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column28:28, outer=(6,7), immutable] │ └── aggregations │ ├── sum [as=sum:27, outer=(26)] │ │ └── column26:26 @@ -1597,30 +1615,36 @@ ORDER BY ---- sort ├── columns: s_suppkey:1!null s_name:2!null s_address:3!null s_phone:5!null total_revenue:25!null + ├── immutable ├── key: (1) ├── fd: (1)-->(2,3,5,25) ├── ordering: +1 └── project ├── columns: s_suppkey:1!null s_name:2!null s_address:3!null s_phone:5!null sum:25!null + ├── immutable ├── key: (1) ├── fd: (1)-->(2,3,5,25) └── inner-join (lookup supplier) ├── columns: s_suppkey:1!null s_name:2!null s_address:3!null s_phone:5!null l_suppkey:10!null sum:25!null ├── key columns: [10] = [1] ├── lookup columns are key + ├── immutable ├── key: (10) ├── fd: (1)-->(2,3,5), (10)-->(25), (1)==(10), (10)==(1) ├── select │ ├── columns: l_suppkey:10!null 
sum:25!null + │ ├── immutable │ ├── key: (10) │ ├── fd: (10)-->(25) │ ├── group-by │ │ ├── columns: l_suppkey:10!null sum:25!null │ │ ├── grouping columns: l_suppkey:10!null + │ │ ├── immutable │ │ ├── key: (10) │ │ ├── fd: (10)-->(25) │ │ ├── project │ │ │ ├── columns: column24:24!null l_suppkey:10!null + │ │ │ ├── immutable │ │ │ ├── select │ │ │ │ ├── columns: l_suppkey:10!null l_extendedprice:13!null l_discount:14!null l_shipdate:18!null │ │ │ │ ├── scan lineitem @@ -1628,26 +1652,29 @@ sort │ │ │ │ └── filters │ │ │ │ └── (l_shipdate:18 >= '1996-01-01') AND (l_shipdate:18 < '1996-04-01') [outer=(18), constraints=(/18: [/'1996-01-01' - /'1996-03-31']; tight)] │ │ │ └── projections - │ │ │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=column24:24, outer=(13,14)] + │ │ │ └── l_extendedprice:13 * (1.0 - l_discount:14) [as=column24:24, outer=(13,14), immutable] │ │ └── aggregations │ │ └── sum [as=sum:25, outer=(24)] │ │ └── column24:24 │ └── filters - │ └── eq [outer=(25), subquery, constraints=(/25: (/NULL - ])] + │ └── eq [outer=(25), immutable, subquery, constraints=(/25: (/NULL - ])] │ ├── sum:25 │ └── subquery │ └── scalar-group-by │ ├── columns: max:44 │ ├── cardinality: [1 - 1] + │ ├── immutable │ ├── key: () │ ├── fd: ()-->(44) │ ├── group-by │ │ ├── columns: l_suppkey:28!null sum:43!null │ │ ├── grouping columns: l_suppkey:28!null + │ │ ├── immutable │ │ ├── key: (28) │ │ ├── fd: (28)-->(43) │ │ ├── project │ │ │ ├── columns: column42:42!null l_suppkey:28!null + │ │ │ ├── immutable │ │ │ ├── select │ │ │ │ ├── columns: l_suppkey:28!null l_extendedprice:31!null l_discount:32!null l_shipdate:36!null │ │ │ │ ├── scan lineitem @@ -1655,7 +1682,7 @@ sort │ │ │ │ └── filters │ │ │ │ └── (l_shipdate:36 >= '1996-01-01') AND (l_shipdate:36 < '1996-04-01') [outer=(36), constraints=(/36: [/'1996-01-01' - /'1996-03-31']; tight)] │ │ │ └── projections - │ │ │ └── l_extendedprice:31 * (1.0 - l_discount:32) [as=column42:42, outer=(31,32)] + │ │ │ └── 
l_extendedprice:31 * (1.0 - l_discount:32) [as=column42:42, outer=(31,32), immutable] │ │ └── aggregations │ │ └── sum [as=sum:43, outer=(42)] │ │ └── column42:42 @@ -1795,25 +1822,30 @@ WHERE project ├── columns: avg_yearly:45 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(45) ├── scalar-group-by │ ├── columns: sum:44 │ ├── cardinality: [1 - 1] + │ ├── immutable │ ├── key: () │ ├── fd: ()-->(44) │ ├── inner-join (lookup lineitem) │ │ ├── columns: l_partkey:2!null l_quantity:5!null l_extendedprice:6!null p_partkey:17!null "?column?":43!null │ │ ├── key columns: [1 4] = [1 4] │ │ ├── lookup columns are key + │ │ ├── immutable │ │ ├── fd: (17)-->(43), (2)==(17), (17)==(2) │ │ ├── inner-join (lookup lineitem@l_pk) │ │ │ ├── columns: l_orderkey:1!null l_partkey:2!null l_linenumber:4!null p_partkey:17!null "?column?":43 │ │ │ ├── key columns: [17] = [2] + │ │ │ ├── immutable │ │ │ ├── key: (1,4) │ │ │ ├── fd: (17)-->(43), (1,4)-->(2), (2)==(17), (17)==(2) │ │ │ ├── project │ │ │ │ ├── columns: "?column?":43 p_partkey:17!null + │ │ │ │ ├── immutable │ │ │ │ ├── key: (17) │ │ │ │ ├── fd: (17)-->(43) │ │ │ │ ├── group-by @@ -1853,7 +1885,7 @@ project │ │ │ │ │ └── avg [as=avg:42, outer=(30)] │ │ │ │ │ └── l_quantity:30 │ │ │ │ └── projections - │ │ │ │ └── avg:42 * 0.2 [as="?column?":43, outer=(42)] + │ │ │ │ └── avg:42 * 0.2 [as="?column?":43, outer=(42), immutable] │ │ │ └── filters (true) │ │ └── filters │ │ └── l_quantity:5 < "?column?":43 [outer=(5,43), constraints=(/5: (/NULL - ]; /43: (/NULL - ])] @@ -2048,10 +2080,12 @@ WHERE scalar-group-by ├── columns: revenue:27 ├── cardinality: [1 - 1] + ├── immutable ├── key: () ├── fd: ()-->(27) ├── project │ ├── columns: column26:26!null + │ ├── immutable │ ├── inner-join (lookup part) │ │ ├── columns: l_partkey:2!null l_quantity:5!null l_extendedprice:6!null l_discount:7!null l_shipinstruct:14!null l_shipmode:15!null p_partkey:17!null p_brand:20!null p_size:22!null p_container:23!null │ │ ├── key 
columns: [2] = [17] @@ -2069,7 +2103,7 @@ scalar-group-by │ │ ├── ((((((p_brand:20 = 'Brand#12') AND (p_container:23 IN ('SM BOX', 'SM CASE', 'SM PACK', 'SM PKG'))) AND (l_quantity:5 >= 1.0)) AND (l_quantity:5 <= 11.0)) AND (p_size:22 <= 5)) OR (((((p_brand:20 = 'Brand#23') AND (p_container:23 IN ('MED BAG', 'MED BOX', 'MED PACK', 'MED PKG'))) AND (l_quantity:5 >= 10.0)) AND (l_quantity:5 <= 20.0)) AND (p_size:22 <= 10))) OR (((((p_brand:20 = 'Brand#34') AND (p_container:23 IN ('LG BOX', 'LG CASE', 'LG PACK', 'LG PKG'))) AND (l_quantity:5 >= 20.0)) AND (l_quantity:5 <= 30.0)) AND (p_size:22 <= 15)) [outer=(5,20,22,23), constraints=(/5: [/1.0 - /30.0]; /20: [/'Brand#12' - /'Brand#12'] [/'Brand#23' - /'Brand#23'] [/'Brand#34' - /'Brand#34']; /22: (/NULL - /15]; /23: [/'LG BOX' - /'LG BOX'] [/'LG CASE' - /'LG CASE'] [/'LG PACK' - /'LG PACK'] [/'LG PKG' - /'LG PKG'] [/'MED BAG' - /'MED BAG'] [/'MED BOX' - /'MED BOX'] [/'MED PACK' - /'MED PACK'] [/'MED PKG' - /'MED PKG'] [/'SM BOX' - /'SM BOX'] [/'SM CASE' - /'SM CASE'] [/'SM PACK' - /'SM PACK'] [/'SM PKG' - /'SM PKG'])] │ │ └── p_size:22 >= 1 [outer=(22), constraints=(/22: [/1 - ]; tight)] │ └── projections - │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column26:26, outer=(6,7)] + │ └── l_extendedprice:6 * (1.0 - l_discount:7) [as=column26:26, outer=(6,7), immutable] └── aggregations └── sum [as=sum:27, outer=(26)] └── column26:26 @@ -2129,37 +2163,46 @@ ORDER BY ---- sort ├── columns: s_name:2!null s_address:3!null + ├── immutable ├── ordering: +2 └── project ├── columns: s_name:2!null s_address:3!null + ├── immutable └── inner-join (lookup nation) ├── columns: s_suppkey:1!null s_name:2!null s_address:3!null s_nationkey:4!null n_nationkey:8!null n_name:9!null ├── key columns: [4] = [8] ├── lookup columns are key + ├── immutable ├── key: (1) ├── fd: ()-->(9), (1)-->(2-4), (4)==(8), (8)==(4) ├── project │ ├── columns: s_suppkey:1!null s_name:2!null s_address:3!null s_nationkey:4!null + │ ├── immutable │ ├── key: 
(1) │ ├── fd: (1)-->(2-4) │ └── inner-join (lookup supplier) │ ├── columns: s_suppkey:1!null s_name:2!null s_address:3!null s_nationkey:4!null ps_suppkey:13!null │ ├── key columns: [13] = [1] │ ├── lookup columns are key + │ ├── immutable │ ├── key: (13) │ ├── fd: (1)-->(2-4), (1)==(13), (13)==(1) │ ├── distinct-on │ │ ├── columns: ps_suppkey:13!null │ │ ├── grouping columns: ps_suppkey:13!null + │ │ ├── immutable │ │ ├── key: (13) │ │ └── semi-join (hash) │ │ ├── columns: ps_partkey:12!null ps_suppkey:13!null + │ │ ├── immutable │ │ ├── key: (12,13) │ │ ├── project │ │ │ ├── columns: ps_partkey:12!null ps_suppkey:13!null + │ │ │ ├── immutable │ │ │ ├── key: (12,13) │ │ │ └── select │ │ │ ├── columns: ps_partkey:12!null ps_suppkey:13!null ps_availqty:14!null sum:42 + │ │ │ ├── immutable │ │ │ ├── key: (12,13) │ │ │ ├── fd: (12,13)-->(14,42) │ │ │ ├── group-by @@ -2190,7 +2233,7 @@ sort │ │ │ │ └── const-agg [as=ps_availqty:14, outer=(14)] │ │ │ │ └── ps_availqty:14 │ │ │ └── filters - │ │ │ └── ps_availqty:14 > (sum:42 * 0.5) [outer=(14,42), constraints=(/14: (/NULL - ])] + │ │ │ └── ps_availqty:14 > (sum:42 * 0.5) [outer=(14,42), immutable, constraints=(/14: (/NULL - ])] │ │ ├── select │ │ │ ├── columns: p_partkey:17!null p_name:18!null │ │ │ ├── key: (17) diff --git a/pkg/sql/opt/xform/testdata/external/trading b/pkg/sql/opt/xform/testdata/external/trading index 1d0e0eff7284..52ab6fbdbafc 100644 --- a/pkg/sql/opt/xform/testdata/external/trading +++ b/pkg/sql/opt/xform/testdata/external/trading @@ -549,6 +549,7 @@ FROM CardsView WHERE Version > 1584421773604892000.0000000000 ---- project ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null buyprice:9!null sellprice:10!null desiredinventory:12!null actualinventory:13!null version:15!null discount:11!null maxinventory:14!null + ├── immutable ├── stats: [rows=1] ├── key: (15) ├── fd: (1)-->(2-6,9-15), (2,4,5)~~>(1,3,6), (15)-->(1-6,9-14) @@ -556,11 +557,13 @@ project ├── columns: 
id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null dealerid:7!null cardid:8!null buyprice:9!null sellprice:10!null discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null version:15!null ├── key columns: [8] = [1] ├── lookup columns are key + ├── immutable ├── stats: [rows=1, distinct(1)=0.0201621393, null(1)=0, distinct(8)=0.0201621393, null(8)=0] ├── key: (8) ├── fd: ()-->(7), (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(9-15), (15)-->(8-14), (1)==(8), (8)==(1) ├── index-join cardsinfo │ ├── columns: dealerid:7!null cardid:8!null buyprice:9!null sellprice:10!null discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null version:15!null + │ ├── immutable │ ├── stats: [rows=0.0201621426, distinct(7)=0.0201621426, null(7)=0, distinct(8)=0.0201621393, null(8)=0, distinct(9)=0.02016214, null(9)=0, distinct(10)=0.02016214, null(10)=0, distinct(11)=0.02016214, null(11)=0, distinct(12)=0.02016214, null(12)=0, distinct(13)=0.02016214, null(13)=0, distinct(14)=0.02016214, null(14)=0, distinct(15)=0.0201621426, null(15)=0, distinct(7,15)=0.0201621426, null(7,15)=0] │ │ histogram(15)= 0 0 0.020162 0 │ │ <--- 1584421773604892000.0000000000 ---------- 1584421778604892000 @@ -735,6 +738,7 @@ LIMIT 50 project ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null buyprice:9!null sellprice:10!null desiredinventory:12!null actualinventory:13!null version:15!null discount:11!null maxinventory:14!null twodaysales:25 ├── cardinality: [0 - 50] + ├── immutable ├── stats: [rows=50] ├── key: (15,25) ├── fd: (1)-->(2-6,9-15), (2,4,5)~~>(1,3,6), (15)-->(1-6,9-14) @@ -743,12 +747,14 @@ project │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null sum:24 │ ├── 
internal-ordering: +2,+4,+5 │ ├── cardinality: [0 - 50] + │ ├── immutable │ ├── stats: [rows=50] │ ├── key: (8) │ ├── fd: (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(1-6,9-15,24), (15)-->(8-14), (1)==(8), (8)==(1) │ ├── ordering: +2,+4,+5 │ ├── sort │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null sum:24 + │ │ ├── immutable │ │ ├── stats: [rows=19000, distinct(8)=19000, null(8)=0] │ │ ├── key: (8) │ │ ├── fd: (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(1-6,9-15,24), (15)-->(8-14), (1)==(8), (8)==(1) @@ -757,11 +763,13 @@ project │ │ └── group-by │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null sum:24 │ │ ├── grouping columns: cardsinfo.cardid:8!null + │ │ ├── immutable │ │ ├── stats: [rows=19000, distinct(8)=19000, null(8)=0] │ │ ├── key: (8) │ │ ├── fd: (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(1-6,9-15,24), (15)-->(8-14), (1)==(8), (8)==(1) │ │ ├── right-join (hash) │ │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null cardsinfo.dealerid:7!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null transactiondetails.dealerid:16 isbuy:17 transactiondate:18 transactiondetails.cardid:19 quantity:20 + │ │ │ ├── immutable │ │ │ ├── stats: [rows=5523583.18, distinct(8)=19000, null(8)=0, distinct(19)=19000, null(19)=0] │ │ │ ├── key: (8,18-20) │ │ │ ├── fd: ()-->(7), (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(9-15), (15)-->(8-14), (1)==(8), (8)==(1), (8,18-20)-->(16,17) 
@@ -775,11 +783,13 @@ project │ │ │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null cardsinfo.dealerid:7!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null │ │ │ │ ├── left ordering: +1 │ │ │ │ ├── right ordering: +8 + │ │ │ │ ├── immutable │ │ │ │ ├── stats: [rows=29618.4611, distinct(1)=19000, null(1)=0, distinct(2)=11668.1409, null(2)=0, distinct(5)=829, null(5)=0, distinct(6)=5572.85686, null(6)=0, distinct(7)=1, null(7)=0, distinct(8)=19000, null(8)=0, distinct(9)=21037.9959, null(9)=0, distinct(10)=21037.9959, null(10)=0, distinct(11)=21037.9959, null(11)=0, distinct(12)=21037.9959, null(12)=0, distinct(13)=21037.9959, null(13)=0, distinct(14)=21037.9959, null(14)=0, distinct(15)=23225.5851, null(15)=0] │ │ │ │ ├── key: (8) │ │ │ │ ├── fd: ()-->(7), (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(9-15), (15)-->(8-14), (1)==(8), (8)==(1) │ │ │ │ ├── select │ │ │ │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null + │ │ │ │ │ ├── immutable │ │ │ │ │ ├── stats: [rows=19000, distinct(1)=19000, null(1)=0, distinct(2)=13000, null(2)=0, distinct(5)=829, null(5)=0, distinct(6)=5601.15328, null(6)=0] │ │ │ │ │ ├── key: (1) │ │ │ │ │ ├── fd: (1)-->(2-6), (2,4,5)~~>(1,3,6) @@ -791,7 +801,7 @@ project │ │ │ │ │ │ ├── fd: (1)-->(2-6), (2,4,5)~~>(1,3,6) │ │ │ │ │ │ └── ordering: +1 │ │ │ │ │ └── filters - │ │ │ │ │ └── (name:2, setname:4, number:5) > ('Shock', '7E', 248) [outer=(2,4,5), constraints=(/2/4/5: [/'Shock'/'7E'/249 - ]; tight)] + │ │ │ │ │ └── (name:2, setname:4, number:5) > ('Shock', '7E', 248) [outer=(2,4,5), immutable, constraints=(/2/4/5: [/'Shock'/'7E'/249 - ]; tight)] │ │ │ │ ├── scan cardsinfo │ │ │ │ │ ├── columns: cardsinfo.dealerid:7!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null discount:11!null 
desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null │ │ │ │ │ ├── constraint: /7/8: [/1 - /1] @@ -942,9 +952,9 @@ sort │ │ └── filters │ │ └── id:16 = transactiondetails.cardid:4 [outer=(4,16), constraints=(/4: (/NULL - ]; /16: (/NULL - ]), fd=(4)==(16), (16)==(4)] │ └── projections - │ ├── transactiondetails.sellprice:6 * quantity:5 [as=column31:31, outer=(5,6)] - │ ├── transactiondetails.buyprice:7 * quantity:5 [as=column33:33, outer=(5,7)] - │ ├── quantity:5 * (transactiondetails.sellprice:6 - transactiondetails.buyprice:7) [as=column35:35, outer=(5-7)] + │ ├── transactiondetails.sellprice:6 * quantity:5 [as=column31:31, outer=(5,6), immutable] + │ ├── transactiondetails.buyprice:7 * quantity:5 [as=column33:33, outer=(5,7), immutable] + │ ├── quantity:5 * (transactiondetails.sellprice:6 - transactiondetails.buyprice:7) [as=column35:35, outer=(5-7), immutable] │ └── extract('day', transactiondate:3) [as=column37:37, outer=(3), stable] └── aggregations ├── sum [as=sum:32, outer=(31)] diff --git a/pkg/sql/opt/xform/testdata/external/trading-mutation b/pkg/sql/opt/xform/testdata/external/trading-mutation index 40ebb10a4454..b48c839d8c85 100644 --- a/pkg/sql/opt/xform/testdata/external/trading-mutation +++ b/pkg/sql/opt/xform/testdata/external/trading-mutation @@ -557,6 +557,7 @@ FROM CardsView WHERE Version > 1584421773604892000.0000000000 ---- project ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null buyprice:9!null sellprice:10!null desiredinventory:12!null actualinventory:13!null version:15!null discount:11!null maxinventory:14!null + ├── immutable ├── stats: [rows=1] ├── key: (15) ├── fd: (1)-->(2-6,9-15), (2,4,5)~~>(1,3,6), (15)-->(1-6,9-14) @@ -564,11 +565,13 @@ project ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null dealerid:7!null cardid:8!null buyprice:9!null sellprice:10!null discount:11!null desiredinventory:12!null actualinventory:13!null 
maxinventory:14!null version:15!null ├── key columns: [8] = [1] ├── lookup columns are key + ├── immutable ├── stats: [rows=1, distinct(1)=6.35833333e-05, null(1)=0, distinct(8)=6.35833333e-05, null(8)=0] ├── key: (8) ├── fd: ()-->(7), (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(9-15), (15)-->(8-14), (1)==(8), (8)==(1) ├── index-join cardsinfo │ ├── columns: dealerid:7!null cardid:8!null buyprice:9!null sellprice:10!null discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null version:15!null + │ ├── immutable │ ├── stats: [rows=6.35833333e-05, distinct(7)=6.35833333e-05, null(7)=0, distinct(8)=6.35833333e-05, null(8)=0, distinct(9)=6.35833333e-05, null(9)=0, distinct(10)=6.35833333e-05, null(10)=0, distinct(11)=6.35833333e-05, null(11)=0, distinct(12)=6.35833333e-05, null(12)=0, distinct(13)=6.35833333e-05, null(13)=0, distinct(14)=6.35833333e-05, null(14)=0, distinct(15)=6.35833333e-05, null(15)=0, distinct(7,15)=6.35833333e-05, null(7,15)=0] │ │ histogram(15)= │ ├── key: (8) @@ -739,6 +742,7 @@ LIMIT 50 project ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null buyprice:9!null sellprice:10!null desiredinventory:12!null actualinventory:13!null version:15!null discount:11!null maxinventory:14!null twodaysales:31 ├── cardinality: [0 - 50] + ├── immutable ├── stats: [rows=50] ├── key: (15,31) ├── fd: (1)-->(2-6,9-15), (2,4,5)~~>(1,3,6), (15)-->(1-6,9-14) @@ -747,12 +751,14 @@ project │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null cardsinfo.discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null sum:30 │ ├── internal-ordering: +2,+4,+5 │ ├── cardinality: [0 - 50] + │ ├── immutable │ ├── stats: [rows=50] │ ├── key: (8) │ ├── fd: (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(1-6,9-15,30), (15)-->(8-14), (1)==(8), (8)==(1) │ ├── ordering: +2,+4,+5 │ ├── 
sort │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null cardsinfo.discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null sum:30 + │ │ ├── immutable │ │ ├── stats: [rows=19000, distinct(8)=19000, null(8)=0] │ │ ├── key: (8) │ │ ├── fd: (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(1-6,9-15,30), (15)-->(8-14), (1)==(8), (8)==(1) @@ -761,11 +767,13 @@ project │ │ └── group-by │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null cardsinfo.discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null sum:30 │ │ ├── grouping columns: cardsinfo.cardid:8!null + │ │ ├── immutable │ │ ├── stats: [rows=19000, distinct(8)=19000, null(8)=0] │ │ ├── key: (8) │ │ ├── fd: (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(1-6,9-15,30), (15)-->(8-14), (1)==(8), (8)==(1) │ │ ├── right-join (hash) │ │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null cardsinfo.dealerid:7!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null cardsinfo.discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null transactiondetails.dealerid:20 isbuy:21 transactiondate:22 transactiondetails.cardid:23 quantity:24 + │ │ │ ├── immutable │ │ │ ├── stats: [rows=5523583.18, distinct(8)=19000, null(8)=0, distinct(23)=19000, null(23)=0] │ │ │ ├── key: (8,22-24) │ │ │ ├── fd: ()-->(7), (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(9-15), (15)-->(8-14), (1)==(8), (8)==(1), (8,22-24)-->(20,21) @@ -779,11 +787,13 @@ project │ │ │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null cardsinfo.dealerid:7!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null 
cardsinfo.sellprice:10!null cardsinfo.discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null │ │ │ │ ├── left ordering: +1 │ │ │ │ ├── right ordering: +8 + │ │ │ │ ├── immutable │ │ │ │ ├── stats: [rows=29618.4611, distinct(1)=19000, null(1)=0, distinct(2)=11668.1409, null(2)=0, distinct(5)=829, null(5)=0, distinct(6)=5572.85686, null(6)=0, distinct(7)=1, null(7)=0, distinct(8)=19000, null(8)=0, distinct(9)=21037.9959, null(9)=0, distinct(10)=21037.9959, null(10)=0, distinct(11)=21037.9959, null(11)=0, distinct(12)=21037.9959, null(12)=0, distinct(13)=21037.9959, null(13)=0, distinct(14)=21037.9959, null(14)=0, distinct(15)=23225.5851, null(15)=0] │ │ │ │ ├── key: (8) │ │ │ │ ├── fd: ()-->(7), (1)-->(2-6), (2,4,5)~~>(1,3,6), (8)-->(9-15), (15)-->(8-14), (1)==(8), (8)==(1) │ │ │ │ ├── select │ │ │ │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null + │ │ │ │ │ ├── immutable │ │ │ │ │ ├── stats: [rows=19000, distinct(1)=19000, null(1)=0, distinct(2)=13000, null(2)=0, distinct(5)=829, null(5)=0, distinct(6)=5601.15328, null(6)=0] │ │ │ │ │ ├── key: (1) │ │ │ │ │ ├── fd: (1)-->(2-6), (2,4,5)~~>(1,3,6) @@ -795,7 +805,7 @@ project │ │ │ │ │ │ ├── fd: (1)-->(2-6), (2,4,5)~~>(1,3,6) │ │ │ │ │ │ └── ordering: +1 │ │ │ │ │ └── filters - │ │ │ │ │ └── (name:2, setname:4, number:5) > ('Shock', '7E', 248) [outer=(2,4,5), constraints=(/2/4/5: [/'Shock'/'7E'/249 - ]; tight)] + │ │ │ │ │ └── (name:2, setname:4, number:5) > ('Shock', '7E', 248) [outer=(2,4,5), immutable, constraints=(/2/4/5: [/'Shock'/'7E'/249 - ]; tight)] │ │ │ │ ├── scan cardsinfo │ │ │ │ │ ├── columns: cardsinfo.dealerid:7!null cardsinfo.cardid:8!null cardsinfo.buyprice:9!null cardsinfo.sellprice:10!null cardsinfo.discount:11!null desiredinventory:12!null actualinventory:13!null maxinventory:14!null cardsinfo.version:15!null │ │ │ │ │ ├── constraint: /7/8: [/1 - /1] @@ -946,9 +956,9 @@ sort │ │ └── filters │ │ └── id:20 = 
transactiondetails.cardid:4 [outer=(4,20), constraints=(/4: (/NULL - ]; /20: (/NULL - ]), fd=(4)==(20), (20)==(4)] │ └── projections - │ ├── transactiondetails.sellprice:6 * quantity:5 [as=column39:39, outer=(5,6)] - │ ├── transactiondetails.buyprice:7 * quantity:5 [as=column41:41, outer=(5,7)] - │ ├── quantity:5 * (transactiondetails.sellprice:6 - transactiondetails.buyprice:7) [as=column43:43, outer=(5-7)] + │ ├── transactiondetails.sellprice:6 * quantity:5 [as=column39:39, outer=(5,6), immutable] + │ ├── transactiondetails.buyprice:7 * quantity:5 [as=column41:41, outer=(5,7), immutable] + │ ├── quantity:5 * (transactiondetails.sellprice:6 - transactiondetails.buyprice:7) [as=column43:43, outer=(5-7), immutable] │ └── extract('day', transactiondate:3) [as=column45:45, outer=(3), stable] └── aggregations ├── sum [as=sum:40, outer=(39)] diff --git a/pkg/sql/opt/xform/testdata/physprops/ordering b/pkg/sql/opt/xform/testdata/physprops/ordering index dcd4ddebf5f9..2c50e2b95161 100644 --- a/pkg/sql/opt/xform/testdata/physprops/ordering +++ b/pkg/sql/opt/xform/testdata/physprops/ordering @@ -130,6 +130,7 @@ SELECT x+1 AS r, y FROM a ORDER BY x, y DESC ---- project ├── columns: r:5!null y:2!null [hidden: x:1!null] + ├── immutable ├── key: (1,2) ├── fd: (1)-->(5) ├── ordering: +1,-2 @@ -138,7 +139,7 @@ project │ ├── key: (1,2) │ └── ordering: +1,-2 └── projections - └── x:1 + 1 [as=r:5, outer=(1)] + └── x:1 + 1 [as=r:5, outer=(1), immutable] # Pass through ordering to scan operator that can't support it. 
opt @@ -146,11 +147,13 @@ SELECT y, x, z+1 AS r FROM a ORDER BY x, y ---- sort (segmented) ├── columns: y:2!null x:1!null r:5 + ├── immutable ├── key: (1,2) ├── fd: (1,2)-->(5) ├── ordering: +1,+2 └── project ├── columns: r:5 x:1!null y:2!null + ├── immutable ├── key: (1,2) ├── fd: (1,2)-->(5) ├── ordering: +1 @@ -160,7 +163,7 @@ sort (segmented) │ ├── fd: (1,2)-->(3) │ └── ordering: +1 └── projections - └── z:3 + 1 [as=r:5, outer=(3)] + └── z:3 + 1 [as=r:5, outer=(3), immutable] # Ordering cannot be passed through because it includes computed column. opt @@ -168,11 +171,13 @@ SELECT x, y+1 AS computed, y FROM a ORDER BY x, computed ---- sort (segmented) ├── columns: x:1!null computed:5!null y:2!null + ├── immutable ├── key: (1,2) ├── fd: (1,2)-->(5) ├── ordering: +1,+5 └── project ├── columns: computed:5!null x:1!null y:2!null + ├── immutable ├── key: (1,2) ├── fd: (1,2)-->(5) ├── ordering: +1 @@ -181,7 +186,7 @@ sort (segmented) │ ├── key: (1,2) │ └── ordering: +1 └── projections - └── y:2 + 1.0 [as=computed:5, outer=(2)] + └── y:2 + 1.0 [as=computed:5, outer=(2), immutable] # Ordering on an expression that gets constant-folded to a simple variable. 
# Example from #43360: a boolean (possibly a placeholder) indicates the sort @@ -243,6 +248,7 @@ SELECT y, x-1 AS z FROM a WHERE x>y ORDER BY x, y DESC ---- project ├── columns: y:2!null z:5!null [hidden: x:1!null] + ├── immutable ├── key: (1,2) ├── fd: (1)-->(5) ├── ordering: +1,-2 @@ -257,7 +263,7 @@ project │ └── filters │ └── x:1 > y:2 [outer=(1,2)] └── projections - └── x:1 - 1 [as=z:5, outer=(1)] + └── x:1 - 1 [as=z:5, outer=(1), immutable] memo SELECT y, x-1 AS z FROM a WHERE x>y ORDER BY x, y DESC @@ -990,10 +996,12 @@ limit ├── columns: a:1!null b:2!null c:3!null ├── internal-ordering: +1,+2 ├── cardinality: [0 - 10] + ├── immutable ├── key: (1-3) ├── ordering: +1,+2 ├── select │ ├── columns: a:1!null b:2!null c:3!null + │ ├── immutable │ ├── key: (1-3) │ ├── ordering: +1,+2 │ ├── limit hint: 10.00 @@ -1003,7 +1011,7 @@ limit │ │ ├── ordering: +1,+2 │ │ └── limit hint: 30.00 │ └── filters - │ └── c:3 < (a:1 + b:2) [outer=(1-3)] + │ └── c:3 < (a:1 + b:2) [outer=(1-3), immutable] └── 10 opt @@ -1043,15 +1051,18 @@ SELECT * FROM (SELECT * FROM abc WHERE a+b>c ORDER BY a, b LIMIT 10) ORDER BY b sort ├── columns: a:1!null b:2!null c:3!null ├── cardinality: [0 - 10] + ├── immutable ├── key: (1-3) ├── ordering: +2 └── limit ├── columns: a:1!null b:2!null c:3!null ├── internal-ordering: +1,+2 ├── cardinality: [0 - 10] + ├── immutable ├── key: (1-3) ├── select │ ├── columns: a:1!null b:2!null c:3!null + │ ├── immutable │ ├── key: (1-3) │ ├── ordering: +1,+2 │ ├── limit hint: 10.00 @@ -1061,7 +1072,7 @@ sort │ │ ├── ordering: +1,+2 │ │ └── limit hint: 30.00 │ └── filters - │ └── c:3 < (a:1 + b:2) [outer=(1-3)] + │ └── c:3 < (a:1 + b:2) [outer=(1-3), immutable] └── 10 opt @@ -1101,10 +1112,12 @@ limit ├── columns: a:1!null b:2!null c:3!null ├── internal-ordering: +1,+2 ├── cardinality: [0 - 10] + ├── immutable ├── key: (1-3) ├── ordering: +1 ├── select │ ├── columns: a:1!null b:2!null c:3!null + │ ├── immutable │ ├── key: (1-3) │ ├── ordering: +1,+2 │ ├── limit hint: 
10.00 @@ -1114,7 +1127,7 @@ limit │ │ ├── ordering: +1,+2 │ │ └── limit hint: 30.00 │ └── filters - │ └── c:3 < (a:1 + b:2) [outer=(1-3)] + │ └── c:3 < (a:1 + b:2) [outer=(1-3), immutable] └── 10 opt @@ -1150,10 +1163,12 @@ limit ├── columns: a:1!null b:2!null c:3!null ├── internal-ordering: +1,+2 ├── cardinality: [0 - 10] + ├── immutable ├── key: (1-3) ├── ordering: +1,+2,+3 ├── select │ ├── columns: a:1!null b:2!null c:3!null + │ ├── immutable │ ├── key: (1-3) │ ├── ordering: +1,+2,+3 │ ├── limit hint: 10.00 @@ -1163,7 +1178,7 @@ limit │ │ ├── ordering: +1,+2,+3 │ │ └── limit hint: 30.00 │ └── filters - │ └── c:3 < (a:1 + b:2) [outer=(1-3)] + │ └── c:3 < (a:1 + b:2) [outer=(1-3), immutable] └── 10 opt @@ -1665,6 +1680,7 @@ sort │ └── project │ ├── columns: b_new:11 abcd.a:6 abcd.b:7 abcd.c:8 abcd.d:9 rowid:10!null │ ├── cardinality: [0 - 10] + │ ├── immutable │ ├── key: (10) │ ├── fd: (10)-->(6-9), (7)-->(11) │ ├── scan abcd@cd @@ -1673,7 +1689,7 @@ sort │ │ ├── key: (10) │ │ └── fd: (10)-->(6-9) │ └── projections - │ └── abcd.b:7 + 1 [as=b_new:11, outer=(7)] + │ └── abcd.b:7 + 1 [as=b_new:11, outer=(7), immutable] └── with-scan &1 ├── columns: a:12 b:13 c:14 d:15 ├── mapping: @@ -1718,6 +1734,7 @@ sort │ └── project │ ├── columns: b_new:11 abcd.a:6 abcd.b:7 abcd.c:8 abcd.d:9 rowid:10!null │ ├── cardinality: [0 - 10] + │ ├── immutable │ ├── key: (10) │ ├── fd: (10)-->(6-9), (7)-->(11) │ ├── scan abcd@cd @@ -1726,7 +1743,7 @@ sort │ │ ├── key: (10) │ │ └── fd: (10)-->(6-9) │ └── projections - │ └── abcd.b:7 + 1 [as=b_new:11, outer=(7)] + │ └── abcd.b:7 + 1 [as=b_new:11, outer=(7), immutable] └── select ├── columns: a:12 b:13!null c:14!null d:15 ├── cardinality: [0 - 10] diff --git a/pkg/sql/opt/xform/testdata/physprops/presentation b/pkg/sql/opt/xform/testdata/physprops/presentation index d8269dd86c88..966100a42bbf 100644 --- a/pkg/sql/opt/xform/testdata/physprops/presentation +++ b/pkg/sql/opt/xform/testdata/physprops/presentation @@ -36,6 +36,7 @@ SELECT 1+a.y 
AS plus, a.x FROM a ---- project ├── columns: plus:3 x:1!null + ├── immutable ├── key: (1) ├── fd: (1)-->(3) ├── scan a @@ -43,7 +44,7 @@ project │ ├── key: (1) │ └── fd: (1)-->(2) └── projections - └── y:2 + 1 [as=plus:3, outer=(2)] + └── y:2 + 1 [as=plus:3, outer=(2), immutable] # Join operator. opt diff --git a/pkg/sql/opt/xform/testdata/rules/computed b/pkg/sql/opt/xform/testdata/rules/computed index 09f83ebde9fe..1f013f657cf1 100644 --- a/pkg/sql/opt/xform/testdata/rules/computed +++ b/pkg/sql/opt/xform/testdata/rules/computed @@ -81,8 +81,10 @@ SELECT k_int FROM t_mult WHERE (k_int, k_int_2) > (1, 2) ---- project ├── columns: k_int:1!null + ├── immutable └── select ├── columns: k_int:1!null k_int_2:2 + ├── immutable ├── scan t_mult │ ├── columns: k_int:1 k_int_2:2 │ └── computed column expressions @@ -93,7 +95,7 @@ project │ └── c_mult_2:5 │ └── k_int:1 + 1 └── filters - └── (k_int:1, k_int_2:2) > (1, 2) [outer=(1,2), constraints=(/1/2: [/1/3 - ]; tight)] + └── (k_int:1, k_int_2:2) > (1, 2) [outer=(1,2), immutable, constraints=(/1/2: [/1/3 - ]; tight)] # Don't constrain when filter has multiple spans. opt diff --git a/pkg/sql/opt/xform/testdata/rules/groupby b/pkg/sql/opt/xform/testdata/rules/groupby index bb686abee482..1a90e794f4b8 100644 --- a/pkg/sql/opt/xform/testdata/rules/groupby +++ b/pkg/sql/opt/xform/testdata/rules/groupby @@ -766,6 +766,7 @@ SELECT v + 1, min(w), v FROM kuvw WHERE v = 5 AND w IS NOT NULL GROUP BY v project ├── columns: "?column?":6!null min:5!null v:3!null ├── cardinality: [0 - 1] + ├── immutable ├── key: () ├── fd: ()-->(3,5,6) ├── project @@ -782,7 +783,7 @@ project │ └── projections │ └── w:4 [as=min:5, outer=(4)] └── projections - └── v:3 + 1 [as="?column?":6, outer=(3)] + └── v:3 + 1 [as="?column?":6, outer=(3), immutable] # Add const_agg function, as well as max function. 
opt expect=ReplaceMaxWithLimit @@ -791,6 +792,7 @@ SELECT v + 1, max(w), v FROM kuvw WHERE v = 5 GROUP BY v project ├── columns: "?column?":6!null max:5 v:3!null ├── cardinality: [0 - 1] + ├── immutable ├── key: () ├── fd: ()-->(3,5,6) ├── project @@ -807,7 +809,7 @@ project │ └── projections │ └── w:4 [as=max:5, outer=(4)] └── projections - └── v:3 + 1 [as="?column?":6, outer=(3)] + └── v:3 + 1 [as="?column?":6, outer=(3), immutable] # Use multiple grouping columns with min function. opt expect=ReplaceMinWithLimit diff --git a/pkg/sql/opt/xform/testdata/rules/join b/pkg/sql/opt/xform/testdata/rules/join index 29828aafb4bc..ba78e5089a7d 100644 --- a/pkg/sql/opt/xform/testdata/rules/join +++ b/pkg/sql/opt/xform/testdata/rules/join @@ -2271,6 +2271,7 @@ SELECT b,a FROM t5 WHERE b @> '{"a":1}' ---- index-join t5 ├── columns: b:2 a:1!null + ├── immutable ├── key: (1) ├── fd: (1)-->(2) └── scan t5@b_idx @@ -2283,6 +2284,7 @@ SELECT b,a FROM t5 WHERE b @> '{"a":[[{"b":{"c":[{"d":"e"}]}}]]}' ---- index-join t5 ├── columns: b:2 a:1!null + ├── immutable ├── key: (1) ├── fd: (1)-->(2) └── scan t5@b_idx @@ -2298,6 +2300,7 @@ inner-join (lookup t5) ├── columns: b:2 a:1!null ├── key columns: [1] = [1] ├── lookup columns are key + ├── immutable ├── key: (1) ├── fd: (1)-->(2) ├── inner-join (zigzag t5@b_idx t5@b_idx) @@ -2307,7 +2310,7 @@ inner-join (lookup t5) │ ├── right fixed columns: [2] = ['{"c": 2}'] │ └── filters (true) └── filters - └── b:2 @> '{"a": 1, "c": 2}' [outer=(2)] + └── b:2 @> '{"a": 1, "c": 2}' [outer=(2), immutable] memo SELECT a FROM t5 WHERE b @> '{"a":1, "c":2}' @@ -2352,6 +2355,7 @@ inner-join (lookup t5) ├── columns: b:2 a:1!null ├── key columns: [1] = [1] ├── lookup columns are key + ├── immutable ├── key: (1) ├── fd: (1)-->(2) ├── inner-join (zigzag t5@b_idx t5@b_idx) @@ -2361,7 +2365,7 @@ inner-join (lookup t5) │ ├── right fixed columns: [2] = ['{"a": [{"d": 3}]}'] │ └── filters (true) └── filters - └── b:2 @> '{"a": [{"b": "c", "d": 3}, 5]}' 
[outer=(2)] + └── b:2 @> '{"a": [{"b": "c", "d": 3}, 5]}' [outer=(2), immutable] # Regression test for issue where zero-column expressions could exist multiple # times in the tree, causing collisions. @@ -2878,7 +2882,7 @@ select │ ├── volatile, side-effects │ └── key: (1) └── filters - └── b:2 @> '{"a": 1, "c": 2}' [outer=(2)] + └── b:2 @> '{"a": 1, "c": 2}' [outer=(2), immutable] # -------------------------------------------------- # AssociateJoin diff --git a/pkg/sql/opt/xform/testdata/rules/select b/pkg/sql/opt/xform/testdata/rules/select index 8156dc5440f5..49ab4cd8743d 100644 --- a/pkg/sql/opt/xform/testdata/rules/select +++ b/pkg/sql/opt/xform/testdata/rules/select @@ -494,6 +494,7 @@ SELECT * FROM b WHERE v >= 1 AND v <= 10 AND k+u = 1 select ├── columns: k:1!null u:2 v:3!null j:4 ├── cardinality: [0 - 10] + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)-->(1,2,4) ├── index-join b @@ -508,7 +509,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(3), (3)-->(1) └── filters - └── (k:1 + u:2) = 1 [outer=(1,2)] + └── (k:1 + u:2) = 1 [outer=(1,2), immutable] memo SELECT * FROM b WHERE v >= 1 AND v <= 10 AND k+u = 1 @@ -550,6 +551,7 @@ SELECT * FROM b WHERE v >= 1 AND v <= 10 AND k+u = 1 AND k > 5 select ├── columns: k:1!null u:2 v:3!null j:4 ├── cardinality: [0 - 10] + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)-->(1,2,4) ├── index-join b @@ -571,7 +573,7 @@ select │ └── filters │ └── k:1 > 5 [outer=(1), constraints=(/1: [/6 - ]; tight)] └── filters - └── (k:1 + u:2) = 1 [outer=(1,2)] + └── (k:1 + u:2) = 1 [outer=(1,2), immutable] memo SELECT * FROM b WHERE v >= 1 AND v <= 10 AND k+u = 1 AND k > 5 @@ -625,6 +627,7 @@ SELECT * FROM b WHERE (u, k, v) > (1, 2, 3) AND (u, k, v) < (8, 9, 10) ---- select ├── columns: k:1!null u:2!null v:3 j:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)~~>(1,2,4) ├── index-join b @@ -637,8 +640,8 @@ select │ ├── key: (1) │ └── fd: (1)-->(2) └── filters - ├── (u:2, k:1, v:3) > (1, 2, 3) [outer=(1-3), constraints=(/2/1/3: 
[/1/2/4 - ]; tight)] - └── (u:2, k:1, v:3) < (8, 9, 10) [outer=(1-3), constraints=(/2/1/3: (/NULL - /8/9/9]; tight)] + ├── (u:2, k:1, v:3) > (1, 2, 3) [outer=(1-3), immutable, constraints=(/2/1/3: [/1/2/4 - ]; tight)] + └── (u:2, k:1, v:3) < (8, 9, 10) [outer=(1-3), immutable, constraints=(/2/1/3: (/NULL - /8/9/9]; tight)] memo SELECT * FROM b WHERE (u, k, v) > (1, 2, 3) AND (u, k, v) < (8, 9, 10) @@ -734,7 +737,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(3), (3)-->(1) └── filters - └── (k:1 + u:2) = 1 [outer=(1,2)] + └── (k:1 + u:2) = 1 [outer=(1,2), immutable] # -------------------------------------------------- # GenerateInvertedIndexScans @@ -746,9 +749,11 @@ SELECT k FROM b WHERE j @> '{"a": "b"}' ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── index-join b ├── columns: k:1!null j:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(4) └── scan b@inv_idx @@ -789,11 +794,13 @@ SELECT k FROM b WHERE j @> '{"a": "b", "c": "d"}' ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── inner-join (lookup b) ├── columns: k:1!null j:4 ├── key columns: [1] = [1] ├── lookup columns are key + ├── immutable ├── key: (1) ├── fd: (1)-->(4) ├── inner-join (zigzag b@inv_idx b@inv_idx) @@ -803,7 +810,7 @@ project │ ├── right fixed columns: [4] = ['{"c": "d"}'] │ └── filters (true) └── filters - └── j:4 @> '{"a": "b", "c": "d"}' [outer=(4)] + └── j:4 @> '{"a": "b", "c": "d"}' [outer=(4), immutable] # Query requiring an index join with no remaining filter. 
opt @@ -811,10 +818,12 @@ SELECT u, k FROM b WHERE j @> '{"a": "b"}' ---- project ├── columns: u:2 k:1!null + ├── immutable ├── key: (1) ├── fd: (1)-->(2) └── index-join b ├── columns: k:1!null u:2 j:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2,4) └── scan b@inv_idx @@ -827,6 +836,7 @@ SELECT j, k FROM b WHERE j @> '{"a": "b"}' ---- index-join b ├── columns: j:4 k:1!null + ├── immutable ├── key: (1) ├── fd: (1)-->(4) └── scan b@inv_idx @@ -839,6 +849,7 @@ SELECT * FROM b WHERE j @> '{"a": "b"}' ---- index-join b ├── columns: k:1!null u:2 v:3 j:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)~~>(1,2,4) └── scan b@inv_idx @@ -855,6 +866,7 @@ inner-join (lookup b) ├── columns: j:4 k:1!null ├── key columns: [1] = [1] ├── lookup columns are key + ├── immutable ├── key: (1) ├── fd: (1)-->(4) ├── inner-join (zigzag b@inv_idx b@inv_idx) @@ -864,7 +876,7 @@ inner-join (lookup b) │ ├── right fixed columns: [4] = ['{"c": "d"}'] │ └── filters (true) └── filters - └── j:4 @> '{"a": "b", "c": "d"}' [outer=(4)] + └── j:4 @> '{"a": "b", "c": "d"}' [outer=(4), immutable] opt SELECT * FROM b WHERE j @> '{"a": {"b": "c", "d": "e"}, "f": "g"}' @@ -873,6 +885,7 @@ inner-join (lookup b) ├── columns: k:1!null u:2 v:3 j:4 ├── key columns: [1] = [1] ├── lookup columns are key + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)~~>(1,2,4) ├── inner-join (zigzag b@inv_idx b@inv_idx) @@ -882,13 +895,14 @@ inner-join (lookup b) │ ├── right fixed columns: [4] = ['{"a": {"d": "e"}}'] │ └── filters (true) └── filters - └── j:4 @> '{"a": {"b": "c", "d": "e"}, "f": "g"}' [outer=(4)] + └── j:4 @> '{"a": {"b": "c", "d": "e"}, "f": "g"}' [outer=(4), immutable] opt SELECT * FROM b WHERE j @> '{}' ---- select ├── columns: k:1!null u:2 v:3 j:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)~~>(1,2,4) ├── scan b @@ -896,13 +910,14 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-4), (3)~~>(1,2,4) └── filters - └── j:4 @> '{}' [outer=(4)] + └── j:4 @> '{}' [outer=(4), immutable] opt SELECT * 
FROM b WHERE j @> '[]' ---- select ├── columns: k:1!null u:2 v:3 j:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)~~>(1,2,4) ├── scan b @@ -910,13 +925,14 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-4), (3)~~>(1,2,4) └── filters - └── j:4 @> '[]' [outer=(4)] + └── j:4 @> '[]' [outer=(4), immutable] opt SELECT * FROM b WHERE j @> '2' ---- index-join b ├── columns: k:1!null u:2 v:3 j:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)~~>(1,2,4) └── scan b@inv_idx @@ -931,6 +947,7 @@ SELECT * FROM b WHERE j @> '[{}]' ---- select ├── columns: k:1!null u:2 v:3 j:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)~~>(1,2,4) ├── scan b @@ -938,13 +955,14 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-4), (3)~~>(1,2,4) └── filters - └── j:4 @> '[{}]' [outer=(4)] + └── j:4 @> '[{}]' [outer=(4), immutable] opt SELECT * FROM b WHERE j @> '{"a": {}}' ---- select ├── columns: k:1!null u:2 v:3 j:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)~~>(1,2,4) ├── scan b @@ -952,13 +970,14 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-4), (3)~~>(1,2,4) └── filters - └── j:4 @> '{"a": {}}' [outer=(4)] + └── j:4 @> '{"a": {}}' [outer=(4), immutable] opt SELECT * FROM b WHERE j @> '{"a": []}' ---- select ├── columns: k:1!null u:2 v:3 j:4 + ├── immutable ├── key: (1) ├── fd: (1)-->(2-4), (3)~~>(1,2,4) ├── scan b @@ -966,7 +985,7 @@ select │ ├── key: (1) │ └── fd: (1)-->(2-4), (3)~~>(1,2,4) └── filters - └── j:4 @> '{"a": []}' [outer=(4)] + └── j:4 @> '{"a": []}' [outer=(4), immutable] # GenerateInvertedIndexScans propagates row-level locking information. 
opt @@ -994,9 +1013,11 @@ SELECT k FROM c WHERE a @> ARRAY[1] ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── index-join c ├── columns: k:1!null a:2 + ├── immutable ├── key: (1) ├── fd: (1)-->(2) └── scan c@inv_idx @@ -1009,11 +1030,13 @@ SELECT k FROM c WHERE a @> ARRAY[1,3,1,5] ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── inner-join (lookup c) ├── columns: k:1!null a:2 ├── key columns: [1] = [1] ├── lookup columns are key + ├── immutable ├── key: (1) ├── fd: (1)-->(2) ├── inner-join (zigzag c@inv_idx c@inv_idx) @@ -1023,16 +1046,18 @@ project │ ├── right fixed columns: [2] = [ARRAY[3]] │ └── filters (true) └── filters - └── a:2 @> ARRAY[1,3,1,5] [outer=(2)] + └── a:2 @> ARRAY[1,3,1,5] [outer=(2), immutable] opt SELECT k FROM c WHERE a @> ARRAY[]::INT[] ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── select ├── columns: k:1!null a:2 + ├── immutable ├── key: (1) ├── fd: (1)-->(2) ├── scan c @@ -1040,7 +1065,7 @@ project │ ├── key: (1) │ └── fd: (1)-->(2) └── filters - └── a:2 @> ARRAY[] [outer=(2)] + └── a:2 @> ARRAY[] [outer=(2), immutable] opt SELECT k FROM c WHERE a IS NULL @@ -1328,16 +1353,19 @@ SELECT k FROM b WHERE k = 1 OR j @> '{"foo": "bar"}' ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── distinct-on ├── columns: k:1!null j:4 ├── grouping columns: k:1!null + ├── immutable ├── key: (1) ├── fd: (1)-->(4) ├── union-all │ ├── columns: k:1!null j:4 │ ├── left columns: k:1!null j:4 │ ├── right columns: k:5 j:8 + │ ├── immutable │ ├── scan b │ │ ├── columns: k:1!null j:4 │ │ ├── constraint: /1: [/1 - /1] @@ -1346,6 +1374,7 @@ project │ │ └── fd: ()-->(1,4) │ └── index-join b │ ├── columns: k:5!null j:8 + │ ├── immutable │ ├── key: (5) │ ├── fd: (5)-->(8) │ └── scan b@inv_idx @@ -1362,16 +1391,19 @@ SELECT k FROM c WHERE k = 1 OR a @> ARRAY[2] ---- project ├── columns: k:1!null + ├── immutable ├── key: (1) └── distinct-on ├── columns: k:1!null a:2 ├── grouping columns: k:1!null + 
├── immutable ├── key: (1) ├── fd: (1)-->(2) ├── union-all │ ├── columns: k:1!null a:2 │ ├── left columns: k:1!null a:2 │ ├── right columns: k:4 a:5 + │ ├── immutable │ ├── scan c │ │ ├── columns: k:1!null a:2 │ │ ├── constraint: /1: [/1 - /1] @@ -1380,6 +1412,7 @@ project │ │ └── fd: ()-->(1,2) │ └── index-join c │ ├── columns: k:4!null a:5 + │ ├── immutable │ ├── key: (4) │ ├── fd: (4)-->(5) │ └── scan c@inv_idx @@ -2299,15 +2332,18 @@ SELECT u, j FROM b WHERE u = 1 OR j @> '{"foo": "bar"}' ---- project ├── columns: u:2 j:4 + ├── immutable └── distinct-on ├── columns: k:1!null u:2 j:4 ├── grouping columns: k:1!null + ├── immutable ├── key: (1) ├── fd: (1)-->(2,4) ├── union-all │ ├── columns: k:1!null u:2 j:4 │ ├── left columns: k:1!null u:2 j:4 │ ├── right columns: k:5 u:6 j:8 + │ ├── immutable │ ├── index-join b │ │ ├── columns: k:1!null u:2!null j:4 │ │ ├── key: (1) @@ -2319,6 +2355,7 @@ project │ │ └── fd: ()-->(2) │ └── index-join b │ ├── columns: k:5!null u:6 j:8 + │ ├── immutable │ ├── key: (5) │ ├── fd: (5)-->(6,8) │ └── scan b@inv_idx @@ -2337,15 +2374,18 @@ SELECT u, a FROM c WHERE u = 1 OR a @> ARRAY[2] ---- project ├── columns: u:3 a:2 + ├── immutable └── distinct-on ├── columns: k:1!null a:2 u:3 ├── grouping columns: k:1!null + ├── immutable ├── key: (1) ├── fd: (1)-->(2,3) ├── union-all │ ├── columns: k:1!null a:2 u:3 │ ├── left columns: k:1!null a:2 u:3 │ ├── right columns: k:4 a:5 u:6 + │ ├── immutable │ ├── index-join c │ │ ├── columns: k:1!null a:2 u:3!null │ │ ├── key: (1) @@ -2357,6 +2397,7 @@ project │ │ └── fd: ()-->(3) │ └── index-join c │ ├── columns: k:4!null a:5 u:6 + │ ├── immutable │ ├── key: (4) │ ├── fd: (4)-->(5,6) │ └── scan c@inv_idx diff --git a/pkg/sql/opt_catalog.go b/pkg/sql/opt_catalog.go index 656bbb465012..7335233bbd19 100644 --- a/pkg/sql/opt_catalog.go +++ b/pkg/sql/opt_catalog.go @@ -77,29 +77,29 @@ func (oc *optCatalog) reset() { oc.cfg = oc.planner.execCfg.Gossip.DeprecatedSystemConfig(47150) } -// optSchema is a 
wrapper around sqlbase.DatabaseDescriptor that implements the -// cat.Object and cat.Schema interfaces. +// optSchema is a wrapper around sqlbase.ImmutableDatabaseDescriptor that +// implements the cat.Object and cat.Schema interfaces. type optSchema struct { planner *planner - desc *sqlbase.DatabaseDescriptor + desc *sqlbase.ImmutableDatabaseDescriptor name cat.SchemaName } // ID is part of the cat.Object interface. func (os *optSchema) ID() cat.StableID { - return cat.StableID(os.desc.ID) + return cat.StableID(os.desc.GetID()) } // PostgresDescriptorID is part of the cat.Object interface. func (os *optSchema) PostgresDescriptorID() cat.StableID { - return cat.StableID(os.desc.ID) + return cat.StableID(os.desc.GetID()) } // Equals is part of the cat.Object interface. func (os *optSchema) Equals(other cat.Object) bool { otherSchema, ok := other.(*optSchema) - return ok && os.desc.ID == otherSchema.desc.ID + return ok && os.desc.GetID() == otherSchema.desc.GetID() } // Name is part of the cat.Schema interface. @@ -153,7 +153,7 @@ func (oc *optCatalog) ResolveSchema( } return &optSchema{ planner: oc.planner, - desc: desc.(*DatabaseDescriptor), + desc: desc.(*sqlbase.ImmutableDatabaseDescriptor), name: oc.tn.ObjectNamePrefix, }, oc.tn.ObjectNamePrefix, nil } @@ -206,7 +206,7 @@ func (oc *optCatalog) ResolveDataSourceByID( return ds, false, err } -func getDescForCatalogObject(o cat.Object) (sqlbase.DescriptorProto, error) { +func getDescForCatalogObject(o cat.Object) (sqlbase.DescriptorInterface, error) { switch t := o.(type) { case *optSchema: return t.desc, nil @@ -292,7 +292,7 @@ func (oc *optCatalog) fullyQualifiedNameWithTxn( if err != nil { return cat.DataSourceName{}, err } - return tree.MakeTableName(tree.Name(dbDesc.Name), tree.Name(desc.Name)), nil + return tree.MakeTableName(tree.Name(dbDesc.GetName()), tree.Name(desc.Name)), nil } // dataSourceForDesc returns a data source wrapper for the given descriptor. 
@@ -1324,7 +1324,7 @@ func newOptVirtualTable( // both cases. id |= cat.StableID(math.MaxUint32) << 32 } else { - id |= cat.StableID(dbDesc.(*DatabaseDescriptor).ID) << 32 + id |= cat.StableID(dbDesc.(*sqlbase.ImmutableDatabaseDescriptor).GetID()) << 32 } } diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index 97c62e0e4b69..4b0a5bad6491 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -20,13 +20,11 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/geo/geoindex" - "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/row" - "github.com/cockroachdb/cockroach/pkg/sql/rowexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/span" @@ -131,10 +129,8 @@ func (ef *execFactory) ConstructScan( scan.isFull = len(scan.spans) == 1 && scan.spans[0].EqualValue( scan.desc.IndexSpan(ef.planner.ExecCfg().Codec, scan.index.ID), ) - for i := range reqOrdering { - if reqOrdering[i].ColIdx >= len(colCfg.wantedColumns) { - return nil, errors.Errorf("invalid reqOrdering: %v", reqOrdering) - } + if err = colCfg.assertValidReqOrdering(reqOrdering); err != nil { + return nil, err } scan.reqOrdering = ReqOrdering(reqOrdering) scan.estimatedRowCount = uint64(rowCount) @@ -1048,50 +1044,7 @@ func (ef *execFactory) ConstructPlan( if spool, ok := root.(*spoolNode); ok { root = spool.source } - res := &planTop{ - // TODO(radu): these fields can be modified by planning various opaque - // statements. We should have a cleaner way of plumbing these. 
- avoidBuffering: ef.planner.curPlan.avoidBuffering, - auditEvents: ef.planner.curPlan.auditEvents, - instrumentation: ef.planner.curPlan.instrumentation, - } - res.main.planNode = root.(planNode) - if len(subqueries) > 0 { - res.subqueryPlans = make([]subquery, len(subqueries)) - for i := range subqueries { - in := &subqueries[i] - out := &res.subqueryPlans[i] - out.subquery = in.ExprNode - switch in.Mode { - case exec.SubqueryExists: - out.execMode = rowexec.SubqueryExecModeExists - case exec.SubqueryOneRow: - out.execMode = rowexec.SubqueryExecModeOneRow - case exec.SubqueryAnyRows: - out.execMode = rowexec.SubqueryExecModeAllRowsNormalized - case exec.SubqueryAllRows: - out.execMode = rowexec.SubqueryExecModeAllRows - default: - return nil, errors.Errorf("invalid SubqueryMode %d", in.Mode) - } - out.expanded = true - out.plan.planNode = in.Root.(planNode) - } - } - if len(cascades) > 0 { - res.cascades = make([]cascadeMetadata, len(cascades)) - for i := range cascades { - res.cascades[i].Cascade = cascades[i] - } - } - if len(checks) > 0 { - res.checkPlans = make([]checkPlan, len(checks)) - for i := range checks { - res.checkPlans[i].plan.planNode = checks[i].(planNode) - } - } - - return res, nil + return constructPlan(ef.planner, root, subqueries, cascades, checks) } // urlOutputter handles writing strings into an encoded URL for EXPLAIN (OPT, @@ -2085,23 +2038,3 @@ func makeColDescList(table cat.Table, cols exec.TableColumnOrdinalSet) []sqlbase } return colDescs } - -// makeScanColumnsConfig builds a scanColumnsConfig struct by constructing a -// list of descriptor IDs for columns in the given cols set. Columns are -// identified by their ordinal position in the table schema. -func makeScanColumnsConfig(table cat.Table, cols exec.TableColumnOrdinalSet) scanColumnsConfig { - // Set visibility=execinfra.ScanVisibilityPublicAndNotPublic, since all - // columns in the "cols" set should be projected, regardless of whether - // they're public or non- public. 
The caller decides which columns to - // include (or not include). Note that when wantedColumns is non-empty, - // the visibility flag will never trigger the addition of more columns. - colCfg := scanColumnsConfig{ - wantedColumns: make([]tree.ColumnID, 0, cols.Len()), - visibility: execinfra.ScanVisibilityPublicAndNotPublic, - } - for c, ok := cols.Next(0); ok; c, ok = cols.Next(c + 1) { - desc := table.Column(c).(*sqlbase.ColumnDescriptor) - colCfg.wantedColumns = append(colCfg.wantedColumns, tree.ColumnID(desc.ID)) - } - return colCfg -} diff --git a/pkg/sql/parser/scan.go b/pkg/sql/parser/scan.go index f91c0f8f1629..ceb9e5851547 100644 --- a/pkg/sql/parser/scan.go +++ b/pkg/sql/parser/scan.go @@ -568,12 +568,12 @@ func (s *scanner) scanIdent(lval *sqlSymType) { if lval.id != lex.IDENT { if isExperimental { if _, ok := lex.AllowedExperimental[kw]; !ok { - // If the parsed token is not on the whitelisted set of keywords, + // If the parsed token is not on the allowlisted set of keywords, // then it might have been intended to be parsed as something else. // In that case, re-tokenize the original string. lval.id = lex.GetKeywordID(lval.str) } else { - // It is a whitelisted keyword, so remember the shortened + // It is a allowlisted keyword, so remember the shortened // keyword for further processing. lval.str = kw } diff --git a/pkg/sql/partition_test.go b/pkg/sql/partition_test.go index 19b4895a701f..674cb1a7f4d0 100644 --- a/pkg/sql/partition_test.go +++ b/pkg/sql/partition_test.go @@ -41,7 +41,7 @@ func TestRemovePartitioningOSS(t *testing.T) { if err := tests.CreateKVTable(sqlDBRaw, "kv", numRows); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") tableKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID) // Hack in partitions. Doing this properly requires a CCL binary. 
@@ -64,7 +64,7 @@ func TestRemovePartitioningOSS(t *testing.T) { // Note that this is really a gross hack - it breaks planner caches, which // assume that nothing is going to change out from under them like this. We // "fix" the issue by altering the table's name to refresh the cache, below. - if err := kvDB.Put(ctx, tableKey, sqlbase.WrapDescriptor(tableDesc)); err != nil { + if err := kvDB.Put(ctx, tableKey, tableDesc.DescriptorProto()); err != nil { t.Fatal(err) } sqlDB.Exec(t, "ALTER TABLE t.kv RENAME to t.kv2") diff --git a/pkg/sql/pg_catalog.go b/pkg/sql/pg_catalog.go index 34ceeb16e5bc..e901774391c0 100644 --- a/pkg/sql/pg_catalog.go +++ b/pkg/sql/pg_catalog.go @@ -298,7 +298,7 @@ CREATE TABLE pg_catalog.pg_am ( amhandler OID, amtype CHAR )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // add row for forward indexes if err := addRow( forwardIndexOid, // oid - all versions @@ -393,8 +393,8 @@ CREATE TABLE pg_catalog.pg_attrdef ( INDEX(adrelid) )`, virtualMany, false, /* includesIndexEntries */ - func(ctx context.Context, p *planner, h oidHasher, db *DatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, + func(ctx context.Context, p *planner, h oidHasher, db *sqlbase.ImmutableDatabaseDescriptor, scName string, + table *sqlbase.ImmutableTableDescriptor, lookup simpleSchemaResolver, addRow func(...tree.Datum) error) error { colNum := 0 @@ -459,8 +459,8 @@ CREATE TABLE pg_catalog.pg_attribute ( INDEX(attrelid) )`, virtualMany, true, /* includesIndexEntries */ - func(ctx context.Context, p *planner, h oidHasher, db *DatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, + func(ctx context.Context, p *planner, h oidHasher, db *sqlbase.ImmutableDatabaseDescriptor, scName string, + table *sqlbase.ImmutableTableDescriptor, lookup 
simpleSchemaResolver, addRow func(...tree.Datum) error) error { // addColumn adds adds either a table or a index column to the pg_attribute table. @@ -522,7 +522,7 @@ CREATE TABLE pg_catalog.pg_cast ( castcontext CHAR, castmethod CHAR )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // TODO(someone): to populate this, we should split up the big PerformCast // method in tree/eval.go into entries in a list. Then, this virtual table // can simply range over the list. This would probably be better for @@ -550,7 +550,7 @@ CREATE TABLE pg_catalog.pg_authid ( rolpassword TEXT, rolvaliduntil TIMESTAMPTZ )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachRole(ctx, p, func(username string, isRole bool, noLogin bool) error { isRoot := tree.DBool(username == security.RootUser || username == sqlbase.AdminRole) @@ -584,7 +584,7 @@ CREATE TABLE pg_catalog.pg_auth_members ( grantor OID, admin_option BOOL )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachRoleMembership(ctx, p, func(roleName, memberName string, isAdmin bool) error { @@ -608,7 +608,7 @@ CREATE TABLE pg_catalog.pg_available_extensions ( installed_version TEXT, comment TEXT )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ 
*sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // We support no extensions. return nil }, @@ -659,8 +659,8 @@ CREATE TABLE pg_catalog.pg_class ( INDEX (oid) )`, virtualMany, true, /* includesIndexEntries */ - func(ctx context.Context, p *planner, h oidHasher, db *DatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, _ simpleSchemaResolver, addRow func(...tree.Datum) error) error { + func(ctx context.Context, p *planner, h oidHasher, db *sqlbase.ImmutableDatabaseDescriptor, scName string, + table *sqlbase.ImmutableTableDescriptor, _ simpleSchemaResolver, addRow func(...tree.Datum) error) error { // The only difference between tables, views and sequences are the relkind and relam columns. relKind := relKindTable relAm := forwardIndexOid @@ -764,9 +764,9 @@ CREATE TABLE pg_catalog.pg_collation ( collcollate STRING, collctype STRING )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() - return forEachDatabaseDesc(ctx, p, dbContext, false /* requiresPrivileges */, func(db *DatabaseDescriptor) error { + return forEachDatabaseDesc(ctx, p, dbContext, false /* requiresPrivileges */, func(db *sqlbase.ImmutableDatabaseDescriptor) error { namespaceOid := h.NamespaceOid(db, pgCatalogName) for _, tag := range collate.Supported() { collName := tag.String() @@ -830,9 +830,9 @@ func populateTableConstraints( ctx context.Context, p *planner, h oidHasher, - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, tableLookup simpleSchemaResolver, addRow func(...tree.Datum) error, ) error { @@ -860,7 +860,7 @@ func populateTableConstraints( var err error switch con.Kind { case 
sqlbase.ConstraintTypePK: - oid = h.PrimaryKeyConstraintOid(db, scName, table, con.Index) + oid = h.PrimaryKeyConstraintOid(db, scName, table.TableDesc(), con.Index) contype = conTypePKey conindid = h.IndexOid(table.ID, con.Index.ID) @@ -871,7 +871,7 @@ func populateTableConstraints( condef = tree.NewDString(table.PrimaryKeyString()) case sqlbase.ConstraintTypeFK: - oid = h.ForeignKeyConstraintOid(db, scName, table, con.FK) + oid = h.ForeignKeyConstraintOid(db, scName, table.TableDesc(), con.FK) contype = conTypeFK // Foreign keys don't have a single linked index. Pick the first one // that matches on the referenced table. @@ -902,13 +902,13 @@ func populateTableConstraints( return err } var buf bytes.Buffer - if err := showForeignKeyConstraint(&buf, db.Name, table, con.FK, tableLookup); err != nil { + if err := showForeignKeyConstraint(&buf, db.GetName(), table, con.FK, tableLookup); err != nil { return err } condef = tree.NewDString(buf.String()) case sqlbase.ConstraintTypeUnique: - oid = h.UniqueConstraintOid(db, scName, table, con.Index) + oid = h.UniqueConstraintOid(db, scName, table.TableDesc(), con.Index) contype = conTypeUnique conindid = h.IndexOid(table.ID, con.Index.ID) var err error @@ -922,7 +922,7 @@ func populateTableConstraints( condef = tree.NewDString(f.CloseAndGetString()) case sqlbase.ConstraintTypeCheck: - oid = h.CheckConstraintOid(db, scName, table, con.CheckConstraint) + oid = h.CheckConstraintOid(db, scName, table.TableDesc(), con.CheckConstraint) contype = conTypeCheck if conkey, err = colIDArrayToDatum(con.CheckConstraint.ColumnIDs); err != nil { return err @@ -971,7 +971,9 @@ type oneAtATimeSchemaResolver struct { p *planner } -func (r oneAtATimeSchemaResolver) getDatabaseByID(id sqlbase.ID) (*DatabaseDescriptor, error) { +func (r oneAtATimeSchemaResolver) getDatabaseByID( + id sqlbase.ID, +) (*sqlbase.ImmutableDatabaseDescriptor, error) { return r.p.Tables().DatabaseCache().GetDatabaseDescByID(r.ctx, r.p.txn, id) } @@ -995,15 +997,15 @@ 
func makeAllRelationsVirtualTableWithDescriptorIDIndex( schemaDef string, virtualOpts virtualOpts, includesIndexEntries bool, - populateFromTable func(ctx context.Context, p *planner, h oidHasher, db *sqlbase.DatabaseDescriptor, - scName string, table *sqlbase.TableDescriptor, lookup simpleSchemaResolver, + populateFromTable func(ctx context.Context, p *planner, h oidHasher, db *sqlbase.ImmutableDatabaseDescriptor, + scName string, table *sqlbase.ImmutableTableDescriptor, lookup simpleSchemaResolver, addRow func(...tree.Datum) error, ) error, ) virtualSchemaTable { - populateAll := func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populateAll := func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachTableDescWithTableLookup(ctx, p, dbContext, virtualOpts, - func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor, lookup tableLookupFn) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor, lookup tableLookupFn) error { return populateFromTable(ctx, p, h, db, scName, table, lookup, addRow) }) } @@ -1013,7 +1015,7 @@ func makeAllRelationsVirtualTableWithDescriptorIDIndex( indexes: []virtualIndex{ { partial: includesIndexEntries, - populate: func(ctx context.Context, constraint tree.Datum, p *planner, db *DatabaseDescriptor, + populate: func(ctx context.Context, constraint tree.Datum, p *planner, db *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) (bool, error) { var id sqlbase.ID d := tree.UnwrapDatum(p.EvalContext(), constraint) @@ -1042,18 +1044,18 @@ func makeAllRelationsVirtualTableWithDescriptorIDIndex( } // Don't include tables that aren't in the current database unless // they're virtual, dropped tables, or ones that the user can't see. 
- if (!table.Desc.IsVirtualTable() && table.Desc.ParentID != db.ID) || + if (!table.Desc.IsVirtualTable() && table.Desc.ParentID != db.GetID()) || table.Desc.Dropped() || - !userCanSeeTable(ctx, p, table.Desc.TableDesc(), true /*allowAdding*/) { + !userCanSeeTable(ctx, p, table.Desc, true /*allowAdding*/) { return false, nil } h := makeOidHasher() scResolver := oneAtATimeSchemaResolver{p: p, ctx: ctx} - scName, err := resolver.ResolveSchemaNameByID(ctx, p.txn, p.ExecCfg().Codec, db.ID, table.Desc.GetParentSchemaID()) + scName, err := resolver.ResolveSchemaNameByID(ctx, p.txn, p.ExecCfg().Codec, db.GetID(), table.Desc.GetParentSchemaID()) if err != nil { return false, err } - if err := populateFromTable(ctx, p, h, db, scName, table.Desc.TableDesc(), scResolver, + if err := populateFromTable(ctx, p, h, db, scName, table.Desc, scResolver, addRow); err != nil { return false, err } @@ -1146,7 +1148,7 @@ CREATE TABLE pg_catalog.pg_conversion ( conproc OID, condefault BOOL )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -1171,13 +1173,13 @@ CREATE TABLE pg_catalog.pg_database ( dattablespace OID, datacl STRING[] )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachDatabaseDesc(ctx, p, nil /*all databases*/, false, /* requiresPrivileges */ - func(db *sqlbase.DatabaseDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor) error { return addRow( - dbOid(db.ID), // oid - tree.NewDName(db.Name), // datname - tree.DNull, // datdba + dbOid(db.GetID()), // oid + tree.NewDName(db.GetName()), // datname + tree.DNull, // datdba 
// If there is a change in encoding value for the database we must update // the definitions of getdatabaseencoding within pg_builtin. builtins.DatEncodingUTFId, // encoding @@ -1207,7 +1209,7 @@ CREATE TABLE pg_catalog.pg_default_acl ( defaclobjtype CHAR, defaclacl STRING[] )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -1252,7 +1254,7 @@ CREATE TABLE pg_catalog.pg_depend ( refobjsubid INT4, deptype CHAR )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { vt := p.getVirtualTabler() pgConstraintsDesc, err := vt.getVirtualTableDesc(&pgConstraintsTableName) if err != nil { @@ -1264,9 +1266,9 @@ CREATE TABLE pg_catalog.pg_depend ( } h := makeOidHasher() return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual /*virtual tables have no constraints*/, func( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, - table *sqlbase.TableDescriptor, + table *sqlbase.ImmutableTableDescriptor, tableLookup tableLookupFn, ) error { pgConstraintTableOid := tableOid(pgConstraintsDesc.ID) @@ -1308,7 +1310,7 @@ CREATE TABLE pg_catalog.pg_depend ( } else { refObjID = h.IndexOid(con.ReferencedTable.ID, idx.ID) } - constraintOid := h.ForeignKeyConstraintOid(db, scName, table, con.FK) + constraintOid := h.ForeignKeyConstraintOid(db, scName, table.TableDesc(), con.FK) if err := addRow( pgConstraintTableOid, // classid @@ -1357,7 +1359,7 @@ CREATE TABLE pg_catalog.pg_description ( populate: func( ctx context.Context, p *planner, - dbContext *DatabaseDescriptor, + dbContext 
*sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // This is less efficient than it has to be - if we see performance problems @@ -1410,7 +1412,7 @@ CREATE TABLE pg_catalog.pg_shdescription ( classoid OID, description STRING )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // See comment above - could make this more efficient if necessary. comments, err := getComments(ctx, p) if err != nil { @@ -1445,9 +1447,10 @@ CREATE TABLE pg_catalog.pg_enum ( enumsortorder FLOAT4, enumlabel STRING )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() - return forEachTypeDesc(ctx, p, dbContext, func(_ *DatabaseDescriptor, _ string, typDesc *TypeDescriptor) error { + + return forEachTypeDesc(ctx, p, dbContext, func(_ *sqlbase.ImmutableDatabaseDescriptor, _ string, typDesc *sqlbase.ImmutableTypeDescriptor) error { // We only want to iterate over ENUM types. if typDesc.Kind != sqlbase.TypeDescriptor_ENUM { return nil @@ -1455,7 +1458,7 @@ CREATE TABLE pg_catalog.pg_enum ( // Generate a row for each member of the enum. We don't represent enums // internally using floats for ordering like Postgres, so just pick a // float entry for the rows. 
- typOID := tree.NewDOid(tree.DInt(types.StableTypeIDToOID(uint32(typDesc.ID)))) + typOID := tree.NewDOid(tree.DInt(types.StableTypeIDToOID(uint32(typDesc.GetID())))) for i, member := range typDesc.EnumMembers { if err := addRow( h.EnumEntryOid(typOID, member.PhysicalRepresentation), @@ -1483,7 +1486,7 @@ CREATE TABLE pg_catalog.pg_event_trigger ( evtenabled CHAR, evttags TEXT[] )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Event triggers are not currently supported. return nil }, @@ -1503,7 +1506,7 @@ CREATE TABLE pg_catalog.pg_extension ( extconfig STRING, extcondition STRING )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Extensions are not supported. return nil }, @@ -1522,7 +1525,7 @@ CREATE TABLE pg_catalog.pg_foreign_data_wrapper ( fdwacl STRING[], fdwoptions STRING[] )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Foreign data wrappers are not supported. return nil }, @@ -1542,7 +1545,7 @@ CREATE TABLE pg_catalog.pg_foreign_server ( srvacl STRING[], srvoptions STRING[] )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Foreign servers are not supported. 
return nil }, @@ -1557,7 +1560,7 @@ CREATE TABLE pg_catalog.pg_foreign_table ( ftserver OID, ftoptions STRING[] )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Foreign tables are not supported. return nil }, @@ -1598,10 +1601,10 @@ CREATE TABLE pg_catalog.pg_index ( indexprs STRING, indpred STRING )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* virtual tables do not have indexes */ - func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor) error { tableOid := tableOid(table.ID) return forEachIndexInTable(table, func(index *sqlbase.IndexDescriptor) error { isMutation, isWriteOnly := @@ -1686,10 +1689,10 @@ CREATE TABLE pg_catalog.pg_indexes ( tablespace NAME, indexdef STRING )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachTableDescWithTableLookup(ctx, p, dbContext, hideVirtual, /* virtual tables do not have indexes */ - func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor, tableLookup tableLookupFn) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor, tableLookup tableLookupFn) error { scNameName := 
tree.NewDName(scName) tblName := tree.NewDName(table.Name) return forEachIndexInTable(table, func(index *sqlbase.IndexDescriptor) error { @@ -1716,14 +1719,14 @@ CREATE TABLE pg_catalog.pg_indexes ( func indexDefFromDescriptor( ctx context.Context, p *planner, - db *sqlbase.DatabaseDescriptor, - table *sqlbase.TableDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, + table *sqlbase.ImmutableTableDescriptor, index *sqlbase.IndexDescriptor, tableLookup tableLookupFn, ) (string, error) { indexDef := tree.CreateIndex{ Name: tree.Name(index.Name), - Table: tree.MakeTableName(tree.Name(db.Name), tree.Name(table.Name)), + Table: tree.MakeTableName(tree.Name(db.GetName()), tree.Name(table.Name)), Unique: index.Unique, Columns: make(tree.IndexElemList, len(index.ColumnNames)), Storing: make(tree.NameList, len(index.StoreColumnNames)), @@ -1758,7 +1761,7 @@ func indexDefFromDescriptor( } fields := index.ColumnNames[:sharedPrefixLen] intlDef := &tree.InterleaveDef{ - Parent: tree.MakeTableName(tree.Name(parentDb.Name), tree.Name(parentTable.Name)), + Parent: tree.MakeTableName(tree.Name(parentDb.GetName()), tree.Name(parentTable.Name)), Fields: make(tree.NameList, len(fields)), } for i, field := range fields { @@ -1780,7 +1783,7 @@ CREATE TABLE pg_catalog.pg_inherits ( inhparent OID, inhseqno INT4 )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Table inheritance is not supported. 
return nil }, @@ -1801,7 +1804,7 @@ CREATE TABLE pg_catalog.pg_language ( lanvalidator OID, lanacl STRING[] )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Languages to write functions and stored procedures are not supported. return nil }, @@ -1828,7 +1831,7 @@ CREATE TABLE pg_catalog.pg_locks ( granted BOOLEAN, fastpath BOOLEAN )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -1846,7 +1849,7 @@ CREATE TABLE pg_catalog.pg_matviews ( ispopulated BOOL, definition TEXT )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -1861,10 +1864,10 @@ CREATE TABLE pg_catalog.pg_namespace ( nspowner OID, nspacl STRING[] )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachDatabaseDesc(ctx, p, dbContext, true, /* requiresPrivileges */ - func(db *sqlbase.DatabaseDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor) error { return forEachSchemaName(ctx, p, db, func(s string) error { return addRow( h.NamespaceOid(db, s), // oid @@ -1907,7 +1910,7 @@ CREATE TABLE pg_catalog.pg_operator ( oprrest OID, oprjoin OID )`, - populate: func(ctx context.Context, p *planner, 
db *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, db *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() nspOid := h.NamespaceOid(db, pgCatalogName) addOp := func(opName string, kind tree.Datum, params tree.TypeList, returnTyper tree.ReturnTyper) error { @@ -2012,7 +2015,7 @@ CREATE TABLE pg_catalog.pg_prepared_xacts ( owner NAME, database NAME )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -2033,7 +2036,7 @@ CREATE TABLE pg_catalog.pg_prepared_statements ( parameter_types REGTYPE[], from_sql boolean )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { for name, stmt := range p.preparedStatements.List() { placeholderTypes := stmt.PrepareMetadata.PlaceholderTypesInfo.Types paramTypes := tree.NewDArray(types.RegType) @@ -2114,10 +2117,10 @@ CREATE TABLE pg_catalog.pg_proc ( proconfig STRING[], proacl STRING[] )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachDatabaseDesc(ctx, p, dbContext, false, /* requiresPrivileges */ - func(db *DatabaseDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor) error { nspOid := h.NamespaceOid(db, pgCatalogName) for _, name := range builtins.AllBuiltinNames { // parser.Builtins contains duplicate uppercase and 
lowercase keys. @@ -2251,7 +2254,7 @@ CREATE TABLE pg_catalog.pg_range ( rngcanonical OID, rngsubdiff OID )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // We currently do not support any range types, so this table is empty. // This table should be populated when any range types are added to // oidToDatum (and therefore pg_type). @@ -2273,7 +2276,7 @@ CREATE TABLE pg_catalog.pg_rewrite ( ev_qual TEXT, ev_action TEXT )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Rewrite rules are not supported. return nil }, @@ -2299,7 +2302,7 @@ CREATE TABLE pg_catalog.pg_roles ( rolbypassrls BOOL, rolconfig STRING[] )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // We intentionally do not check if the user has access to system.user. // Because Postgres allows access to pg_roles by non-privileged users, we // need to do the same. 
This shouldn't be an issue, because pg_roles doesn't @@ -2344,7 +2347,7 @@ CREATE TABLE pg_catalog.pg_seclabels ( provider TEXT, label TEXT )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -2363,9 +2366,9 @@ CREATE TABLE pg_catalog.pg_sequence ( seqcache INT8, seqcycle BOOL )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachTableDesc(ctx, p, dbContext, hideVirtual, /* virtual schemas do not have indexes */ - func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor) error { if !table.IsSequence() { return nil } @@ -2412,7 +2415,7 @@ CREATE TABLE pg_catalog.pg_settings ( sourceline INT4, pending_restart BOOL )`, - populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(_ context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { for _, vName := range varNames { gen := varGen[vName] if gen.Hidden { @@ -2475,7 +2478,7 @@ CREATE TABLE pg_catalog.pg_shdepend ( refobjid OID, deptype CHAR )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -2494,12 +2497,12 @@ CREATE TABLE pg_catalog.pg_tables ( hastriggers BOOL, rowsecurity BOOL )`, - 
populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Note: pg_catalog.pg_tables is not well-defined if the dbContext is // empty -- listing tables across databases can yield duplicate // schema/table names. return forEachTableDesc(ctx, p, dbContext, virtualMany, - func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor) error { if !table.IsTable() { return nil } @@ -2529,7 +2532,7 @@ CREATE TABLE pg_catalog.pg_tablespace ( spcacl TEXT[], spcoptions TEXT[] )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return addRow( oidZero, // oid tree.NewDString("pg_default"), // spcname @@ -2565,7 +2568,7 @@ CREATE TABLE pg_catalog.pg_trigger ( tgoldtable NAME, tgnewtable NAME )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Triggers are unsupported. 
return nil }, @@ -2727,10 +2730,10 @@ CREATE TABLE pg_catalog.pg_type ( typdefault STRING, typacl STRING[] )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachDatabaseDesc(ctx, p, dbContext, false, /* requiresPrivileges */ - func(db *DatabaseDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor) error { nspOid := h.NamespaceOid(db, pgCatalogName) // Generate rows for all predefined types. @@ -2741,9 +2744,10 @@ CREATE TABLE pg_catalog.pg_type ( } // Now generate rows for user defined types in this database. - return forEachTypeDesc(ctx, p, dbContext, func(_ *DatabaseDescriptor, _ string, typDesc *TypeDescriptor) error { + + return forEachTypeDesc(ctx, p, dbContext, func(_ *sqlbase.ImmutableDatabaseDescriptor, _ string, typDesc *sqlbase.ImmutableTypeDescriptor) error { typ, err := typDesc.MakeTypesT( - tree.NewUnqualifiedTypeName(tree.Name(typDesc.Name)), + tree.NewUnqualifiedTypeName(tree.Name(typDesc.GetName())), p.makeTypeLookupFn(ctx), ) if err != nil { @@ -2770,7 +2774,7 @@ CREATE TABLE pg_catalog.pg_user ( valuntil TIMESTAMP, useconfig TEXT[] )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachRole(ctx, p, func(username string, isRole bool, noLogin bool) error { @@ -2803,7 +2807,7 @@ CREATE TABLE pg_catalog.pg_user_mapping ( umserver OID, umoptions TEXT[] )`, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow 
func(...tree.Datum) error) error { // This table stores the mapping to foreign server users. // Foreign servers are not supported. return nil @@ -2836,7 +2840,7 @@ CREATE TABLE pg_catalog.pg_stat_activity ( query TEXT ) `, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -2853,7 +2857,7 @@ CREATE TABLE pg_catalog.pg_seclabel ( label TEXT ) `, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -2869,7 +2873,7 @@ CREATE TABLE pg_catalog.pg_shseclabel ( label TEXT ) `, - populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, _ *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return nil }, } @@ -2958,11 +2962,11 @@ CREATE TABLE pg_catalog.pg_views ( viewowner STRING, definition STRING )`, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { // Note: pg_views is not well defined if the dbContext is empty, // because it does not distinguish views in separate databases. 
return forEachTableDesc(ctx, p, dbContext, hideVirtual, /*virtual schemas do not have views*/ - func(db *sqlbase.DatabaseDescriptor, scName string, desc *sqlbase.TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, desc *sqlbase.ImmutableTableDescriptor) error { if !desc.IsView() { return nil } @@ -3011,10 +3015,10 @@ CREATE TABLE pg_catalog.pg_aggregate ( aggminitval TEXT ) `, - populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() return forEachDatabaseDesc(ctx, p, dbContext, false, /* requiresPrivileges */ - func(db *DatabaseDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor) error { for _, name := range builtins.AllAggregateBuiltinNames { if name == builtins.AnyNotNull { // any_not_null is treated as a special case. 
@@ -3173,8 +3177,8 @@ func (h oidHasher) getOid() *tree.DOid { return tree.NewDOid(tree.DInt(i)) } -func (h oidHasher) writeDB(db *sqlbase.DatabaseDescriptor) { - h.writeUInt32(uint32(db.ID)) +func (h oidHasher) writeDB(db *sqlbase.ImmutableDatabaseDescriptor) { + h.writeUInt32(uint32(db.GetID())) } func (h oidHasher) writeSchema(scName string) { @@ -3199,7 +3203,7 @@ func (h oidHasher) writeForeignKeyConstraint(fk *sqlbase.ForeignKeyConstraint) { h.writeStr(fk.Name) } -func (h oidHasher) NamespaceOid(db *sqlbase.DatabaseDescriptor, scName string) *tree.DOid { +func (h oidHasher) NamespaceOid(db *sqlbase.ImmutableDatabaseDescriptor, scName string) *tree.DOid { h.writeTypeTag(namespaceTypeTag) h.writeDB(db) h.writeSchema(scName) @@ -3221,7 +3225,7 @@ func (h oidHasher) ColumnOid(tableID sqlbase.ID, columnID sqlbase.ColumnID) *tre } func (h oidHasher) CheckConstraintOid( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.TableDescriptor, check *sqlbase.TableDescriptor_CheckConstraint, @@ -3235,7 +3239,7 @@ func (h oidHasher) CheckConstraintOid( } func (h oidHasher) PrimaryKeyConstraintOid( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.TableDescriptor, pkey *sqlbase.IndexDescriptor, @@ -3249,7 +3253,7 @@ func (h oidHasher) PrimaryKeyConstraintOid( } func (h oidHasher) ForeignKeyConstraintOid( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.TableDescriptor, fk *sqlbase.ForeignKeyConstraint, @@ -3263,7 +3267,7 @@ func (h oidHasher) ForeignKeyConstraintOid( } func (h oidHasher) UniqueConstraintOid( - db *sqlbase.DatabaseDescriptor, + db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.TableDescriptor, index *sqlbase.IndexDescriptor, diff --git a/pkg/sql/pg_extension.go b/pkg/sql/pg_extension.go index 97613afba5a7..6338f8939973 100644 --- a/pkg/sql/pg_extension.go +++ 
b/pkg/sql/pg_extension.go @@ -15,11 +15,11 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/geo/geopb" + "github.com/cockroachdb/cockroach/pkg/geo/geoprojbase" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/errors" ) // pgExtension is virtual schema which contains virtual tables and/or views @@ -38,14 +38,14 @@ var pgExtension = virtualSchema{ func postgisColumnsTablePopulator( matchingFamily types.Family, -) func(context.Context, *planner, *DatabaseDescriptor, func(...tree.Datum) error) error { - return func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { +) func(context.Context, *planner, *sqlbase.ImmutableDatabaseDescriptor, func(...tree.Datum) error) error { + return func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { return forEachTableDesc( ctx, p, dbContext, hideVirtual, - func(db *sqlbase.DatabaseDescriptor, scName string, table *sqlbase.TableDescriptor) error { + func(db *sqlbase.ImmutableDatabaseDescriptor, scName string, table *sqlbase.ImmutableTableDescriptor) error { if !table.IsPhysicalTable() { return nil } @@ -140,7 +140,18 @@ CREATE TABLE pg_extension.spatial_ref_sys ( srtext varchar(2048), proj4text varchar(2048) )`, - generator: func(ctx context.Context, p *planner, db *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { - return nil, func() {}, errors.Newf("not yet implemented") + populate: func(ctx context.Context, p *planner, dbContext *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error { + for _, projection := range geoprojbase.Projections { + if err := addRow( + tree.NewDInt(tree.DInt(projection.SRID)), + tree.NewDString(projection.AuthName), + 
tree.NewDInt(tree.DInt(projection.AuthSRID)), + tree.NewDString(projection.SRText), + tree.NewDString(projection.Proj4Text.String()), + ); err != nil { + return err + } + } + return nil }, } diff --git a/pkg/sql/pgwire/auth_test.go b/pkg/sql/pgwire/auth_test.go index 30c3d19d3506..e400b8e4853f 100644 --- a/pkg/sql/pgwire/auth_test.go +++ b/pkg/sql/pgwire/auth_test.go @@ -258,7 +258,8 @@ func hbaRunTest(t *testing.T, insecure bool) { // this is currently broken for secondary loggers. // See: https://github.com/cockroachdb/cockroach/issues/45745 // So instead we need to do the filtering ourselves. - entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 10000, authLogFileRe) + entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 10000, authLogFileRe, + log.WithFlattenedSensitiveData) if err != nil { t.Fatal(err) } @@ -274,13 +275,17 @@ func hbaRunTest(t *testing.T, insecure bool) { entry := &entries[i] t.Logf("found log entry: %+v", *entry) - // The message is going to contain a client address, with a random port number. + // The tag part is going to contain a client address, with a random port number. // To make the test deterministic, erase the random part. - msg := addrRe.ReplaceAllString(entry.Message, ",client=XXX") + tags := addrRe.ReplaceAllString(entry.Tags, ",client=XXX") + var maybeTags string + if len(tags) > 0 { + maybeTags = "[" + tags + "] " + } // Ditto with the duration. 
- msg = durationRe.ReplaceAllString(msg, "duration: XXX") + msg := durationRe.ReplaceAllString(entry.Message, "duration: XXX") - fmt.Fprintf(&buf, "%c: %s\n", entry.Severity.String()[0], msg) + fmt.Fprintf(&buf, "%c: %s%s\n", entry.Severity.String()[0], maybeTags, msg) } lastLogMsg := entries[0].Message if !re.MatchString(lastLogMsg) { diff --git a/pkg/sql/pgwire/pgwire_test.go b/pkg/sql/pgwire/pgwire_test.go index b37c4ab08c3d..49645b130b5a 100644 --- a/pkg/sql/pgwire/pgwire_test.go +++ b/pkg/sql/pgwire/pgwire_test.go @@ -523,7 +523,7 @@ func TestPGPreparedQuery(t *testing.T) { baseTest.Results("defaultdb"), }}, {"SELECT descriptor FROM system.descriptor WHERE descriptor != $1 LIMIT 1", []preparedQueryTest{ - baseTest.SetArgs([]byte("abc")).Results([]byte("\x12!\n\x06system\x10\x01\x1a\x15\n\t\n\x05admin\x100\n\b\n\x04root\x100")), + baseTest.SetArgs([]byte("abc")).Results([]byte("\x12%\n\x06system\x10\x01\x1a\x15\n\t\n\x05admin\x100\n\b\n\x04root\x100\"\x00(\x01")), }}, {"SHOW COLUMNS FROM system.users", []preparedQueryTest{ baseTest. diff --git a/pkg/sql/physicalplan/physical_plan.go b/pkg/sql/physicalplan/physical_plan.go index 553b45d2cd03..fae6264e3d67 100644 --- a/pkg/sql/physicalplan/physical_plan.go +++ b/pkg/sql/physicalplan/physical_plan.go @@ -77,7 +77,7 @@ type PhysicalPlan struct { LocalProcessors []execinfra.LocalProcessor // LocalProcessorIndexes contains pointers to all of the RowSourceIdx fields - // of the LocalPlanNodeSpecs that were created. This list is in the same + // of the LocalPlanNodeSpecs that were created. This list is in the same // order as LocalProcessors, and is kept up-to-date so that LocalPlanNodeSpecs // always have the correct index into the LocalProcessors slice. LocalProcessorIndexes []*uint32 @@ -105,6 +105,10 @@ type PhysicalPlan struct { // in-place during planning. ResultTypes []*types.T + // ResultColumns is the schema (result columns) of the rows produced by the + // ResultRouters. 
+ ResultColumns sqlbase.ResultColumns + // MergeOrdering is the ordering guarantee for the result streams that must be // maintained when the streams eventually merge. The column indexes refer to // columns for the rows produced by ResultRouters. diff --git a/pkg/sql/plan.go b/pkg/sql/plan.go index c4d63faa3e66..95dbc7092000 100644 --- a/pkg/sql/plan.go +++ b/pkg/sql/plan.go @@ -329,9 +329,7 @@ func (p planMaybePhysical) isPhysicalPlan() bool { func (p planMaybePhysical) planColumns() sqlbase.ResultColumns { if p.isPhysicalPlan() { - // TODO(yuzefovich): update this once we support creating table reader - // specs directly in the optimizer (see #47474). - return nil + return p.physPlan.ResultColumns } return planColumns(p.planNode) } diff --git a/pkg/sql/plan_opt.go b/pkg/sql/plan_opt.go index 6404567dece5..1e9704de3ecd 100644 --- a/pkg/sql/plan_opt.go +++ b/pkg/sql/plan_opt.go @@ -180,8 +180,11 @@ func (p *planner) makeOptimizerPlan(ctx context.Context) error { plan exec.Plan bld *execbuilder.Builder ) + // TODO(yuzefovich): we're creating a new exec.Factory for every query, but + // we probably could pool those allocations using sync.Pool. Investigate + // this. 
if mode := p.SessionData().ExperimentalDistSQLPlanningMode; mode != sessiondata.ExperimentalDistSQLPlanningOff { - bld = execbuilder.New(newDistSQLSpecExecFactory(), execMemo, &opc.catalog, root, p.EvalContext()) + bld = execbuilder.New(newDistSQLSpecExecFactory(p), execMemo, &opc.catalog, root, p.EvalContext()) plan, err = bld.Build() if err != nil { if mode == sessiondata.ExperimentalDistSQLPlanningAlways && diff --git a/pkg/sql/planhook.go b/pkg/sql/planhook.go index 12ff998342b7..66e063f478aa 100644 --- a/pkg/sql/planhook.go +++ b/pkg/sql/planhook.go @@ -94,7 +94,7 @@ type PlanHookState interface { ctx context.Context, tn *TableName, required bool, requiredType resolver.ResolveRequiredType, ) (table *MutableTableDescriptor, err error) ShowCreate( - ctx context.Context, dbPrefix string, allDescs []sqlbase.Descriptor, desc *sqlbase.TableDescriptor, displayOptions ShowCreateDisplayOptions, + ctx context.Context, dbPrefix string, allDescs []sqlbase.Descriptor, desc *sqlbase.ImmutableTableDescriptor, displayOptions ShowCreateDisplayOptions, ) (string, error) } diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index e6cf4d09bb3d..43827b0fa8ba 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -197,6 +197,12 @@ type planner struct { noticeSender noticeSender queryCacheSession querycache.Session + + // contextDatabaseID is the ID of a database. It is set during some name + // resolution processes to disallow cross database references. In particular, + // the type resolution steps will disallow resolution of types that have a + // parentID != contextDatabaseID when it is set. 
+ contextDatabaseID sqlbase.ID } func (ctx *extendedEvalContext) setSessionID(sessionID ClusterWideID) { diff --git a/pkg/sql/privileged_accessor.go b/pkg/sql/privileged_accessor.go index e1dca823a60d..5010509ddace 100644 --- a/pkg/sql/privileged_accessor.go +++ b/pkg/sql/privileged_accessor.go @@ -99,14 +99,13 @@ func (p *planner) LookupZoneConfigByNamespaceID( // to check the permissions of a descriptor given its ID, or the id given // is not a descriptor of a table or database. func (p *planner) checkDescriptorPermissions(ctx context.Context, id sqlbase.ID) error { - desc, found, err := catalogkv.LookupDescriptorByID(ctx, p.txn, p.ExecCfg().Codec, id) + desc, err := catalogkv.GetDescriptorByID(ctx, p.txn, p.ExecCfg().Codec, id) if err != nil { return err } - if !found { + if desc == nil { return nil } - if err := p.CheckAnyPrivilege(ctx, desc); err != nil { return pgerror.New(pgcode.InsufficientPrivilege, "insufficient privilege") } diff --git a/pkg/sql/rename_database.go b/pkg/sql/rename_database.go index edb4342bc369..5789693ba970 100644 --- a/pkg/sql/rename_database.go +++ b/pkg/sql/rename_database.go @@ -25,7 +25,7 @@ import ( ) type renameDatabaseNode struct { - dbDesc *sqlbase.DatabaseDescriptor + dbDesc *sqlbase.ImmutableDatabaseDescriptor newName string } @@ -86,7 +86,7 @@ func (n *renameDatabaseNode) startExec(params runParams) error { lookupFlags := p.CommonLookupFlags(true /*required*/) // DDL statements bypass the cache. 
lookupFlags.AvoidCached = true - schemas, err := p.Tables().GetSchemasForDatabase(ctx, p.txn, dbDesc.ID) + schemas, err := p.Tables().GetSchemasForDatabase(ctx, p.txn, dbDesc.GetID()) if err != nil { return err } @@ -135,7 +135,7 @@ func (n *renameDatabaseNode) startExec(params runParams) error { dependedOn, tbDesc, dependentDesc, - dbDesc.Name, + dbDesc.GetName(), ) if err != nil { return err @@ -145,12 +145,12 @@ func (n *renameDatabaseNode) startExec(params runParams) error { } tbTableName := tree.MakeTableNameWithSchema( - tree.Name(dbDesc.Name), + tree.Name(dbDesc.GetName()), tree.Name(schema), tree.Name(tbDesc.Name), ) var dependentDescQualifiedString string - if dbDesc.ID != dependentDesc.ParentID || tbDesc.GetParentSchemaID() != dependentDesc.GetParentSchemaID() { + if dbDesc.GetID() != dependentDesc.ParentID || tbDesc.GetParentSchemaID() != dependentDesc.GetParentSchemaID() { var err error dependentDescQualifiedString, err = p.getQualifiedTableName(ctx, dependentDesc) if err != nil { @@ -167,7 +167,7 @@ func (n *renameDatabaseNode) startExec(params runParams) error { } } else { dependentDescTableName := tree.MakeTableNameWithSchema( - tree.Name(dbDesc.Name), + tree.Name(dbDesc.GetName()), tree.Name(schema), tree.Name(dependentDesc.Name), ) @@ -187,10 +187,10 @@ func (n *renameDatabaseNode) startExec(params runParams) error { tbTableName.String(), dependentDescQualifiedString, ) - if dependentDesc.GetParentID() == dbDesc.ID { + if dependentDesc.GetParentID() == dbDesc.GetID() { hint += fmt.Sprintf( " or modify the default to not reference the database name %q", - dbDesc.Name, + dbDesc.GetName(), ) } return errors.WithHint(depErr, hint) diff --git a/pkg/sql/rename_table.go b/pkg/sql/rename_table.go index 7d4e93e08cb1..5cc95e8fd670 100644 --- a/pkg/sql/rename_table.go +++ b/pkg/sql/rename_table.go @@ -114,10 +114,10 @@ func (n *renameTableNode) startExec(params runParams) error { } tableDesc.SetName(newTn.Table()) - tableDesc.ParentID = targetDbDesc.ID + 
tableDesc.ParentID = targetDbDesc.GetID() newTbKey := sqlbase.MakePublicTableNameKey(ctx, params.ExecCfg().Settings, - targetDbDesc.ID, newTn.Table()).Key(p.ExecCfg().Codec) + targetDbDesc.GetID(), newTn.Table()).Key(p.ExecCfg().Codec) if err := tableDesc.Validate(ctx, p.txn, p.ExecCfg().Codec); err != nil { return err @@ -126,8 +126,8 @@ func (n *renameTableNode) startExec(params runParams) error { descID := tableDesc.GetID() parentSchemaID := tableDesc.GetParentSchemaID() - renameDetails := sqlbase.TableDescriptor_NameInfo{ - ParentID: prevDbDesc.ID, + renameDetails := sqlbase.NameInfo{ + ParentID: prevDbDesc.GetID(), ParentSchemaID: parentSchemaID, Name: oldTn.Table()} tableDesc.DrainingNames = append(tableDesc.DrainingNames, renameDetails) @@ -145,13 +145,13 @@ func (n *renameTableNode) startExec(params runParams) error { log.VEventf(ctx, 2, "CPut %s -> %d", newTbKey, descID) } err = catalogkv.WriteDescToBatch(ctx, p.extendedEvalCtx.Tracing.KVTracingEnabled(), - p.EvalContext().Settings, b, p.ExecCfg().Codec, descID, tableDesc.TableDesc()) + p.EvalContext().Settings, b, p.ExecCfg().Codec, descID, tableDesc) if err != nil { return err } exists, id, err := sqlbase.LookupPublicTableID( - params.ctx, params.p.txn, p.ExecCfg().Codec, targetDbDesc.ID, newTn.Table(), + params.ctx, params.p.txn, p.ExecCfg().Codec, targetDbDesc.GetID(), newTn.Table(), ) if err == nil && exists { // Try and see what kind of object we collided with. 
@@ -159,7 +159,7 @@ func (n *renameTableNode) startExec(params runParams) error { if err != nil { return err } - return makeObjectAlreadyExistsError(desc, newTn.Table()) + return makeObjectAlreadyExistsError(desc.DescriptorProto(), newTn.Table()) } else if err != nil { return err } diff --git a/pkg/sql/resolver.go b/pkg/sql/resolver.go index e9da41b42e1e..d586c4a68d34 100644 --- a/pkg/sql/resolver.go +++ b/pkg/sql/resolver.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -24,7 +25,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/errors" ) @@ -33,11 +33,15 @@ var _ resolver.SchemaResolver = &planner{} // ResolveUncachedDatabaseByName looks up a database name from the store. 
func (p *planner) ResolveUncachedDatabaseByName( ctx context.Context, dbName string, required bool, -) (res *UncachedDatabaseDescriptor, err error) { +) (res *sqlbase.ImmutableDatabaseDescriptor, err error) { p.runWithOptions(resolveFlags{skipCache: true}, func() { - res, err = p.LogicalSchemaAccessor().GetDatabaseDesc( + var desc sqlbase.DatabaseDescriptorInterface + desc, err = p.LogicalSchemaAccessor().GetDatabaseDesc( ctx, p.txn, p.ExecCfg().Codec, dbName, p.CommonLookupFlags(required), ) + if desc != nil { + res = desc.(*sqlbase.ImmutableDatabaseDescriptor) + } }) return res, err } @@ -59,11 +63,16 @@ func (p *planner) runWithOptions(flags resolveFlags, fn func()) { defer func(prev bool) { p.avoidCachedDescriptors = prev }(p.avoidCachedDescriptors) p.avoidCachedDescriptors = true } + if flags.contextDatabaseID != sqlbase.InvalidID { + defer func(prev sqlbase.ID) { p.contextDatabaseID = prev }(p.contextDatabaseID) + p.contextDatabaseID = flags.contextDatabaseID + } fn() } type resolveFlags struct { - skipCache bool + skipCache bool + contextDatabaseID sqlbase.ID } func (p *planner) ResolveMutableTableDescriptor( @@ -100,11 +109,11 @@ func (p *planner) LookupSchema( if err != nil || dbDesc == nil { return false, nil, err } - found, _, err = sc.IsValidSchema(ctx, p.txn, p.ExecCfg().Codec, dbDesc.ID, scName) + found, _, err = sc.IsValidSchema(ctx, p.txn, p.ExecCfg().Codec, dbDesc.GetID(), scName) if err != nil { return false, nil, err } - return found, dbDesc, nil + return found, dbDesc.(*sqlbase.ImmutableDatabaseDescriptor), nil } // LookupObject implements the tree.ObjectNameExistingResolver interface. 
@@ -134,8 +143,8 @@ } func (p *planner) makeTypeLookupFn(ctx context.Context) sqlbase.TypeLookupFunc { - return func(id sqlbase.ID) (*tree.TypeName, *TypeDescriptor, error) { - return resolver.ResolveTypeDescByID(ctx, p.txn, p.ExecCfg().Codec, id) + return func(id sqlbase.ID) (*tree.TypeName, sqlbase.TypeDescriptorInterface, error) { + return resolver.ResolveTypeDescByID(ctx, p.txn, p.ExecCfg().Codec, id, tree.ObjectLookupFlags{}) } } @@ -156,12 +165,26 @@ } tn := tree.MakeTypeNameFromPrefix(prefix, tree.Name(name.Object())) tdesc := desc.(*sqlbase.ImmutableTypeDescriptor) + + // Disallow cross-database type resolution. Note that we check + // p.contextDatabaseID != sqlbase.InvalidID when we have been restricted to + // accessing types in the database with ID = p.contextDatabaseID by + // p.runWithOptions. So, check to see if the resolved descriptor's parentID + // matches, unless the descriptor's parentID is invalid. This could happen + // when the type being resolved is a builtin type prefaced with a virtual + // schema like `pg_catalog.int`. Resolution for these types returns a dummy + // TypeDescriptor, so ignore those cases. + if p.contextDatabaseID != sqlbase.InvalidID && tdesc.ParentID != sqlbase.InvalidID && tdesc.ParentID != p.contextDatabaseID { + return nil, pgerror.Newf( + pgcode.FeatureNotSupported, "cross database type references are not supported: %s", tn.String()) + } + return tdesc.MakeTypesT(&tn, p.makeTypeLookupFn(ctx)) } // ResolveTypeByID implements the tree.TypeResolver interface. 
func (p *planner) ResolveTypeByID(ctx context.Context, id uint32) (*types.T, error) { - name, desc, err := resolver.ResolveTypeDescByID(ctx, p.txn, p.ExecCfg().Codec, sqlbase.ID(id)) + name, desc, err := resolver.ResolveTypeDescByID(ctx, p.txn, p.ExecCfg().Codec, sqlbase.ID(id), tree.ObjectLookupFlags{}) if err != nil { return nil, err } @@ -176,17 +199,11 @@ func (p *planner) maybeHydrateTypesInDescriptor( ) error { // As of now, only {Mutable,Immutable}TableDescriptor have types.T that // need to be hydrated. - switch desc := objDesc.(type) { - case *sqlbase.MutableTableDescriptor: - if err := sqlbase.HydrateTypesInTableDescriptor(desc.TableDesc(), p.makeTypeLookupFn(ctx)); err != nil { - return err - } - case *sqlbase.ImmutableTableDescriptor: - if err := sqlbase.HydrateTypesInTableDescriptor(desc.TableDesc(), p.makeTypeLookupFn(ctx)); err != nil { - return err - } + tableDesc := objDesc.(catalog.Descriptor).TableDesc() + if tableDesc == nil { + return nil } - return nil + return sqlbase.HydrateTypesInTableDescriptor(tableDesc, p.makeTypeLookupFn(ctx)) } // ObjectLookupFlags is part of the resolver.SchemaResolver interface. @@ -200,12 +217,12 @@ func (p *planner) ObjectLookupFlags(required, requireMutable bool) tree.ObjectLo // getDescriptorsFromTargetList fetches the descriptors for the targets. 
func getDescriptorsFromTargetList( ctx context.Context, p *planner, targets tree.TargetList, -) ([]sqlbase.DescriptorProto, error) { +) ([]sqlbase.DescriptorInterface, error) { if targets.Databases != nil { if len(targets.Databases) == 0 { return nil, errNoDatabase } - descs := make([]sqlbase.DescriptorProto, 0, len(targets.Databases)) + descs := make([]sqlbase.DescriptorInterface, 0, len(targets.Databases)) for _, database := range targets.Databases { descriptor, err := p.ResolveUncachedDatabaseByName(ctx, string(database), true /*required*/) if err != nil { @@ -222,7 +239,7 @@ func getDescriptorsFromTargetList( if len(targets.Tables) == 0 { return nil, errNoTable } - descs := make([]sqlbase.DescriptorProto, 0, len(targets.Tables)) + descs := make([]sqlbase.DescriptorInterface, 0, len(targets.Tables)) for _, tableTarget := range targets.Tables { tableGlob, err := tableTarget.NormalizeTablePattern() if err != nil { @@ -269,7 +286,7 @@ func (p *planner) getQualifiedTableName( return "", err } tbName := tree.MakeTableNameWithSchema( - tree.Name(dbDesc.Name), + tree.Name(dbDesc.GetName()), tree.Name(schemaName), tree.Name(desc.Name), ) @@ -485,10 +502,10 @@ func (r *fkSelfResolver) LookupObject( type internalLookupCtx struct { dbNames map[sqlbase.ID]string dbIDs []sqlbase.ID - dbDescs map[sqlbase.ID]*DatabaseDescriptor - tbDescs map[sqlbase.ID]*TableDescriptor + dbDescs map[sqlbase.ID]*sqlbase.ImmutableDatabaseDescriptor + tbDescs map[sqlbase.ID]*ImmutableTableDescriptor tbIDs []sqlbase.ID - typDescs map[sqlbase.ID]*TypeDescriptor + typDescs map[sqlbase.ID]*sqlbase.ImmutableTypeDescriptor typIDs []sqlbase.ID } @@ -496,43 +513,57 @@ type internalLookupCtx struct { // database descriptor using the table's ID. 
type tableLookupFn = *internalLookupCtx -func newInternalLookupCtx( - descs []sqlbase.DescriptorProto, prefix *DatabaseDescriptor, +// newInternalLookupCtxFromDescriptors "unwraps" the descriptors into the +// appropriate implementation of DescriptorInterface before constructing a +// new internalLookupCtx. It is intended only for use when dealing with backups. +func newInternalLookupCtxFromDescriptors( + rawDescs []sqlbase.Descriptor, prefix *sqlbase.ImmutableDatabaseDescriptor, ) *internalLookupCtx { - wrappedDescs := make([]sqlbase.Descriptor, len(descs)) - for i, desc := range descs { - wrappedDescs[i] = *sqlbase.WrapDescriptor(desc) + descs := make([]sqlbase.DescriptorInterface, len(rawDescs)) + for i := range rawDescs { + desc := &rawDescs[i] + switch t := desc.Union.(type) { + case *sqlbase.Descriptor_Database: + descs[i] = sqlbase.NewImmutableDatabaseDescriptor(*t.Database) + case *sqlbase.Descriptor_Table: + descs[i] = sqlbase.NewImmutableTableDescriptor(*t.Table) + case *sqlbase.Descriptor_Type: + descs[i] = sqlbase.NewImmutableTypeDescriptor(*t.Type) + case *sqlbase.Descriptor_Schema: + descs[i] = sqlbase.NewImmutableSchemaDescriptor(*t.Schema) + } } - return newInternalLookupCtxFromDescriptors(wrappedDescs, prefix) + return newInternalLookupCtx(descs, prefix) } -func newInternalLookupCtxFromDescriptors( - descs []sqlbase.Descriptor, prefix *DatabaseDescriptor, +func newInternalLookupCtx( + descs []sqlbase.DescriptorInterface, prefix *sqlbase.ImmutableDatabaseDescriptor, ) *internalLookupCtx { dbNames := make(map[sqlbase.ID]string) - dbDescs := make(map[sqlbase.ID]*DatabaseDescriptor) - tbDescs := make(map[sqlbase.ID]*TableDescriptor) - typDescs := make(map[sqlbase.ID]*TypeDescriptor) + dbDescs := make(map[sqlbase.ID]*sqlbase.ImmutableDatabaseDescriptor) + tbDescs := make(map[sqlbase.ID]*ImmutableTableDescriptor) + typDescs := make(map[sqlbase.ID]*sqlbase.ImmutableTypeDescriptor) var tbIDs, typIDs, dbIDs []sqlbase.ID // Record database descriptors for 
name lookups. - for _, desc := range descs { - if database := desc.GetDatabase(); database != nil { - dbNames[database.ID] = database.Name - dbDescs[database.ID] = database - if prefix == nil || prefix.ID == database.ID { - dbIDs = append(dbIDs, database.ID) + for i := range descs { + switch desc := descs[i].(type) { + case *sqlbase.ImmutableDatabaseDescriptor: + dbNames[desc.GetID()] = desc.GetName() + dbDescs[desc.GetID()] = desc + if prefix == nil || prefix.GetID() == desc.GetID() { + dbIDs = append(dbIDs, desc.GetID()) } - } else if table := desc.Table(hlc.Timestamp{}); table != nil { - tbDescs[table.ID] = table - if prefix == nil || prefix.ID == table.ParentID { + case *sqlbase.ImmutableTableDescriptor: + tbDescs[desc.GetID()] = desc + if prefix == nil || prefix.GetID() == desc.ParentID { // Only make the table visible for iteration if the prefix was included. - tbIDs = append(tbIDs, table.ID) + tbIDs = append(tbIDs, desc.ID) } - } else if typ := desc.GetType(); typ != nil { - typDescs[typ.ID] = typ - if prefix == nil || prefix.ID == typ.ParentID { + case *sqlbase.ImmutableTypeDescriptor: + typDescs[desc.GetID()] = desc + if prefix == nil || prefix.GetID() == desc.ParentID { // Only make the type visible for iteration if the prefix was included. 
- typIDs = append(typIDs, typ.ID) + typIDs = append(typIDs, desc.GetID()) } } } @@ -547,7 +578,9 @@ func newInternalLookupCtxFromDescriptors( } } -func (l *internalLookupCtx) getDatabaseByID(id sqlbase.ID) (*DatabaseDescriptor, error) { +func (l *internalLookupCtx) getDatabaseByID( + id sqlbase.ID, +) (*sqlbase.ImmutableDatabaseDescriptor, error) { db, ok := l.dbDescs[id] if !ok { return nil, sqlbase.NewUndefinedDatabaseError(fmt.Sprintf("[%d]", id)) @@ -561,7 +594,7 @@ func (l *internalLookupCtx) getTableByID(id sqlbase.ID) (*TableDescriptor, error return nil, sqlbase.NewUndefinedRelationError( tree.NewUnqualifiedTableName(tree.Name(fmt.Sprintf("[%d]", id)))) } - return tb, nil + return tb.TableDesc(), nil } func (l *internalLookupCtx) getParentName(table *TableDescriptor) string { @@ -591,22 +624,22 @@ func getParentAsTableName( if err != nil { return tree.TableName{}, err } - parentName = tree.MakeTableName(tree.Name(parentDbDesc.Name), tree.Name(parentTable.Name)) - parentName.ExplicitSchema = parentDbDesc.Name != dbPrefix + parentName = tree.MakeTableName(tree.Name(parentDbDesc.GetName()), tree.Name(parentTable.Name)) + parentName.ExplicitSchema = parentDbDesc.GetName() != dbPrefix return parentName, nil } // getTableAsTableName returns a TableName object for a given TableDescriptor. 
func getTableAsTableName( - l simpleSchemaResolver, table *sqlbase.TableDescriptor, dbPrefix string, + l simpleSchemaResolver, table *sqlbase.ImmutableTableDescriptor, dbPrefix string, ) (tree.TableName, error) { var tableName tree.TableName tableDbDesc, err := l.getDatabaseByID(table.ParentID) if err != nil { return tree.TableName{}, err } - tableName = tree.MakeTableName(tree.Name(tableDbDesc.Name), tree.Name(table.Name)) - tableName.ExplicitSchema = tableDbDesc.Name != dbPrefix + tableName = tree.MakeTableName(tree.Name(tableDbDesc.GetName()), tree.Name(table.Name)) + tableName.ExplicitSchema = tableDbDesc.GetName() != dbPrefix return tableName, nil } @@ -688,6 +721,6 @@ func (p *planner) ResolvedName(u *tree.UnresolvedObjectName) tree.ObjectName { } type simpleSchemaResolver interface { - getDatabaseByID(id sqlbase.ID) (*DatabaseDescriptor, error) + getDatabaseByID(id sqlbase.ID) (*sqlbase.ImmutableDatabaseDescriptor, error) getTableByID(id sqlbase.ID) (*TableDescriptor, error) } diff --git a/pkg/sql/row/cascader.go b/pkg/sql/row/cascader.go index 2ef6775c8f96..d9448eb2348f 100644 --- a/pkg/sql/row/cascader.go +++ b/pkg/sql/row/cascader.go @@ -975,7 +975,7 @@ func (c *cascader) updateRows( } return nil, nil, nil, 0, pgerror.Newf(pgcode.NullValueNotAllowed, "cannot cascade a null value into %q as it violates a NOT NULL constraint", - tree.ErrString(tree.NewUnresolvedName(database.Name, schema, referencingTable.Name, column.Name))) + tree.ErrString(tree.NewUnresolvedName(database.GetName(), schema, referencingTable.Name, column.Name))) } } continue diff --git a/pkg/sql/row/fk_table_lookup.go b/pkg/sql/row/fk_table_lookup.go index 8f02d2f84c93..8c25b1d78268 100644 --- a/pkg/sql/row/fk_table_lookup.go +++ b/pkg/sql/row/fk_table_lookup.go @@ -125,12 +125,12 @@ func NoLookup(_ context.Context, _ TableID) (catalog.TableEntry, error) { // CheckPrivilegeFunction is the function type used by MakeFkMetadata that will // check the privileges of the current user to access 
specific tables. -type CheckPrivilegeFunction func(context.Context, sqlbase.DescriptorProto, privilege.Kind) error +type CheckPrivilegeFunction func(context.Context, sqlbase.DescriptorInterface, privilege.Kind) error // NoCheckPrivilege is a stub that can be used to not actually verify privileges. // This can be used when the FK work is initialized from a pre-populated // FkTableMetadata map. -func NoCheckPrivilege(_ context.Context, _ sqlbase.DescriptorProto, _ privilege.Kind) error { +func NoCheckPrivilege(_ context.Context, _ sqlbase.DescriptorInterface, _ privilege.Kind) error { return nil } diff --git a/pkg/sql/rowexec/backfiller_test.go b/pkg/sql/rowexec/backfiller_test.go index cbd7d74635e0..9d9f82cddf51 100644 --- a/pkg/sql/rowexec/backfiller_test.go +++ b/pkg/sql/rowexec/backfiller_test.go @@ -72,12 +72,13 @@ func TestWriteResumeSpan(t *testing.T) { } registry := server.JobRegistry().(*jobs.Registry) - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") if err := kvDB.Put( ctx, sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), - sqlbase.WrapDescriptor(tableDesc), + tableDesc.DescriptorProto(), ); err != nil { t.Fatal(err) } diff --git a/pkg/sql/scan.go b/pkg/sql/scan.go index 0af6f1466efa..94cf82f453f2 100644 --- a/pkg/sql/scan.go +++ b/pkg/sql/scan.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -109,23 +110,16 @@ type scanNode struct { lockingWaitPolicy sqlbase.ScanLockingWaitPolicy } -// scanColumnsConfig controls the "schema" of a scan node. 
The zero value is the -// default: all "public" columns. -// Note that not all columns in the schema are read and decoded; that is further -// controlled by scanNode.valNeededForCol. +// scanColumnsConfig controls the "schema" of a scan node. type scanColumnsConfig struct { - // If set, only these columns are part of the scan node schema, in this order - // (with the caveat that the addUnwantedAsHidden flag below can add more - // columns). Non public columns can only be added if allowed by the visibility - // flag below. - // If not set, then all visible columns will be part of the scan node schema, - // as specified by the visibility flag below. The addUnwantedAsHidden flag - // is ignored in this case. + // wantedColumns contains all the columns are part of the scan node schema, + // in this order (with the caveat that the addUnwantedAsHidden flag below + // can add more columns). Non public columns can only be added if allowed + // by the visibility flag below. wantedColumns []tree.ColumnID // When set, the columns that are not in the wantedColumns list are added to - // the list of columns as hidden columns. Only useful in conjunction with - // wantedColumns. + // the list of columns as hidden columns. 
addUnwantedAsHidden bool // If visibility is set to execinfra.ScanVisibilityPublicAndNotPublic, then @@ -133,7 +127,14 @@ type scanColumnsConfig struct { visibility execinfrapb.ScanVisibility } -var publicColumnsCfg = scanColumnsConfig{} +func (cfg scanColumnsConfig) assertValidReqOrdering(reqOrdering exec.OutputOrdering) error { + for i := range reqOrdering { + if reqOrdering[i].ColIdx >= len(cfg.wantedColumns) { + return errors.Errorf("invalid reqOrdering: %v", reqOrdering) + } + } + return nil +} func (p *planner) Scan() *scanNode { n := scanNodePool.Get().(*scanNode) @@ -281,13 +282,7 @@ func initColsForScan( desc *sqlbase.ImmutableTableDescriptor, colCfg scanColumnsConfig, ) (cols []sqlbase.ColumnDescriptor, err error) { if colCfg.wantedColumns == nil { - // Add all active and maybe mutation columns. - if colCfg.visibility == execinfra.ScanVisibilityPublic { - cols = desc.Columns - } else { - cols = desc.ReadableColumns - } - return cols, nil + return nil, errors.AssertionFailedf("unexpectedly wantedColumns is nil") } cols = make([]sqlbase.ColumnDescriptor, 0, len(desc.ReadableColumns)) diff --git a/pkg/sql/schema_accessors.go b/pkg/sql/schema_accessors.go index 85c51f6ac115..4fac8932c5b8 100644 --- a/pkg/sql/schema_accessors.go +++ b/pkg/sql/schema_accessors.go @@ -40,7 +40,7 @@ type ( DatabaseDescriptor = sqlbase.DatabaseDescriptor // UncachedDatabaseDescriptor is provided for convenience and to make the // interface definitions below more intuitive. - UncachedDatabaseDescriptor = sqlbase.DatabaseDescriptor + UncachedDatabaseDescriptor = sqlbase.ImmutableDatabaseDescriptor // MutableTableDescriptor is provided for convenience and to make the // interface definitions below more intuitive. 
MutableTableDescriptor = sqlbase.MutableTableDescriptor diff --git a/pkg/sql/schema_change_migrations_test.go b/pkg/sql/schema_change_migrations_test.go index 852f6ea27afa..ceeb16a65a9e 100644 --- a/pkg/sql/schema_change_migrations_test.go +++ b/pkg/sql/schema_change_migrations_test.go @@ -301,9 +301,9 @@ func migrateJobToOldFormat( ) error { ctx := context.Background() - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if schemaChangeType == CreateTable { - tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "new_table") + tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "new_table") } if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -362,7 +362,7 @@ func migrateJobToOldFormat( return err } return kvDB.Put(ctx, sqlbase.MakeDescMetadataKey( - keys.SystemSQLCodec, tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc), + keys.SystemSQLCodec, tableDesc.GetID()), tableDesc.DescriptorProto(), ) }) } @@ -427,7 +427,7 @@ func migrateGCJobToOldFormat( return nil case DropIndex: - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if l := len(tableDesc.GCMutations); l != 1 { return errors.AssertionFailedf("expected exactly 1 GCMutation, found %d", l) } @@ -447,7 +447,7 @@ func migrateGCJobToOldFormat( return err } return kvDB.Put(ctx, sqlbase.MakeDescMetadataKey( - keys.SystemSQLCodec, tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc), + keys.SystemSQLCodec, tableDesc.GetID()), tableDesc.DescriptorProto(), ) }) default: @@ -875,7 +875,7 @@ func TestGCJobCreated(t *testing.T) { if _, err := sqlDB.Exec(`CREATE DATABASE t; CREATE TABLE t.test();`); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, 
keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") tableDesc.State = sqlbase.TableDescriptor_DROP tableDesc.Version++ tableDesc.DropTime = 1 @@ -889,7 +889,7 @@ func TestGCJobCreated(t *testing.T) { return err } return kvDB.Put(ctx, sqlbase.MakeDescMetadataKey( - keys.SystemSQLCodec, tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc), + keys.SystemSQLCodec, tableDesc.GetID()), tableDesc.DescriptorProto(), ) }); err != nil { t.Fatal(err) @@ -958,7 +958,7 @@ func TestMissingMutation(t *testing.T) { // To get the table descriptor into the (invalid) state we're trying to test, // clear the mutations on the table descriptor. - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") tableDesc.Mutations = nil require.NoError( t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -966,7 +966,7 @@ func TestMissingMutation(t *testing.T) { return err } return kvDB.Put(ctx, sqlbase.MakeDescMetadataKey( - keys.SystemSQLCodec, tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc), + keys.SystemSQLCodec, tableDesc.GetID()), tableDesc.DescriptorProto(), ) }), ) diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index 9f668ddf5713..9f260693b875 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -118,8 +118,8 @@ func NewSchemaChangerForTesting( } // isPermanentSchemaChangeError returns true if the error results in -// a permanent failure of a schema change. This function is a whitelist -// instead of a blacklist: only known safe errors are confirmed to not be +// a permanent failure of a schema change. This function is a allowlist +// instead of a blocklist: only known safe errors are confirmed to not be // permanent errors. Anything unknown is assumed to be permanent. 
func isPermanentSchemaChangeError(err error) bool { if err == nil { @@ -341,7 +341,7 @@ func (sc *SchemaChanger) drainNames(ctx context.Context) error { // Publish a new version with all the names drained after everyone // has seen the version with the new name. All the draining names // can be reused henceforth. - var namesToReclaim []sqlbase.TableDescriptor_NameInfo + var namesToReclaim []sqlbase.NameInfo _, err := sc.leaseMgr.Publish( ctx, sc.tableID, @@ -1550,7 +1550,7 @@ func (*SchemaChangerTestingKnobs) ModuleTestingKnobs() {} // createSchemaChangeEvalCtx creates an extendedEvalContext() to be used for backfills. // // TODO(andrei): This EvalContext() will be broken for backfills trying to use -// functions marked with distsqlBlacklist. +// functions marked with distsqlBlocklist. // Also, the SessionTracing inside the context is unrelated to the one // used in the surrounding SQL session, so session tracing is unable // to capture schema change activity. @@ -1690,7 +1690,7 @@ func (r schemaChangeResumer) Resume( ) return nil case !isPermanentSchemaChangeError(scErr): - // Check if the error is on a whitelist of errors we should retry on, + // Check if the error is on a allowlist of errors we should retry on, // including the schema change not having the first mutation in line. default: // All other errors lead to a failed job. @@ -1808,7 +1808,7 @@ func (r schemaChangeResumer) OnFailOrCancel(ctx context.Context, phs interface{} // wrapping it in a retry error. return rollbackErr case !isPermanentSchemaChangeError(rollbackErr): - // Check if the error is on a whitelist of errors we should retry on, and + // Check if the error is on a allowlist of errors we should retry on, and // have the job registry retry. 
return jobs.NewRetryJobError(rollbackErr.Error()) default: diff --git a/pkg/sql/schema_changer_test.go b/pkg/sql/schema_changer_test.go index fd88e00ce3a2..b55d532407fc 100644 --- a/pkg/sql/schema_changer_test.go +++ b/pkg/sql/schema_changer_test.go @@ -119,7 +119,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); } // Read table descriptor for version. - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") expectedVersion := tableDesc.Version ctx := context.Background() @@ -129,7 +129,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") newVersion := tableDesc.Version if newVersion != expectedVersion { t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion) @@ -155,13 +155,15 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); tableDesc.NextMutationID++ // Run state machine in both directions. 
- for _, direction := range []sqlbase.DescriptorMutation_Direction{sqlbase.DescriptorMutation_ADD, sqlbase.DescriptorMutation_DROP} { + for _, direction := range []sqlbase.DescriptorMutation_Direction{ + sqlbase.DescriptorMutation_ADD, sqlbase.DescriptorMutation_DROP, + } { tableDesc.Mutations[0].Direction = direction expectedVersion++ if err := kvDB.Put( ctx, sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), - sqlbase.WrapDescriptor(tableDesc), + tableDesc.DescriptorProto(), ); err != nil { t.Fatal(err) } @@ -176,7 +178,8 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") newVersion = tableDesc.Version if newVersion != expectedVersion { t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion) @@ -188,7 +191,8 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); } } // RunStateMachineBeforeBackfill() doesn't complete the schema change. - tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Mutations) == 0 { t.Fatalf("table expected to have an outstanding schema change: %v", tableDesc) } @@ -217,7 +221,8 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); } // Read table descriptor for version. - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") // A long running schema change operation runs through // a state machine that increments the version by 3. @@ -238,7 +243,8 @@ CREATE INDEX foo ON t.test (v) // Wait until index is created. 
for r := retry.Start(retryOpts); r.Next(); { - tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Indexes) == 1 { break } @@ -250,7 +256,8 @@ CREATE INDEX foo ON t.test (v) mTest.CheckQueryResults(t, indexQuery, [][]string{{"b"}, {"d"}}) // Ensure that the version has been incremented. - tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") newVersion := tableDesc.Version if newVersion != expectedVersion { t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion) @@ -263,7 +270,8 @@ CREATE INDEX foo ON t.test (v) for r := retry.Start(retryOpts); r.Next(); { // Ensure that the version gets incremented. - tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") name := tableDesc.Indexes[0].Name if name != "ufo" { t.Fatalf("bad index name %s", name) @@ -282,7 +290,8 @@ CREATE INDEX foo ON t.test (v) } // Wait until indexes are created. for r := retry.Start(retryOpts); r.Next(); { - tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") + tableDesc = sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Indexes) == count+1 { break } @@ -3557,7 +3566,8 @@ CREATE TABLE d.t ( `); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "d", "t") + tableDesc := sqlbase.TestingGetMutableExistingTableDescriptor( + kvDB, keys.SystemSQLCodec, "d", "t") // Verify that this descriptor uses the new STORING encoding. Overwrite it // with one that uses the old encoding. 
for i, index := range tableDesc.Indexes { @@ -3574,7 +3584,7 @@ CREATE TABLE d.t ( if err := kvDB.Put( context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.GetID()), - sqlbase.WrapDescriptor(tableDesc), + tableDesc.DescriptorProto(), ); err != nil { t.Fatal(err) } @@ -6000,7 +6010,7 @@ CREATE UNIQUE INDEX i ON t.test(v); return nil } injectedError = true - // Any error not on the whitelist of retriable errors is considered permanent. + // Any error not on the allowlist of retriable errors is considered permanent. return errors.New("permanent error") }, }, @@ -6025,7 +6035,7 @@ CREATE UNIQUE INDEX i ON t.test(v); return nil } injectedError = true - // Any error not on the whitelist of retriable errors is considered permanent. + // Any error not on the allowlist of retriable errors is considered permanent. return errors.New("permanent error") }, }, diff --git a/pkg/sql/schemaexpr/check_constraint.go b/pkg/sql/schemaexpr/check_constraint.go index e1e708f39f88..5b1ef122d178 100644 --- a/pkg/sql/schemaexpr/check_constraint.go +++ b/pkg/sql/schemaexpr/check_constraint.go @@ -101,7 +101,7 @@ func (b *CheckConstraintBuilder) Build( // Replace the column variables with dummyColumns so that they can be // type-checked. - replacedExpr, colIDs, err := replaceVars(&b.desc.TableDescriptor, expr) + replacedExpr, colIDs, err := replaceVars(b.desc, expr) if err != nil { return nil, err } diff --git a/pkg/sql/schemaexpr/column.go b/pkg/sql/schemaexpr/column.go index d8ca7cf01acd..4c4dede27ae2 100644 --- a/pkg/sql/schemaexpr/column.go +++ b/pkg/sql/schemaexpr/column.go @@ -70,7 +70,10 @@ func DequalifyColumnRefs( // DeserializeTableDescExpr performs this logic, but only returns a // tree.Expr to be clear that these returned expressions are not safe to Eval. 
func DeserializeTableDescExpr( - ctx context.Context, semaCtx *tree.SemaContext, desc *sqlbase.TableDescriptor, exprStr string, + ctx context.Context, + semaCtx *tree.SemaContext, + desc sqlbase.TableDescriptorInterface, + exprStr string, ) (tree.Expr, error) { expr, err := parser.ParseExpr(exprStr) if err != nil { @@ -92,7 +95,7 @@ func DeserializeTableDescExpr( func FormatColumnForDisplay( ctx context.Context, semaCtx *tree.SemaContext, - tbl *sqlbase.TableDescriptor, + tbl *sqlbase.ImmutableTableDescriptor, desc *sqlbase.ColumnDescriptor, ) (string, error) { f := tree.NewFmtCtx(tree.FmtSimple) @@ -213,7 +216,7 @@ func (d *dummyColumn) ResolvedType() *types.T { // If the expression references a column that does not exist in the table // descriptor, replaceVars errs with pgcode.UndefinedColumn. func replaceVars( - desc *sqlbase.TableDescriptor, rootExpr tree.Expr, + desc sqlbase.TableDescriptorInterface, rootExpr tree.Expr, ) (tree.Expr, sqlbase.TableColSet, error) { var colIDs sqlbase.TableColSet diff --git a/pkg/sql/schemaexpr/computed_column.go b/pkg/sql/schemaexpr/computed_column.go index 69f4db1222c4..29a507accf8e 100644 --- a/pkg/sql/schemaexpr/computed_column.go +++ b/pkg/sql/schemaexpr/computed_column.go @@ -102,7 +102,7 @@ func (v *ComputedColumnValidator) Validate(d *tree.ColumnTableDef) error { // Replace the column variables with dummyColumns so that they can be // type-checked. 
- replacedExpr, _, err := replaceVars(&v.desc.TableDescriptor, d.Computed.Expr) + replacedExpr, _, err := replaceVars(v.desc, d.Computed.Expr) if err != nil { return err } diff --git a/pkg/sql/schemaexpr/partial_index.go b/pkg/sql/schemaexpr/partial_index.go index ce41d8ec9b93..6bb04ab3ce98 100644 --- a/pkg/sql/schemaexpr/partial_index.go +++ b/pkg/sql/schemaexpr/partial_index.go @@ -58,7 +58,7 @@ func NewIndexPredicateValidator( func (v *IndexPredicateValidator) Validate(expr tree.Expr) (tree.Expr, error) { // Replace the column variables with dummyColumns so that they can be // type-checked. - replacedExpr, _, err := replaceVars(&v.desc.TableDescriptor, expr) + replacedExpr, _, err := replaceVars(v.desc, expr) if err != nil { return nil, err } diff --git a/pkg/sql/scrub.go b/pkg/sql/scrub.go index 893018cbb1f7..9344988b6394 100644 --- a/pkg/sql/scrub.go +++ b/pkg/sql/scrub.go @@ -160,7 +160,7 @@ func (n *scrubNode) startScrubDatabase(ctx context.Context, p *planner, name *tr return err } - schemas, err := p.Tables().GetSchemasForDatabase(ctx, p.txn, dbDesc.ID) + schemas, err := p.Tables().GetSchemasForDatabase(ctx, p.txn, dbDesc.GetID()) if err != nil { return err } diff --git a/pkg/sql/sem/builtins/aggregate_builtins.go b/pkg/sql/sem/builtins/aggregate_builtins.go index 275453fee4a2..667cfcfe4319 100644 --- a/pkg/sql/sem/builtins/aggregate_builtins.go +++ b/pkg/sql/sem/builtins/aggregate_builtins.go @@ -1717,12 +1717,12 @@ func (a *intSumAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.D // And overflow was detected; go to large integers, but keep the // sum computed so far. 
a.large = true - a.decSum.SetFinite(a.intSum, 0) + a.decSum.SetInt64(a.intSum) } } if a.large { - a.tmpDec.SetFinite(t, 0) + a.tmpDec.SetInt64(t) _, err := tree.ExactCtx.Add(&a.decSum, &a.decSum, &a.tmpDec) if err != nil { return err @@ -1745,7 +1745,7 @@ func (a *intSumAggregate) Result() (tree.Datum, error) { if a.large { dd.Set(&a.decSum) } else { - dd.SetFinite(a.intSum, 0) + dd.SetInt64(a.intSum) } return dd, nil } @@ -1815,7 +1815,7 @@ func (a *decimalSumAggregate) Result() (tree.Datum, error) { // Reset implements tree.AggregateFunc interface. func (a *decimalSumAggregate) Reset(ctx context.Context) { - a.sum.SetFinite(0, 0) + a.sum.SetInt64(0) a.sawNonNull = false a.reset(ctx) } @@ -1951,7 +1951,7 @@ func (a *intSqrDiffAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tr return nil } - a.tmpDec.SetFinite(int64(tree.MustBeDInt(datum)), 0) + a.tmpDec.SetInt64(int64(tree.MustBeDInt(datum))) return a.agg.Add(ctx, &a.tmpDec) } @@ -2121,9 +2121,9 @@ func (a *decimalSqrDiffAggregate) Result() (tree.Datum, error) { // Reset implements tree.AggregateFunc interface. func (a *decimalSqrDiffAggregate) Reset(ctx context.Context) { - a.count.SetFinite(0, 0) - a.mean.SetFinite(0, 0) - a.sqrDiff.SetFinite(0, 0) + a.count.SetInt64(0) + a.mean.SetInt64(0) + a.sqrDiff.SetInt64(0) a.reset(ctx) } @@ -2317,9 +2317,9 @@ func (a *decimalSumSqrDiffsAggregate) Result() (tree.Datum, error) { // Reset implements tree.AggregateFunc interface. 
func (a *decimalSumSqrDiffsAggregate) Reset(ctx context.Context) { - a.count.SetFinite(0, 0) - a.mean.SetFinite(0, 0) - a.sqrDiff.SetFinite(0, 0) + a.count.SetInt64(0) + a.mean.SetInt64(0) + a.sqrDiff.SetInt64(0) a.reset(ctx) } diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go index a39ce0cb36cc..c1ee281ac941 100644 --- a/pkg/sql/sem/builtins/builtins.go +++ b/pkg/sql/sem/builtins/builtins.go @@ -1735,7 +1735,7 @@ CockroachDB supports the following flags: "nextval": makeBuiltin( tree.FunctionProperties{ Category: categorySequences, - DistsqlBlacklist: true, + DistsqlBlocklist: true, Impure: true, }, tree.Overload{ @@ -1761,7 +1761,7 @@ CockroachDB supports the following flags: "currval": makeBuiltin( tree.FunctionProperties{ Category: categorySequences, - DistsqlBlacklist: true, + DistsqlBlocklist: true, Impure: true, }, tree.Overload{ @@ -1809,7 +1809,7 @@ CockroachDB supports the following flags: "setval": makeBuiltin( tree.FunctionProperties{ Category: categorySequences, - DistsqlBlacklist: true, + DistsqlBlocklist: true, Impure: true, }, tree.Overload{ @@ -3023,7 +3023,7 @@ may increase either contention or retry errors, or both.`, "current_schema": makeBuiltin( tree.FunctionProperties{ Category: categorySystemInfo, - DistsqlBlacklist: true, + DistsqlBlocklist: true, }, tree.Overload{ Types: tree.ArgTypes{}, @@ -3060,7 +3060,7 @@ may increase either contention or retry errors, or both.`, "current_schemas": makeBuiltin( tree.FunctionProperties{ Category: categorySystemInfo, - DistsqlBlacklist: true, + DistsqlBlocklist: true, }, tree.Overload{ Types: tree.ArgTypes{{"include_pg_catalog", types.Bool}}, @@ -3801,7 +3801,7 @@ may increase either contention or retry errors, or both.`, "crdb_internal.is_admin": makeBuiltin( tree.FunctionProperties{ Category: categorySystemInfo, - DistsqlBlacklist: true, + DistsqlBlocklist: true, }, tree.Overload{ Types: tree.ArgTypes{}, diff --git a/pkg/sql/sem/builtins/geo_builtins.go 
b/pkg/sql/sem/builtins/geo_builtins.go index 6ed97ff883f1..3f2b13ca114d 100644 --- a/pkg/sql/sem/builtins/geo_builtins.go +++ b/pkg/sql/sem/builtins/geo_builtins.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" "github.com/cockroachdb/cockroach/pkg/util/json" "github.com/cockroachdb/errors" + "github.com/golang/geo/s1" "github.com/twpayne/go-geom" "github.com/twpayne/go-geom/encoding/ewkb" ) @@ -47,6 +48,11 @@ const spheroidDistanceMessage = `"\n\nWhen operating on a spheroid, this functio `the closest two points using S2. The spheroid distance between these two points is calculated using GeographicLib. ` + `This follows observed PostGIS behavior.` +const ( + defaultGeoJSONDecimalDigits = 9 + defaultWKTDecimalDigits = 15 +) + // infoBuilder is used to build a detailed info string that is consistent between // geospatial data types. type infoBuilder struct { @@ -590,43 +596,139 @@ var geoBuiltins = map[string]builtinDefinition{ defProps(), geometryOverload1( func(_ *tree.EvalContext, g *tree.DGeometry) (tree.Datum, error) { - wkt, err := geo.EWKBToWKT(g.Geometry.EWKB()) + wkt, err := geo.EWKBToWKT(g.Geometry.EWKB(), defaultWKTDecimalDigits) return tree.NewDString(string(wkt)), err }, types.String, - infoBuilder{info: "Returns the WKT representation of a given Geometry."}, + infoBuilder{ + info: fmt.Sprintf("Returns the WKT representation of a given Geometry. 
A maximum of %d decimal digits is used.", defaultWKTDecimalDigits), + }, tree.VolatilityImmutable, ), + tree.Overload{ + Types: tree.ArgTypes{ + {"geometry", types.Geometry}, + {"maximum_decimal_digits", types.Int}, + }, + ReturnType: tree.FixedReturnType(types.String), + Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + g := args[0].(*tree.DGeometry) + maxDecimalDigits := int(tree.MustBeDInt(args[1])) + + if maxDecimalDigits < -1 { + return nil, errors.Newf("maximum_decimal_digits must be >= -1") + } + + wkt, err := geo.EWKBToWKT(g.Geometry.EWKB(), maxDecimalDigits) + return tree.NewDString(string(wkt)), err + }, + Info: infoBuilder{ + info: "Returns the WKT representation of a given Geometry. The maximum_decimal_digits parameter controls the maximum decimal digits to print after the `.`. Use -1 to print as many digits as possible.", + }.String(), + Volatility: tree.VolatilityImmutable, + }, geographyOverload1( func(_ *tree.EvalContext, g *tree.DGeography) (tree.Datum, error) { - wkt, err := geo.EWKBToWKT(g.Geography.EWKB()) + wkt, err := geo.EWKBToWKT(g.Geography.EWKB(), defaultWKTDecimalDigits) return tree.NewDString(string(wkt)), err }, types.String, - infoBuilder{info: "Returns the WKT representation of a given Geography."}, + infoBuilder{ + info: fmt.Sprintf("Returns the WKT representation of a given Geography. 
A default of %d decimal digits is used.", defaultWKTDecimalDigits), + }, tree.VolatilityImmutable, ), + tree.Overload{ + Types: tree.ArgTypes{ + {"geography", types.Geography}, + {"maximum_decimal_digits", types.Int}, + }, + ReturnType: tree.FixedReturnType(types.String), + Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + g := args[0].(*tree.DGeography) + maxDecimalDigits := int(tree.MustBeDInt(args[1])) + + if maxDecimalDigits < -1 { + return nil, errors.Newf("maximum_decimal_digits must be >= -1") + } + + wkt, err := geo.EWKBToWKT(g.Geography.EWKB(), maxDecimalDigits) + return tree.NewDString(string(wkt)), err + }, + Info: infoBuilder{ + info: "Returns the WKT representation of a given Geography. The maximum_decimal_digits parameter controls the maximum decimal digits to print after the `.`. Use -1 to print as many digits as possible.", + }.String(), + Volatility: tree.VolatilityImmutable, + }, ), "st_asewkt": makeBuiltin( defProps(), geometryOverload1( func(_ *tree.EvalContext, g *tree.DGeometry) (tree.Datum, error) { - ewkt, err := geo.EWKBToEWKT(g.Geometry.EWKB()) + ewkt, err := geo.EWKBToEWKT(g.Geometry.EWKB(), defaultWKTDecimalDigits) return tree.NewDString(string(ewkt)), err }, types.String, - infoBuilder{info: "Returns the EWKT representation of a given Geometry."}, + infoBuilder{ + info: fmt.Sprintf("Returns the EWKT representation of a given Geometry. 
A maximum of %d decimal digits is used.", defaultWKTDecimalDigits), + }, tree.VolatilityImmutable, ), + tree.Overload{ + Types: tree.ArgTypes{ + {"geometry", types.Geometry}, + {"maximum_decimal_digits", types.Int}, + }, + ReturnType: tree.FixedReturnType(types.String), + Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + g := args[0].(*tree.DGeometry) + maxDecimalDigits := int(tree.MustBeDInt(args[1])) + + if maxDecimalDigits < -1 { + return nil, errors.Newf("maximum_decimal_digits must be >= -1") + } + + ewkt, err := geo.EWKBToEWKT(g.Geometry.EWKB(), maxDecimalDigits) + return tree.NewDString(string(ewkt)), err + }, + Info: infoBuilder{ + info: "Returns the WKT representation of a given Geometry. The maximum_decimal_digits parameter controls the maximum decimal digits to print after the `.`. Use -1 to print as many digits as possible.", + }.String(), + Volatility: tree.VolatilityImmutable, + }, geographyOverload1( func(_ *tree.EvalContext, g *tree.DGeography) (tree.Datum, error) { - ewkt, err := geo.EWKBToEWKT(g.Geography.EWKB()) + ewkt, err := geo.EWKBToEWKT(g.Geography.EWKB(), defaultWKTDecimalDigits) return tree.NewDString(string(ewkt)), err }, types.String, - infoBuilder{info: "Returns the EWKT representation of a given Geography."}, + infoBuilder{ + info: fmt.Sprintf("Returns the EWKT representation of a given Geography. 
A default of %d decimal digits is used.", defaultWKTDecimalDigits), + }, tree.VolatilityImmutable, ), + tree.Overload{ + Types: tree.ArgTypes{ + {"geography", types.Geography}, + {"maximum_decimal_digits", types.Int}, + }, + ReturnType: tree.FixedReturnType(types.String), + Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + g := args[0].(*tree.DGeography) + maxDecimalDigits := int(tree.MustBeDInt(args[1])) + + if maxDecimalDigits < -1 { + return nil, errors.Newf("maximum_decimal_digits must be >= -1") + } + + ewkt, err := geo.EWKBToEWKT(g.Geography.EWKB(), maxDecimalDigits) + return tree.NewDString(string(ewkt)), err + }, + Info: infoBuilder{ + info: "Returns the EWKT representation of a given Geography. The maximum_decimal_digits parameter controls the maximum decimal digits to print after the `.`. Use -1 to print as many digits as possible.", + }.String(), + Volatility: tree.VolatilityImmutable, + }, ), "st_asbinary": makeBuiltin( defProps(), @@ -731,7 +833,7 @@ var geoBuiltins = map[string]builtinDefinition{ defProps(), geometryOverload1( func(_ *tree.EvalContext, g *tree.DGeometry) (tree.Datum, error) { - return tree.NewDString(strings.ToUpper(fmt.Sprintf("%x", g.EWKB()))), nil + return tree.NewDString(g.Geometry.EWKBHex()), nil }, types.String, infoBuilder{info: "Returns the EWKB representation in hex of a given Geometry."}, @@ -739,7 +841,7 @@ var geoBuiltins = map[string]builtinDefinition{ ), geographyOverload1( func(_ *tree.EvalContext, g *tree.DGeography) (tree.Datum, error) { - return tree.NewDString(strings.ToUpper(fmt.Sprintf("%x", g.EWKB()))), nil + return tree.NewDString(g.Geography.EWKBHex()), nil }, types.String, infoBuilder{info: "Returns the EWKB representation in hex of a given Geography."}, @@ -837,22 +939,167 @@ var geoBuiltins = map[string]builtinDefinition{ defProps(), geometryOverload1( func(_ *tree.EvalContext, g *tree.DGeometry) (tree.Datum, error) { - geojson, err := geo.EWKBToGeoJSON(g.Geometry.EWKB()) + geojson, 
err := geo.EWKBToGeoJSON(g.Geometry.EWKB(), defaultGeoJSONDecimalDigits, geo.EWKBToGeoJSONFlagShortCRSIfNot4326) return tree.NewDString(string(geojson)), err }, types.String, - infoBuilder{info: "Returns the GeoJSON representation of a given Geometry."}, + infoBuilder{ + info: fmt.Sprintf( + "Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of %d decimal digits.", + defaultGeoJSONDecimalDigits, + ), + }, tree.VolatilityImmutable, ), + tree.Overload{ + Types: tree.ArgTypes{ + {"geometry", types.Geometry}, + {"max_decimal_digits", types.Int}, + }, + ReturnType: tree.FixedReturnType(types.String), + Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + g := args[0].(*tree.DGeometry) + maxDecimalDigits := int(tree.MustBeDInt(args[1])) + geojson, err := geo.EWKBToGeoJSON(g.Geometry.EWKB(), maxDecimalDigits, geo.EWKBToGeoJSONFlagShortCRSIfNot4326) + return tree.NewDString(string(geojson)), err + }, + Info: infoBuilder{ + info: `Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.`, + }.String(), + Volatility: tree.VolatilityImmutable, + }, + tree.Overload{ + Types: tree.ArgTypes{ + {"geometry", types.Geometry}, + {"max_decimal_digits", types.Int}, + {"options", types.Int}, + }, + ReturnType: tree.FixedReturnType(types.String), + Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + g := args[0].(*tree.DGeometry) + maxDecimalDigits := int(tree.MustBeDInt(args[1])) + options := geo.EWKBToGeoJSONFlag(tree.MustBeDInt(args[2])) + geojson, err := geo.EWKBToGeoJSON(g.Geometry.EWKB(), maxDecimalDigits, options) + return tree.NewDString(string(geojson)), err + }, + Info: infoBuilder{ + info: `Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value. + +Options is a flag that can be bitmasked. 
The options are: +* 0: no option +* 1: GeoJSON BBOX +* 2: GeoJSON Short CRS (e.g EPSG:4326) +* 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326) +* 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry) +`}.String(), + Volatility: tree.VolatilityImmutable, + }, geographyOverload1( func(_ *tree.EvalContext, g *tree.DGeography) (tree.Datum, error) { - geojson, err := geo.EWKBToGeoJSON(g.Geography.EWKB()) + geojson, err := geo.EWKBToGeoJSON(g.Geography.EWKB(), defaultGeoJSONDecimalDigits, geo.EWKBToGeoJSONFlagZero) return tree.NewDString(string(geojson)), err }, types.String, - infoBuilder{info: "Returns the GeoJSON representation of a given Geography."}, + infoBuilder{ + info: fmt.Sprintf( + "Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of %d decimal digits.", + defaultGeoJSONDecimalDigits, + ), + }, tree.VolatilityImmutable, ), + tree.Overload{ + Types: tree.ArgTypes{ + {"geography", types.Geography}, + {"max_decimal_digits", types.Int}, + }, + ReturnType: tree.FixedReturnType(types.String), + Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + g := args[0].(*tree.DGeography) + maxDecimalDigits := int(tree.MustBeDInt(args[1])) + geojson, err := geo.EWKBToGeoJSON(g.Geography.EWKB(), maxDecimalDigits, geo.EWKBToGeoJSONFlagZero) + return tree.NewDString(string(geojson)), err + }, + Info: infoBuilder{ + info: `Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.`, + }.String(), + Volatility: tree.VolatilityImmutable, + }, + tree.Overload{ + Types: tree.ArgTypes{ + {"geography", types.Geography}, + {"max_decimal_digits", types.Int}, + {"options", types.Int}, + }, + ReturnType: tree.FixedReturnType(types.String), + Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + g := args[0].(*tree.DGeography) + maxDecimalDigits := int(tree.MustBeDInt(args[1])) + options := geo.EWKBToGeoJSONFlag(tree.MustBeDInt(args[2])) + geojson, err := 
geo.EWKBToGeoJSON(g.Geography.EWKB(), maxDecimalDigits, options) + return tree.NewDString(string(geojson)), err + }, + Info: infoBuilder{ + info: `Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value. + +Options is a flag that can be bitmasked. The options are: +* 0: no option (default for Geography) +* 1: GeoJSON BBOX +* 2: GeoJSON Short CRS (e.g EPSG:4326) +* 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326) +* 8: GeoJSON Short CRS if not EPSG:4326 +`}.String(), + Volatility: tree.VolatilityImmutable, + }, + ), + "st_project": makeBuiltin( + defProps(), + tree.Overload{ + Types: tree.ArgTypes{ + {"geography", types.Geography}, + {"distance", types.Float}, + {"azimuth", types.Float}, + }, + ReturnType: tree.FixedReturnType(types.Geography), + Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + g := args[0].(*tree.DGeography) + distance := float64(*args[1].(*tree.DFloat)) + azimuth := float64(*args[2].(*tree.DFloat)) + + geomT, err := g.AsGeomT() + if err != nil { + return nil, err + } + + point, ok := geomT.(*geom.Point) + if !ok { + return nil, errors.Newf("ST_Project(geography) is only valid for point inputs") + } + + projected, err := geogfn.Project(point, distance, s1.Angle(azimuth)) + if err != nil { + return nil, err + } + + geog, err := geo.NewGeographyFromGeom(projected) + if err != nil { + return nil, err + } + + return &tree.DGeography{Geography: geog}, nil + }, + Info: infoBuilder{ + info: `Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem. + +The distance is given in meters. Negative values are supported. + +The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). 
+Negative azimuth values and values greater than 2π (360 degrees) are supported.`, + }.String(), + Volatility: tree.VolatilityImmutable, + }, ), // diff --git a/pkg/sql/sem/builtins/math_builtins.go b/pkg/sql/sem/builtins/math_builtins.go index 03c62a3064fd..58a142870cc2 100644 --- a/pkg/sql/sem/builtins/math_builtins.go +++ b/pkg/sql/sem/builtins/math_builtins.go @@ -463,7 +463,7 @@ var mathBuiltins = map[string]builtinDefinition{ "negative.", tree.VolatilityImmutable), decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) { d := &tree.DDecimal{} - d.Decimal.SetFinite(int64(x.Sign()), 0) + d.Decimal.SetInt64(int64(x.Sign())) return d, nil }, "Determines the sign of `val`: **1** for positive; **0** for 0 values; **-1** for "+ "negative.", tree.VolatilityImmutable), diff --git a/pkg/sql/sem/builtins/pg_builtins.go b/pkg/sql/sem/builtins/pg_builtins.go index 2d1273d027b9..96b2d625d9ce 100644 --- a/pkg/sql/sem/builtins/pg_builtins.go +++ b/pkg/sql/sem/builtins/pg_builtins.go @@ -389,7 +389,7 @@ func makePGPrivilegeInquiryDef( } return builtinDefinition{ props: tree.FunctionProperties{ - DistsqlBlacklist: true, + DistsqlBlocklist: true, }, overloads: variants, } @@ -645,7 +645,7 @@ var pgBuiltins = map[string]builtinDefinition{ // pg_get_constraintdef functions like SHOW CREATE CONSTRAINT would if we // supported that statement. - "pg_get_constraintdef": makeBuiltin(tree.FunctionProperties{DistsqlBlacklist: true}, + "pg_get_constraintdef": makeBuiltin(tree.FunctionProperties{DistsqlBlocklist: true}, makePGGetConstraintDef(tree.ArgTypes{ {"constraint_oid", types.Oid}, {"pretty_bool", types.Bool}}), makePGGetConstraintDef(tree.ArgTypes{{"constraint_oid", types.Oid}}), @@ -726,14 +726,14 @@ var pgBuiltins = map[string]builtinDefinition{ // pg_get_indexdef functions like SHOW CREATE INDEX would if we supported that // statement. 
- "pg_get_indexdef": makeBuiltin(tree.FunctionProperties{DistsqlBlacklist: true}, + "pg_get_indexdef": makeBuiltin(tree.FunctionProperties{DistsqlBlocklist: true}, makePGGetIndexDef(tree.ArgTypes{{"index_oid", types.Oid}}), makePGGetIndexDef(tree.ArgTypes{{"index_oid", types.Oid}, {"column_no", types.Int}, {"pretty_bool", types.Bool}}), ), // pg_get_viewdef functions like SHOW CREATE VIEW but returns the same format as // PostgreSQL leaving out the actual 'CREATE VIEW table_name AS' portion of the statement. - "pg_get_viewdef": makeBuiltin(tree.FunctionProperties{DistsqlBlacklist: true}, + "pg_get_viewdef": makeBuiltin(tree.FunctionProperties{DistsqlBlocklist: true}, makePGGetViewDef(tree.ArgTypes{{"view_oid", types.Oid}}), makePGGetViewDef(tree.ArgTypes{{"view_oid", types.Oid}, {"pretty_bool", types.Bool}}), ), @@ -766,7 +766,7 @@ var pgBuiltins = map[string]builtinDefinition{ }, ), - "pg_get_userbyid": makeBuiltin(tree.FunctionProperties{DistsqlBlacklist: true}, + "pg_get_userbyid": makeBuiltin(tree.FunctionProperties{DistsqlBlocklist: true}, tree.Overload{ Types: tree.ArgTypes{ {"role_oid", types.Oid}, @@ -791,7 +791,7 @@ var pgBuiltins = map[string]builtinDefinition{ }, ), - "pg_sequence_parameters": makeBuiltin(tree.FunctionProperties{DistsqlBlacklist: true}, + "pg_sequence_parameters": makeBuiltin(tree.FunctionProperties{DistsqlBlocklist: true}, // pg_sequence_parameters is an undocumented Postgres builtin that returns // information about a sequence given its OID. It's nevertheless used by // at least one UI tool, so we provide an implementation for compatibility. 
@@ -1717,7 +1717,7 @@ SELECT description "current_setting": makeBuiltin( tree.FunctionProperties{ Category: categorySystemInfo, - DistsqlBlacklist: true, + DistsqlBlocklist: true, }, tree.Overload{ Types: tree.ArgTypes{{"setting_name", types.String}}, @@ -1743,7 +1743,7 @@ SELECT description "set_config": makeBuiltin( tree.FunctionProperties{ Category: categorySystemInfo, - DistsqlBlacklist: true, + DistsqlBlocklist: true, Impure: true, }, tree.Overload{ diff --git a/pkg/sql/sem/builtins/window_frame_builtins.go b/pkg/sql/sem/builtins/window_frame_builtins.go index a0bb103e4cee..20baa4dd37ba 100644 --- a/pkg/sql/sem/builtins/window_frame_builtins.go +++ b/pkg/sql/sem/builtins/window_frame_builtins.go @@ -403,7 +403,7 @@ func (w *avgWindowFunc) Compute( return &avg, err case *tree.DInt: dd := tree.DDecimal{} - dd.SetFinite(int64(*t), 0) + dd.SetInt64(int64(*t)) var avg tree.DDecimal count := apd.New(int64(frameSize), 0) _, err := tree.DecimalCtx.Quo(&avg.Decimal, &dd.Decimal, count) diff --git a/pkg/sql/sem/tree/casts.go b/pkg/sql/sem/tree/casts.go index 34699c9b03be..ddc76b8fac04 100644 --- a/pkg/sql/sem/tree/casts.go +++ b/pkg/sql/sem/tree/casts.go @@ -510,16 +510,16 @@ func PerformCast(ctx *EvalContext, d Datum, t *types.T) (Datum, error) { switch v := d.(type) { case *DBool: if *v { - dd.SetFinite(1, 0) + dd.SetInt64(1) } case *DInt: - dd.SetFinite(int64(*v), 0) + dd.SetInt64(int64(*v)) case *DDate: // TODO(mjibson): This cast is unsupported by postgres. Should we remove ours? 
if !v.IsFinite() { return nil, errDecOutOfRange } - dd.SetFinite(v.UnixEpochDays(), 0) + dd.SetInt64(v.UnixEpochDays()) case *DFloat: _, err = dd.SetFloat64(float64(*v)) case *DDecimal: diff --git a/pkg/sql/sem/tree/datum.go b/pkg/sql/sem/tree/datum.go index db472fff795a..b0571b388ed4 100644 --- a/pkg/sql/sem/tree/datum.go +++ b/pkg/sql/sem/tree/datum.go @@ -971,7 +971,7 @@ func (d *DDecimal) Compare(ctx *EvalContext, other Datum) int { case *DDecimal: v = &t.Decimal case *DInt: - v.SetFinite(int64(*t), 0) + v.SetInt64(int64(*t)) case *DFloat: if _, err := v.SetFloat64(float64(*t)); err != nil { panic(errors.NewAssertionErrorWithWrappedErrf(err, "decimal compare, unexpected error")) diff --git a/pkg/sql/sem/tree/eval.go b/pkg/sql/sem/tree/eval.go index 16aa602b2974..b8533cc52e3d 100644 --- a/pkg/sql/sem/tree/eval.go +++ b/pkg/sql/sem/tree/eval.go @@ -593,7 +593,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ l := &left.(*DDecimal).Decimal r := MustBeDInt(right) dd := &DDecimal{} - dd.SetFinite(int64(r), 0) + dd.SetInt64(int64(r)) _, err := ExactCtx.Add(&dd.Decimal, l, &dd.Decimal) return dd, err }, @@ -607,7 +607,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ l := MustBeDInt(left) r := &right.(*DDecimal).Decimal dd := &DDecimal{} - dd.SetFinite(int64(l), 0) + dd.SetInt64(int64(l)) _, err := ExactCtx.Add(&dd.Decimal, &dd.Decimal, r) return dd, err }, @@ -888,7 +888,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ l := &left.(*DDecimal).Decimal r := MustBeDInt(right) dd := &DDecimal{} - dd.SetFinite(int64(r), 0) + dd.SetInt64(int64(r)) _, err := ExactCtx.Sub(&dd.Decimal, l, &dd.Decimal) return dd, err }, @@ -902,7 +902,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ l := MustBeDInt(left) r := &right.(*DDecimal).Decimal dd := &DDecimal{} - dd.SetFinite(int64(l), 0) + dd.SetInt64(int64(l)) _, err := ExactCtx.Sub(&dd.Decimal, &dd.Decimal, r) return dd, err }, @@ -1213,7 +1213,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ l := 
&left.(*DDecimal).Decimal r := MustBeDInt(right) dd := &DDecimal{} - dd.SetFinite(int64(r), 0) + dd.SetInt64(int64(r)) _, err := ExactCtx.Mul(&dd.Decimal, l, &dd.Decimal) return dd, err }, @@ -1227,7 +1227,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ l := MustBeDInt(left) r := &right.(*DDecimal).Decimal dd := &DDecimal{} - dd.SetFinite(int64(l), 0) + dd.SetInt64(int64(l)) _, err := ExactCtx.Mul(&dd.Decimal, &dd.Decimal, r) return dd, err }, @@ -1311,9 +1311,9 @@ var BinOps = map[BinaryOperator]binOpOverload{ if rInt == 0 { return nil, ErrDivByZero } - div := ctx.getTmpDec().SetFinite(int64(rInt), 0) + div := ctx.getTmpDec().SetInt64(int64(rInt)) dd := &DDecimal{} - dd.SetFinite(int64(MustBeDInt(left)), 0) + dd.SetInt64(int64(MustBeDInt(left))) _, err := DecimalCtx.Quo(&dd.Decimal, &dd.Decimal, div) return dd, err }, @@ -1359,7 +1359,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ return nil, ErrDivByZero } dd := &DDecimal{} - dd.SetFinite(int64(r), 0) + dd.SetInt64(int64(r)) _, err := DecimalCtx.Quo(&dd.Decimal, l, &dd.Decimal) return dd, err }, @@ -1376,7 +1376,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ return nil, ErrDivByZero } dd := &DDecimal{} - dd.SetFinite(int64(l), 0) + dd.SetInt64(int64(l)) _, err := DecimalCtx.Quo(&dd.Decimal, &dd.Decimal, r) return dd, err }, @@ -1465,7 +1465,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ return nil, ErrDivByZero } dd := &DDecimal{} - dd.SetFinite(int64(r), 0) + dd.SetInt64(int64(r)) _, err := HighPrecisionCtx.QuoInteger(&dd.Decimal, l, &dd.Decimal) return dd, err }, @@ -1482,7 +1482,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ return nil, ErrDivByZero } dd := &DDecimal{} - dd.SetFinite(int64(l), 0) + dd.SetInt64(int64(l)) _, err := HighPrecisionCtx.QuoInteger(&dd.Decimal, &dd.Decimal, r) return dd, err }, @@ -1545,7 +1545,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ return nil, ErrDivByZero } dd := &DDecimal{} - dd.SetFinite(int64(r), 0) + dd.SetInt64(int64(r)) _, err := 
HighPrecisionCtx.Rem(&dd.Decimal, l, &dd.Decimal) return dd, err }, @@ -1562,7 +1562,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ return nil, ErrDivByZero } dd := &DDecimal{} - dd.SetFinite(int64(l), 0) + dd.SetInt64(int64(l)) _, err := HighPrecisionCtx.Rem(&dd.Decimal, &dd.Decimal, r) return dd, err }, @@ -1741,7 +1741,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ l := &left.(*DDecimal).Decimal r := MustBeDInt(right) dd := &DDecimal{} - dd.SetFinite(int64(r), 0) + dd.SetInt64(int64(r)) _, err := DecimalCtx.Pow(&dd.Decimal, l, &dd.Decimal) return dd, err }, @@ -1755,7 +1755,7 @@ var BinOps = map[BinaryOperator]binOpOverload{ l := MustBeDInt(left) r := &right.(*DDecimal).Decimal dd := &DDecimal{} - dd.SetFinite(int64(l), 0) + dd.SetInt64(int64(l)) _, err := DecimalCtx.Pow(&dd.Decimal, &dd.Decimal, r) return dd, err }, diff --git a/pkg/sql/sem/tree/expr.go b/pkg/sql/sem/tree/expr.go index 769164a12281..1b12b32a9430 100644 --- a/pkg/sql/sem/tree/expr.go +++ b/pkg/sql/sem/tree/expr.go @@ -1401,9 +1401,9 @@ func (node *FuncExpr) IsImpure() bool { return node.fnProps != nil && node.fnProps.Impure } -// IsDistSQLBlacklist returns whether the function is not supported by DistSQL. -func (node *FuncExpr) IsDistSQLBlacklist() bool { - return node.fnProps != nil && node.fnProps.DistsqlBlacklist +// IsDistSQLBlocklist returns whether the function is not supported by DistSQL. +func (node *FuncExpr) IsDistSQLBlocklist() bool { + return node.fnProps != nil && node.fnProps.DistsqlBlocklist } // CanHandleNulls returns whether or not the function can handle null diff --git a/pkg/sql/sem/tree/format.go b/pkg/sql/sem/tree/format.go index 7913d0a1ad29..094cbe10587d 100644 --- a/pkg/sql/sem/tree/format.go +++ b/pkg/sql/sem/tree/format.go @@ -148,7 +148,10 @@ const ( FmtPgwireText FmtFlags = fmtPgwireFormat | FmtFlags(lex.EncBareStrings) // FmtParsable instructs the pretty-printer to produce a representation that - // can be parsed into an equivalent expression. 
+ // can be parsed into an equivalent expression. If there is a chance that the + // formatted data will be stored durably on disk or sent to other nodes, + // then this formatting directive is not appropriate, and FmtSerializable + // should be used instead. FmtParsable FmtFlags = fmtDisambiguateDatumTypes | FmtParsableNumerics // FmtSerializable instructs the pretty-printer to produce a representation diff --git a/pkg/sql/sem/tree/function_definition.go b/pkg/sql/sem/tree/function_definition.go index 7e2203de6f4d..ad2eec54682f 100644 --- a/pkg/sql/sem/tree/function_definition.go +++ b/pkg/sql/sem/tree/function_definition.go @@ -65,15 +65,15 @@ type FunctionProperties struct { // as impure. Impure bool - // DistsqlBlacklist is set to true when a function depends on + // DistsqlBlocklist is set to true when a function depends on // members of the EvalContext that are not marshaled by DistSQL // (e.g. planner). Currently used for DistSQL to determine if // expressions can be evaluated on a different node without sending // over the EvalContext. // // TODO(andrei): Get rid of the planner from the EvalContext and then we can - // get rid of this blacklist. - DistsqlBlacklist bool + // get rid of this blocklist. + DistsqlBlocklist bool // Class is the kind of built-in function (normal/aggregate/window/etc.) Class FunctionClass diff --git a/pkg/sql/sem/tree/name_resolution.go b/pkg/sql/sem/tree/name_resolution.go index c514a2c3cbe0..e21ba733fb43 100644 --- a/pkg/sql/sem/tree/name_resolution.go +++ b/pkg/sql/sem/tree/name_resolution.go @@ -239,6 +239,10 @@ func (c *ColumnItem) Resolve( // ObjectNameTargetResolver is the helper interface to resolve object // names when the object is not expected to exist. +// +// TODO(ajwerner): figure out what scMeta is supposed to be. Currently it's +// the database but with User-defined schemas, should it be the schema? +// Should it be both? 
type ObjectNameTargetResolver interface { LookupSchema(ctx context.Context, dbName, scName string) (found bool, scMeta SchemaMeta, err error) } diff --git a/pkg/sql/sem/tree/normalize.go b/pkg/sql/sem/tree/normalize.go index e8ce2dce7422..88c38c724d1b 100644 --- a/pkg/sql/sem/tree/normalize.go +++ b/pkg/sql/sem/tree/normalize.go @@ -968,7 +968,7 @@ func ContainsVars(expr Expr) bool { var DecimalOne DDecimal func init() { - DecimalOne.SetFinite(1, 0) + DecimalOne.SetInt64(1) } // ReType ensures that the given numeric expression evaluates diff --git a/pkg/sql/serial.go b/pkg/sql/serial.go index 6e407ce304ad..c760ffc39efb 100644 --- a/pkg/sql/serial.go +++ b/pkg/sql/serial.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -48,7 +49,13 @@ var virtualSequenceOpts = tree.SequenceOptions{ // The ColumnTableDef is not mutated in-place; instead a new one is returned. func (p *planner) processSerialInColumnDef( ctx context.Context, d *tree.ColumnTableDef, tableName *TableName, -) (*tree.ColumnTableDef, *DatabaseDescriptor, *TableName, tree.SequenceOptions, error) { +) ( + *tree.ColumnTableDef, + *sqlbase.ImmutableDatabaseDescriptor, + *TableName, + tree.SequenceOptions, + error, +) { if !d.IsSerial { // Column is not SERIAL: nothing to do. 
return d, nil, nil, nil, nil diff --git a/pkg/sql/set_zone_config.go b/pkg/sql/set_zone_config.go index e16f85bf77d1..564c64ad61ab 100644 --- a/pkg/sql/set_zone_config.go +++ b/pkg/sql/set_zone_config.go @@ -211,7 +211,7 @@ func checkPrivilegeForSetZoneConfig(ctx context.Context, p *planner, zs tree.Zon } return err } - if tableDesc.ParentID == keys.SystemDatabaseID { + if tableDesc.TableDesc().ParentID == keys.SystemDatabaseID { return p.RequireAdminRole(ctx, "alter system tables") } @@ -325,15 +325,15 @@ func (n *setZoneConfigNode) startExec(params runParams) error { // Backward compatibility for ALTER PARTITION ... OF TABLE. Determine which // index has the specified partition. partitionName := string(n.zoneSpecifier.Partition) - indexes := table.FindIndexesWithPartition(partitionName) + indexes := table.TableDesc().FindIndexesWithPartition(partitionName) switch len(indexes) { case 0: - return fmt.Errorf("partition %q does not exist on table %q", partitionName, table.Name) + return fmt.Errorf("partition %q does not exist on table %q", partitionName, table.GetName()) case 1: n.zoneSpecifier.TableOrIndex.Index = tree.UnrestrictedName(indexes[0].Name) default: err := fmt.Errorf( - "partition %q exists on multiple indexes of table %q", partitionName, table.Name) + "partition %q exists on multiple indexes of table %q", partitionName, table.GetName()) err = pgerror.WithCandidateCode(err, pgcode.InvalidParameterValue) err = errors.WithHint(err, "try ALTER PARTITION ... 
OF INDEX ...") return err @@ -346,7 +346,7 @@ func (n *setZoneConfigNode) startExec(params runParams) error { var specifiers []tree.ZoneSpecifier if n.zoneSpecifier.TargetsPartition() && n.allIndexes { sqltelemetry.IncrementPartitioningCounter(sqltelemetry.AlterAllPartitions) - for _, idx := range table.AllNonDropIndexes() { + for _, idx := range table.TableDesc().AllNonDropIndexes() { if p := idx.FindPartitionByName(string(n.zoneSpecifier.Partition)); p != nil { zs := n.zoneSpecifier zs.TableOrIndex.Index = tree.UnrestrictedName(idx.Name) @@ -656,9 +656,14 @@ func (n *setZoneConfigNode) startExec(params runParams) error { hasNewSubzones := !deleteZone && index != nil execConfig := params.extendedEvalCtx.ExecCfg zoneToWrite := partialZone - + // TODO(ajwerner): This is extremely fragile because we accept a nil table + // all the way down here. + var tableDesc *sqlbase.TableDescriptor + if table != nil { + tableDesc = table.TableDesc() + } n.run.numAffected, err = writeZoneConfig(params.ctx, params.p.txn, - targetID, table, zoneToWrite, execConfig, hasNewSubzones) + targetID, tableDesc, zoneToWrite, execConfig, hasNewSubzones) if err != nil { return err } diff --git a/pkg/sql/show_create.go b/pkg/sql/show_create.go index bc71ac9a0a33..48b37bb6f9f2 100644 --- a/pkg/sql/show_create.go +++ b/pkg/sql/show_create.go @@ -64,7 +64,7 @@ func ShowCreateTable( p PlanHookState, tn *tree.Name, dbPrefix string, - desc *sqlbase.TableDescriptor, + desc *sqlbase.ImmutableTableDescriptor, lCtx simpleSchemaResolver, displayOptions ShowCreateDisplayOptions, ) (string, error) { @@ -209,7 +209,7 @@ func (p *planner) ShowCreate( ctx context.Context, dbPrefix string, allDescs []sqlbase.Descriptor, - desc *sqlbase.TableDescriptor, + desc *sqlbase.ImmutableTableDescriptor, displayOptions ShowCreateDisplayOptions, ) (string, error) { var stmt string diff --git a/pkg/sql/show_create_clauses.go b/pkg/sql/show_create_clauses.go index 102d4231484f..08ae25b2d10b 100644 --- 
a/pkg/sql/show_create_clauses.go +++ b/pkg/sql/show_create_clauses.go @@ -76,7 +76,7 @@ func selectComment(ctx context.Context, p PlanHookState, tableID sqlbase.ID) (tc // statement used to create the given view. It is used in the implementation of // the crdb_internal.create_statements virtual table. func ShowCreateView( - ctx context.Context, tn *tree.Name, desc *sqlbase.TableDescriptor, + ctx context.Context, tn *tree.Name, desc *sqlbase.ImmutableTableDescriptor, ) (string, error) { f := tree.NewFmtCtx(tree.FmtSimple) f.WriteString("CREATE ") @@ -99,7 +99,9 @@ func ShowCreateView( // showComments prints out the COMMENT statements sufficient to populate a // table's comments, including its index and column comments. -func showComments(table *sqlbase.TableDescriptor, tc *tableComments, buf *bytes.Buffer) error { +func showComments( + table *sqlbase.ImmutableTableDescriptor, tc *tableComments, buf *bytes.Buffer, +) error { if tc == nil { return nil } @@ -137,7 +139,7 @@ func showComments(table *sqlbase.TableDescriptor, tc *tableComments, buf *bytes. func showForeignKeyConstraint( buf *bytes.Buffer, dbPrefix string, - originTable *sqlbase.TableDescriptor, + originTable *sqlbase.ImmutableTableDescriptor, fk *sqlbase.ForeignKeyConstraint, lCtx simpleSchemaResolver, ) error { @@ -157,8 +159,8 @@ func showForeignKeyConstraint( if err != nil { return err } - fkTableName = tree.MakeTableName(tree.Name(fkDb.Name), tree.Name(fkTable.Name)) - fkTableName.ExplicitSchema = fkDb.Name != dbPrefix + fkTableName = tree.MakeTableName(tree.Name(fkDb.GetName()), tree.Name(fkTable.Name)) + fkTableName.ExplicitSchema = fkDb.GetName() != dbPrefix originNames, err = originTable.NamesForColumnIDs(fk.OriginColumnIDs) if err != nil { return err @@ -197,7 +199,7 @@ func showForeignKeyConstraint( // ShowCreateSequence returns a valid SQL representation of the // CREATE SEQUENCE statement used to create the given sequence. 
func ShowCreateSequence( - ctx context.Context, tn *tree.Name, desc *sqlbase.TableDescriptor, + ctx context.Context, tn *tree.Name, desc *sqlbase.ImmutableTableDescriptor, ) (string, error) { f := tree.NewFmtCtx(tree.FmtSimple) f.WriteString("CREATE ") @@ -219,7 +221,7 @@ func ShowCreateSequence( // showFamilyClause creates the FAMILY clauses for a CREATE statement, writing them // to tree.FmtCtx f -func showFamilyClause(desc *sqlbase.TableDescriptor, f *tree.FmtCtx) { +func showFamilyClause(desc *sqlbase.ImmutableTableDescriptor, f *tree.FmtCtx) { for _, fam := range desc.Families { activeColumnNames := make([]string, 0, len(fam.ColumnNames)) for i, colID := range fam.ColumnIDs { @@ -284,7 +286,7 @@ func showCreateInterleave( func ShowCreatePartitioning( a *sqlbase.DatumAlloc, codec keys.SQLCodec, - tableDesc *sqlbase.TableDescriptor, + tableDesc sqlbase.TableDescriptorInterface, idxDesc *sqlbase.IndexDescriptor, partDesc *sqlbase.PartitioningDescriptor, buf *bytes.Buffer, @@ -336,7 +338,7 @@ func ShowCreatePartitioning( buf.WriteString(`, `) } tuple, _, err := sqlbase.DecodePartitionTuple( - a, codec, tableDesc, idxDesc, partDesc, values, fakePrefixDatums) + a, codec, tableDesc.TableDesc(), idxDesc, partDesc, values, fakePrefixDatums) if err != nil { return err } @@ -360,14 +362,14 @@ func ShowCreatePartitioning( buf.WriteString(part.Name) buf.WriteString(" VALUES FROM ") fromTuple, _, err := sqlbase.DecodePartitionTuple( - a, codec, tableDesc, idxDesc, partDesc, part.FromInclusive, fakePrefixDatums) + a, codec, tableDesc.TableDesc(), idxDesc, partDesc, part.FromInclusive, fakePrefixDatums) if err != nil { return err } buf.WriteString(fromTuple.String()) buf.WriteString(" TO ") toTuple, _, err := sqlbase.DecodePartitionTuple( - a, codec, tableDesc, idxDesc, partDesc, part.ToExclusive, fakePrefixDatums) + a, codec, tableDesc.TableDesc(), idxDesc, partDesc, part.ToExclusive, fakePrefixDatums) if err != nil { return err } @@ -382,7 +384,10 @@ func 
ShowCreatePartitioning( // showConstraintClause creates the CONSTRAINT clauses for a CREATE statement, // writing them to tree.FmtCtx f func showConstraintClause( - ctx context.Context, desc *sqlbase.TableDescriptor, semaCtx *tree.SemaContext, f *tree.FmtCtx, + ctx context.Context, + desc *sqlbase.ImmutableTableDescriptor, + semaCtx *tree.SemaContext, + f *tree.FmtCtx, ) error { for _, e := range desc.AllActiveAndInactiveChecks() { if e.Hidden { diff --git a/pkg/sql/sqlbase/database.go b/pkg/sql/sqlbase/database.go index bdaffe0bfe83..479ffdbf428c 100644 --- a/pkg/sql/sqlbase/database.go +++ b/pkg/sql/sqlbase/database.go @@ -9,13 +9,3 @@ // licenses/APL.txt. package sqlbase - -import "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - -// MakeDatabaseDesc constructs a DatabaseDescriptor from an AST node. -func MakeDatabaseDesc(p *tree.CreateDatabase) DatabaseDescriptor { - return DatabaseDescriptor{ - Name: string(p.Name), - Privileges: NewDefaultPrivilegeDescriptor(), - } -} diff --git a/pkg/sql/sqlbase/database_desc.go b/pkg/sql/sqlbase/database_desc.go new file mode 100644 index 000000000000..9334947d942a --- /dev/null +++ b/pkg/sql/sqlbase/database_desc.go @@ -0,0 +1,150 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package sqlbase + +import ( + "fmt" + + "github.com/cockroachdb/cockroach/pkg/util/protoutil" +) + +// DatabaseDescriptorInterface will eventually be called dbdesc.Descriptor. +// It is implemented by ImmutableDatabaseDescriptor. 
+type DatabaseDescriptorInterface interface { + BaseDescriptorInterface + DatabaseDesc() *DatabaseDescriptor +} + +var _ DatabaseDescriptorInterface = (*ImmutableDatabaseDescriptor)(nil) + +// ImmutableDatabaseDescriptor wraps a database descriptor and provides methods +// on it. +type ImmutableDatabaseDescriptor struct { + DatabaseDescriptor +} + +// MutableDatabaseDescriptor wraps a database descriptor and provides methods +// on it. It can be mutated and generally has not been committed. +type MutableDatabaseDescriptor struct { + ImmutableDatabaseDescriptor + + ClusterVersion *DatabaseDescriptor +} + +// NewInitialDatabaseDescriptor constructs a new DatabaseDescriptor for an +// initial version from an id and name. +func NewInitialDatabaseDescriptor(id ID, name string) *ImmutableDatabaseDescriptor { + return NewInitialDatabaseDescriptorWithPrivileges(id, name, + NewDefaultPrivilegeDescriptor()) +} + +// NewInitialDatabaseDescriptorWithPrivileges constructs a new DatabaseDescriptor for an +// initial version from an id and name. +func NewInitialDatabaseDescriptorWithPrivileges( + id ID, name string, privileges *PrivilegeDescriptor, +) *ImmutableDatabaseDescriptor { + return NewImmutableDatabaseDescriptor(DatabaseDescriptor{ + Name: name, + ID: id, + Version: 1, + Privileges: privileges, + }) +} + +func makeImmutableDatabaseDesc(desc DatabaseDescriptor) ImmutableDatabaseDescriptor { + return ImmutableDatabaseDescriptor{DatabaseDescriptor: desc} +} + +// NewImmutableDatabaseDescriptor makes a new database descriptor. +func NewImmutableDatabaseDescriptor(desc DatabaseDescriptor) *ImmutableDatabaseDescriptor { + ret := makeImmutableDatabaseDesc(desc) + return &ret +} + +// NewMutableDatabaseDescriptor creates a new MutableDatabaseDescriptor. The +// version of the returned descriptor will be the successor of the descriptor +// from which it was constructed. 
+func NewMutableDatabaseDescriptor(mutationOf DatabaseDescriptor) *MutableDatabaseDescriptor { + mut := &MutableDatabaseDescriptor{ + ImmutableDatabaseDescriptor: makeImmutableDatabaseDesc(*protoutil.Clone(&mutationOf).(*DatabaseDescriptor)), + ClusterVersion: &mutationOf, + } + mut.Version++ + return mut +} + +// TypeName returns the plain type of this descriptor. +func (desc *DatabaseDescriptor) TypeName() string { + return "database" +} + +// DatabaseDesc implements the ObjectDescriptor interface. +func (desc *DatabaseDescriptor) DatabaseDesc() *DatabaseDescriptor { + return desc +} + +// SchemaDesc implements the ObjectDescriptor interface. +func (desc *DatabaseDescriptor) SchemaDesc() *SchemaDescriptor { + return nil +} + +// TableDesc implements the ObjectDescriptor interface. +func (desc *DatabaseDescriptor) TableDesc() *TableDescriptor { + return nil +} + +// TypeDesc implements the ObjectDescriptor interface. +func (desc *DatabaseDescriptor) TypeDesc() *TypeDescriptor { + return nil +} + +// NameResolutionResult implements the ObjectDescriptor interface. +func (desc *ImmutableDatabaseDescriptor) NameResolutionResult() {} + +// GetAuditMode is part of the DescriptorProto interface. +// This is a stub until per-database auditing is enabled. +func (desc *ImmutableDatabaseDescriptor) GetAuditMode() TableDescriptor_AuditMode { + return TableDescriptor_DISABLED +} + +// DescriptorProto wraps a DatabaseDescriptor in a Descriptor. +func (desc *ImmutableDatabaseDescriptor) DescriptorProto() *Descriptor { + return &Descriptor{ + Union: &Descriptor_Database{ + Database: &desc.DatabaseDescriptor, + }, + } +} + +// SetName sets the name on the descriptor. +func (desc *MutableDatabaseDescriptor) SetName(name string) { + desc.Name = name +} + +// Validate validates that the database descriptor is well formed. +// Checks include validate the database name, and verifying that there +// is at least one read and write user. 
+func (desc *ImmutableDatabaseDescriptor) Validate() error { + if err := validateName(desc.GetName(), "descriptor"); err != nil { + return err + } + if desc.GetID() == 0 { + return fmt.Errorf("invalid database ID %d", desc.GetID()) + } + + // Fill in any incorrect privileges that may have been missed due to mixed-versions. + // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been + // run again and mixed-version clusters always write "good" descriptors. + desc.Privileges.MaybeFixPrivileges(desc.GetID()) + + // Validate the privilege descriptor. + return desc.Privileges.Validate(desc.GetID()) +} diff --git a/pkg/sql/sqlbase/database_test.go b/pkg/sql/sqlbase/database_test.go index 78ef740ae5b5..a0bbf8b88fcd 100644 --- a/pkg/sql/sqlbase/database_test.go +++ b/pkg/sql/sqlbase/database_test.go @@ -25,13 +25,14 @@ func TestMakeDatabaseDesc(t *testing.T) { if err != nil { t.Fatal(err) } - desc := MakeDatabaseDesc(stmt.AST.(*tree.CreateDatabase)) - if desc.Name != "test" { - t.Fatalf("expected Name == test, got %s", desc.Name) + const id = 17 + desc := NewInitialDatabaseDescriptor(id, string(stmt.AST.(*tree.CreateDatabase).Name)) + if desc.GetName() != "test" { + t.Fatalf("expected Name == test, got %s", desc.GetName()) } // ID is not set yet. - if desc.ID != 0 { - t.Fatalf("expected ID == 0, got %d", desc.ID) + if desc.GetID() != id { + t.Fatalf("expected ID == %d, got %d", id, desc.GetID()) } if len(desc.GetPrivileges().Users) != 2 { t.Fatalf("wrong number of privilege users, expected 2, got: %d", len(desc.GetPrivileges().Users)) diff --git a/pkg/sql/sqlbase/descriptor.go b/pkg/sql/sqlbase/descriptor.go new file mode 100644 index 000000000000..9f393d386593 --- /dev/null +++ b/pkg/sql/sqlbase/descriptor.go @@ -0,0 +1,55 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package sqlbase + +import "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + +// DescriptorInterface provides table information for results from a name +// lookup. +// +// TODO(ajwerner): Move this back to catalog after sqlbase has been +// appropriately cleaned up. Furthermore, reconsider whether this interface +// actually makes much sense. It may make more sense to instead type assert into +// the individual descriptor type interfaces which we'll be introducing. +type DescriptorInterface interface { + BaseDescriptorInterface + + // DatabaseDesc returns the underlying database descriptor, or nil if the + // descriptor is not a table backed object. + DatabaseDesc() *DatabaseDescriptor + + // SchemaDesc returns the underlying schema descriptor, or nil if the + // descriptor is not a table backed object. + SchemaDesc() *SchemaDescriptor + + // TableDesc returns the underlying table descriptor, or nil if the + // descriptor is not a table backed object. + TableDesc() *TableDescriptor + + // TypeDesc returns the underlying type descriptor, or nil if the + // descriptor is not a type backed object. + TypeDesc() *TypeDescriptor +} + +// BaseDescriptorInterface is an interface to be shared by individual descriptor +// types. Perhaps this should be the actual DescriptorInterface. +type BaseDescriptorInterface interface { + tree.NameResolutionResult + + GetPrivileges() *PrivilegeDescriptor + GetID() ID + TypeName() string + GetName() string + GetAuditMode() TableDescriptor_AuditMode + + // DescriptorProto prepares this descriptor for serialization. 
+ DescriptorProto() *Descriptor +} diff --git a/pkg/sql/sqlbase/metadata.go b/pkg/sql/sqlbase/metadata.go index 706bc2e8901b..c8308789f772 100644 --- a/pkg/sql/sqlbase/metadata.go +++ b/pkg/sql/sqlbase/metadata.go @@ -24,11 +24,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/protoutil" ) -var _ DescriptorProto = &DatabaseDescriptor{} -var _ DescriptorProto = &TableDescriptor{} -var _ DescriptorProto = &TypeDescriptor{} -var _ DescriptorProto = &SchemaDescriptor{} - // DescriptorKey is the interface implemented by both // databaseKey and tableKey. It is used to easily get the // descriptor key and plain name. @@ -37,21 +32,11 @@ type DescriptorKey interface { Name() string } -// DescriptorProto is the interface implemented by all Descriptors. -// TODO(marc): this is getting rather large. -type DescriptorProto interface { - protoutil.Message - GetPrivileges() *PrivilegeDescriptor - GetID() ID - SetID(ID) - TypeName() string - GetName() string - SetName(string) - GetAuditMode() TableDescriptor_AuditMode -} - -// WrapDescriptor fills in a Descriptor. -func WrapDescriptor(descriptor DescriptorProto) *Descriptor { +// wrapDescriptor fills in a Descriptor from a given member of its union. +// +// TODO(ajwerner): Replace this with the relevant type-specific DescriptorProto +// methods. 
+func wrapDescriptor(descriptor protoutil.Message) *Descriptor { desc := &Descriptor{} switch t := descriptor.(type) { case *MutableTableDescriptor: @@ -67,7 +52,7 @@ func WrapDescriptor(descriptor DescriptorProto) *Descriptor { case *SchemaDescriptor: desc.Union = &Descriptor_Schema{Schema: t} default: - panic(fmt.Sprintf("unknown descriptor type: %s", descriptor.TypeName())) + panic(fmt.Sprintf("unknown descriptor type: %T", descriptor)) } return desc } @@ -85,7 +70,7 @@ type MetadataSchema struct { type metadataDescriptor struct { parentID ID - desc DescriptorProto + desc DescriptorInterface } // MakeMetadataSchema constructs a new MetadataSchema value which constructs @@ -103,7 +88,7 @@ func MakeMetadataSchema( } // AddDescriptor adds a new non-config descriptor to the system schema. -func (ms *MetadataSchema) AddDescriptor(parentID ID, desc DescriptorProto) { +func (ms *MetadataSchema) AddDescriptor(parentID ID, desc DescriptorInterface) { if id := desc.GetID(); id > keys.MaxReservedDescID { panic(fmt.Sprintf("invalid reserved table ID: %d > %d", id, keys.MaxReservedDescID)) } @@ -150,7 +135,7 @@ func (ms MetadataSchema) GetInitialValues() ([]roachpb.KeyValue, []roachpb.RKey) // addDescriptor generates the needed KeyValue objects to install a // descriptor on a new cluster. - addDescriptor := func(parentID ID, desc DescriptorProto) { + addDescriptor := func(parentID ID, desc DescriptorInterface) { // Create name metadata key. value := roachpb.Value{} value.SetInt(int64(desc.GetID())) @@ -178,8 +163,8 @@ func (ms MetadataSchema) GetInitialValues() ([]roachpb.KeyValue, []roachpb.RKey) // Create descriptor metadata key. 
value = roachpb.Value{} - wrappedDesc := WrapDescriptor(desc) - if err := value.SetProto(wrappedDesc); err != nil { + descDesc := desc.DescriptorProto() + if err := value.SetProto(descDesc); err != nil { log.Fatalf(context.TODO(), "could not marshal %v", desc) } ret = append(ret, roachpb.KeyValue{ @@ -242,8 +227,8 @@ var systemTableIDCache = func() [2]map[string]ID { ms := MetadataSchema{codec: codec} addSystemDescriptorsToSchema(&ms) for _, d := range ms.descs { - t, ok := d.desc.(*TableDescriptor) - if !ok || t.ParentID != SystemDB.ID || t.ID > keys.MaxReservedDescID { + t := d.desc.TableDesc() + if t == nil || t.ParentID != keys.SystemDatabaseID || t.ID > keys.MaxReservedDescID { // We only cache table descriptors under 'system' with a // reserved table ID. continue @@ -286,7 +271,7 @@ func boolToInt(b bool) int { func LookupSystemTableDescriptorID( ctx context.Context, settings *cluster.Settings, codec keys.SQLCodec, dbID ID, tableName string, ) ID { - if dbID != SystemDB.ID { + if dbID != SystemDB.GetID() { return InvalidID } diff --git a/pkg/sql/sqlbase/privilege.go b/pkg/sql/sqlbase/privilege.go index 5c867f46504d..a493da1cc25e 100644 --- a/pkg/sql/sqlbase/privilege.go +++ b/pkg/sql/sqlbase/privilege.go @@ -176,6 +176,11 @@ func (p *PrivilegeDescriptor) Revoke(user string, privList privilege.List) { // * fixing default privileges for the "root" user // * fixing maximum privileges for users. // Returns true if the privilege descriptor was modified. +// +// TODO(ajwerner): Figure out whether this is still needed. It seems like +// perhaps it was intended only for the 2.0 release but then somehow we got +// bad descriptors with bad initial permissions into later versions or we didn't +// properly bake this migration in. 
func (p *PrivilegeDescriptor) MaybeFixPrivileges(id ID) bool { allowedPrivilegesBits := privilege.ALL.Mask() if IsReservedID(id) { diff --git a/pkg/sql/sqlbase/schema_desc.go b/pkg/sql/sqlbase/schema_desc.go new file mode 100644 index 000000000000..ba894d6e1457 --- /dev/null +++ b/pkg/sql/sqlbase/schema_desc.go @@ -0,0 +1,109 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package sqlbase + +// SchemaDescriptorInterface will eventually be called dbdesc.Descriptor. +// It is implemented by ImmutableSchemaDescriptor. +type SchemaDescriptorInterface interface { + BaseDescriptorInterface + SchemaDesc() *SchemaDescriptor +} + +var _ SchemaDescriptorInterface = (*ImmutableSchemaDescriptor)(nil) +var _ SchemaDescriptorInterface = (*MutableSchemaDescriptor)(nil) + +// ImmutableSchemaDescriptor wraps a Schema descriptor and provides methods +// on it. +type ImmutableSchemaDescriptor struct { + SchemaDescriptor +} + +// MutableSchemaDescriptor is a mutable reference to a SchemaDescriptor. +// +// Note: Today this isn't actually ever mutated but rather exists for a future +// where we anticipate having a mutable copy of Schema descriptors. There's a +// large amount of space to question this `Mutable|Immutable` version of each +// descriptor type. Maybe it makes no sense but we're running with it for the +// moment. This is an intermediate state on the road to descriptors being +// handled outside of the catalog entirely as interfaces. +type MutableSchemaDescriptor struct { + ImmutableSchemaDescriptor + + ClusterVersion *ImmutableSchemaDescriptor +} + +// NewImmutableSchemaDescriptor makes a new Schema descriptor. 
+func NewImmutableSchemaDescriptor(desc SchemaDescriptor) *ImmutableSchemaDescriptor { + return &ImmutableSchemaDescriptor{ + SchemaDescriptor: desc, + } +} + +// Reference these functions to defeat the linter. +var ( + _ = NewImmutableSchemaDescriptor + _ = NewInitialSchemaDescriptor +) + +// NewInitialSchemaDescriptor constructs a new SchemaDescriptor for an +// initial version from an id and name. +func NewInitialSchemaDescriptor(id ID, name string) *ImmutableSchemaDescriptor { + return &ImmutableSchemaDescriptor{ + SchemaDescriptor: SchemaDescriptor{ + ID: id, + Name: name, + Version: 1, + Privileges: NewDefaultPrivilegeDescriptor(), + }, + } +} + +// GetAuditMode implements the DescriptorProto interface. +func (desc *ImmutableSchemaDescriptor) GetAuditMode() TableDescriptor_AuditMode { + return TableDescriptor_DISABLED +} + +// TypeName implements the DescriptorProto interface. +func (desc *ImmutableSchemaDescriptor) TypeName() string { + return "schema" +} + +// DatabaseDesc implements the ObjectDescriptor interface. +func (desc *ImmutableSchemaDescriptor) DatabaseDesc() *DatabaseDescriptor { + return nil +} + +// SchemaDesc implements the ObjectDescriptor interface. +func (desc *ImmutableSchemaDescriptor) SchemaDesc() *SchemaDescriptor { + return &desc.SchemaDescriptor +} + +// TableDesc implements the ObjectDescriptor interface. +func (desc *ImmutableSchemaDescriptor) TableDesc() *TableDescriptor { + return nil +} + +// TypeDesc implements the ObjectDescriptor interface. +func (desc *ImmutableSchemaDescriptor) TypeDesc() *TypeDescriptor { + return nil +} + +// DescriptorProto wraps a SchemaDescriptor in a Descriptor. +func (desc *ImmutableSchemaDescriptor) DescriptorProto() *Descriptor { + return &Descriptor{ + Union: &Descriptor_Schema{ + Schema: &desc.SchemaDescriptor, + }, + } +} + +// NameResolutionResult implements the ObjectDescriptor interface. 
+func (desc *ImmutableSchemaDescriptor) NameResolutionResult() {} diff --git a/pkg/sql/sqlbase/structured.go b/pkg/sql/sqlbase/structured.go index dcc47174a081..37822ef72ca5 100644 --- a/pkg/sql/sqlbase/structured.go +++ b/pkg/sql/sqlbase/structured.go @@ -13,6 +13,7 @@ package sqlbase import ( "context" "fmt" + "runtime/debug" "sort" "strconv" "strings" @@ -170,6 +171,13 @@ type MutableTableDescriptor struct { ClusterVersion TableDescriptor } +// DescriptorProto prepares desc for serialization. +func (desc *TableDescriptor) DescriptorProto() *Descriptor { + // TODO(ajwerner): Copy over the metadata fields. This method should not exist + // on the TableDescriptor itself but rather on the wrappers. + return wrapDescriptor(desc) +} + // ImmutableTableDescriptor is a custom type for TableDescriptors // It holds precomputed values and the underlying TableDescriptor // should be const. @@ -341,10 +349,10 @@ type protoGetter interface { // descriptor doesn't exist or if it exists and is not a database. func GetDatabaseDescFromID( ctx context.Context, protoGetter protoGetter, codec keys.SQLCodec, id ID, -) (*DatabaseDescriptor, error) { +) (*ImmutableDatabaseDescriptor, error) { desc := &Descriptor{} descKey := MakeDescMetadataKey(codec, id) - _, err := protoGetter.GetProtoTs(ctx, descKey, desc) + ts, err := protoGetter.GetProtoTs(ctx, descKey, desc) if err != nil { return nil, err } @@ -352,26 +360,8 @@ func GetDatabaseDescFromID( if db == nil { return nil, ErrDescriptorNotFound } - return db, nil -} - -// GetTypeDescFromID retrieves the type descriptor for the type ID passed -// in using an existing proto getter. It returns an error if the descriptor -// doesn't exist or if it exists and is not a type descriptor. 
-func GetTypeDescFromID( - ctx context.Context, protoGetter protoGetter, codec keys.SQLCodec, id ID, -) (*TypeDescriptor, error) { - descKey := MakeDescMetadataKey(codec, id) - desc := &Descriptor{} - _, err := protoGetter.GetProtoTs(ctx, descKey, desc) - if err != nil { - return nil, err - } - typ := desc.GetType() - if typ == nil { - return nil, ErrDescriptorNotFound - } - return typ, nil + desc.MaybeSetModificationTimeFromMVCCTimestamp(ctx, ts) + return NewImmutableDatabaseDescriptor(*db), nil } // GetTableDescFromID retrieves the table descriptor for the table @@ -684,11 +674,6 @@ func (desc *IndexDescriptor) IsPartial() bool { return desc.Predicate != "" } -// SetID implements the DescriptorProto interface. -func (desc *TableDescriptor) SetID(id ID) { - desc.ID = id -} - // TypeName returns the plain type of this descriptor. func (desc *TableDescriptor) TypeName() string { return "relation" @@ -2752,6 +2737,9 @@ func (desc *TableDescriptor) FindFamilyByID(id FamilyID) (*ColumnFamilyDescripto // FindIndexByName finds the index with the specified name in the active // list or the mutations list. It returns true if the index is being dropped. +// +// TODO(ajwerner): Lift this and methods like it up to the +// ImmutableTableDescriptor. func (desc *TableDescriptor) FindIndexByName(name string) (*IndexDescriptor, bool, error) { if desc.IsPhysicalTable() && desc.PrimaryIndex.Name == name { return &desc.PrimaryIndex, false, nil @@ -3679,64 +3667,6 @@ func ColumnsSelectors(cols []ColumnDescriptor) tree.SelectExprs { return exprs } -// SetID implements the DescriptorProto interface. -func (desc *DatabaseDescriptor) SetID(id ID) { - desc.ID = id -} - -// TypeName returns the plain type of this descriptor. -func (desc *DatabaseDescriptor) TypeName() string { - return "database" -} - -// SetName implements the DescriptorProto interface. 
-func (desc *DatabaseDescriptor) SetName(name string) { - desc.Name = name -} - -// DatabaseDesc implements the ObjectDescriptor interface. -func (desc *DatabaseDescriptor) DatabaseDesc() *DatabaseDescriptor { - return desc -} - -// SchemaDesc implements the ObjectDescriptor interface. -func (desc *DatabaseDescriptor) SchemaDesc() *SchemaDescriptor { - return nil -} - -// TableDesc implements the ObjectDescriptor interface. -func (desc *DatabaseDescriptor) TableDesc() *TableDescriptor { - return nil -} - -// TypeDesc implements the ObjectDescriptor interface. -func (desc *DatabaseDescriptor) TypeDesc() *TypeDescriptor { - return nil -} - -// NameResolutionResult implements the ObjectDescriptor interface. -func (desc *DatabaseDescriptor) NameResolutionResult() {} - -// Validate validates that the database descriptor is well formed. -// Checks include validate the database name, and verifying that there -// is at least one read and write user. -func (desc *DatabaseDescriptor) Validate() error { - if err := validateName(desc.Name, "descriptor"); err != nil { - return err - } - if desc.ID == 0 { - return fmt.Errorf("invalid database ID %d", desc.ID) - } - - // Fill in any incorrect privileges that may have been missed due to mixed-versions. - // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been - // run again and mixed-version clusters always write "good" descriptors. - desc.Privileges.MaybeFixPrivileges(desc.GetID()) - - // Validate the privilege descriptor. - return desc.Privileges.Validate(desc.GetID()) -} - // GetID returns the ID of the descriptor. 
func (desc *Descriptor) GetID() ID { switch t := desc.Union.(type) { @@ -3746,8 +3676,10 @@ func (desc *Descriptor) GetID() ID { return t.Database.ID case *Descriptor_Type: return t.Type.ID + case *Descriptor_Schema: + return t.Schema.ID default: - return 0 + panic(errors.AssertionFailedf("GetID: unknown Descriptor type %T", t)) } } @@ -3760,27 +3692,66 @@ func (desc *Descriptor) GetName() string { return t.Database.Name case *Descriptor_Type: return t.Type.Name + case *Descriptor_Schema: + return t.Schema.Name default: - return "" + panic(errors.AssertionFailedf("GetName: unknown Descriptor type %T", t)) } } -// Table is a replacement for GetTable() which seeks to ensure that clients -// which unmarshal Descriptor structs properly set the ModificationTime on -// tables based on the MVCC timestamp at which the descriptor was read. -// -// A linter should ensure that GetTable() is not called. -func (desc *Descriptor) Table(ts hlc.Timestamp) *TableDescriptor { - t := desc.GetTable() - if t != nil { - t.maybeSetTimeFromMVCCTimestamp(ts) +// GetVersion returns the Version of the descriptor. +func (desc *Descriptor) GetVersion() DescriptorVersion { + switch t := desc.Union.(type) { + case *Descriptor_Table: + return t.Table.Version + case *Descriptor_Database: + return t.Database.Version + case *Descriptor_Type: + return t.Type.Version + case *Descriptor_Schema: + return t.Schema.Version + default: + panic(errors.AssertionFailedf("GetVersion: unknown Descriptor type %T", t)) + } +} + +// GetModificationTime returns the ModificationTime of the descriptor. 
+func (desc *Descriptor) GetModificationTime() hlc.Timestamp { + switch t := desc.Union.(type) { + case *Descriptor_Table: + return t.Table.ModificationTime + case *Descriptor_Database: + return t.Database.ModificationTime + case *Descriptor_Type: + return t.Type.ModificationTime + case *Descriptor_Schema: + return t.Schema.ModificationTime + default: + debug.PrintStack() + panic(errors.AssertionFailedf("GetModificationTime: unknown Descriptor type %T", t)) + } +} + +// GetModificationTime returns the ModificationTime of the descriptor. +func (desc *Descriptor) setModificationTime(ts hlc.Timestamp) { + switch t := desc.Union.(type) { + case *Descriptor_Table: + t.Table.ModificationTime = ts + case *Descriptor_Database: + t.Database.ModificationTime = ts + case *Descriptor_Type: + t.Type.ModificationTime = ts + case *Descriptor_Schema: + t.Schema.ModificationTime = ts + default: + panic(errors.AssertionFailedf("setModificationTime: unknown Descriptor type %T", t)) } - return t } -// maybeSetTimeFromMVCCTimestamp will update ModificationTime and possible -// CreateAsOfTime with the provided timestamp. If desc.ModificationTime is -// non-zero it must be the case that it is not after the provided timestamp. +// MaybeSetModificationTimeFromMVCCTimestamp will update ModificationTime and +// possibly CreateAsOfTime on TableDescriptor with the provided timestamp. If +// ModificationTime is non-zero it must be the case that it is not after the +// provided timestamp. // // When table descriptor versions are incremented they are written with a // zero-valued ModificationTime. This is done to avoid the need to observe @@ -3794,30 +3765,37 @@ func (desc *Descriptor) Table(ts hlc.Timestamp) *TableDescriptor { // // It is vital that users which read table descriptor values from the KV store // call this method. -func (desc *TableDescriptor) maybeSetTimeFromMVCCTimestamp(ts hlc.Timestamp) { - // CreateAsOfTime is used for CREATE TABLE ... AS ... and was introduced in - // v19.1. 
In general it is not critical to set except for tables in the ADD - // ADD state which were created from CTAS so we should not assert on its not - // being set. It's not always sensical to set it from the passed MVCC - // timestamp. However, starting in 19.2 the CreateAsOfTime and - // ModificationTime fields are both unset for the first Version of a - // TableDescriptor and the code relies on the value being set based on the - // MVCC timestamp. - if !ts.IsEmpty() && - desc.ModificationTime.IsEmpty() && - desc.CreateAsOfTime.IsEmpty() && - desc.Version == 1 { - desc.CreateAsOfTime = ts - } - - // Ensure that if the table is in the process of being added and relies on - // CreateAsOfTime that it is now set. - if desc.Adding() && desc.IsAs() && desc.CreateAsOfTime.IsEmpty() { - log.Fatalf(context.TODO(), "table descriptor for %q (%d.%d) is in the "+ - "ADD state and was created with CREATE TABLE ... AS but does not have a "+ - "CreateAsOfTime set", desc.Name, desc.ParentID, desc.ID) +func (desc *Descriptor) MaybeSetModificationTimeFromMVCCTimestamp( + ctx context.Context, ts hlc.Timestamp, +) { + switch t := desc.Union.(type) { + case nil: + // Empty descriptors shouldn't be touched. + return + case *Descriptor_Table: + // CreateAsOfTime is used for CREATE TABLE ... AS ... and was introduced in + // v19.1. In general it is not critical to set except for tables in the ADD + // state which were created from CTAS so we should not assert on its not + // being set. It's not always sensical to set it from the passed MVCC + // timestamp. However, starting in 19.2 the CreateAsOfTime and + // ModificationTime fields are both unset for the first Version of a + // TableDescriptor and the code relies on the value being set based on the + // MVCC timestamp. 
+ if !ts.IsEmpty() && + t.Table.ModificationTime.IsEmpty() && + t.Table.CreateAsOfTime.IsEmpty() && + t.Table.Version == 1 { + t.Table.CreateAsOfTime = ts + } + + // Ensure that if the table is in the process of being added and relies on + // CreateAsOfTime that it is now set. + if t.Table.Adding() && t.Table.IsAs() && t.Table.CreateAsOfTime.IsEmpty() { + log.Fatalf(context.TODO(), "table descriptor for %q (%d.%d) is in the "+ + "ADD state and was created with CREATE TABLE ... AS but does not have a "+ + "CreateAsOfTime set", t.Table.Name, t.Table.ParentID, t.Table.ID) + } } - // Set the ModificationTime based on the passed ts if we should. // Table descriptors can be updated in place after their version has been // incremented (e.g. to include a schema change lease). @@ -3825,18 +3803,35 @@ func (desc *TableDescriptor) maybeSetTimeFromMVCCTimestamp(ts hlc.Timestamp) { // with the value that lives on the in-memory copy. That value should contain // a timestamp set by this method. Thus if the ModificationTime is set it // must not be after the MVCC timestamp we just read it at. - if desc.ModificationTime.IsEmpty() && ts.IsEmpty() { - log.Fatalf(context.TODO(), "read table descriptor for %q (%d.%d) without ModificationTime "+ - "with zero MVCC timestamp", desc.Name, desc.ParentID, desc.ID) - } else if desc.ModificationTime.IsEmpty() { - desc.ModificationTime = ts - } else if !ts.IsEmpty() && ts.Less(desc.ModificationTime) { - log.Fatalf(context.TODO(), "read table descriptor %q (%d.%d) which has a ModificationTime "+ + if modTime := desc.GetModificationTime(); modTime.IsEmpty() && ts.IsEmpty() && desc.GetVersion() > 1 { + // TODO(ajwerner): reconsider the third condition here.It seems that there + // are some cases where system tables lack this timestamp and then when they + // are rendered in some other downstream setting we expect the timestamp to + // be read. This is a hack we shouldn't need to do. 
+ log.Fatalf(context.TODO(), "read table descriptor for %q (%d) without ModificationTime "+ + "with zero MVCC timestamp", desc.GetName(), desc.GetID()) + } else if modTime.IsEmpty() { + desc.setModificationTime(ts) + } else if !ts.IsEmpty() && ts.Less(modTime) { + log.Fatalf(context.TODO(), "read table descriptor %q (%d) which has a ModificationTime "+ "after its MVCC timestamp: has %v, expected %v", - desc.Name, desc.ParentID, desc.ID, desc.ModificationTime, ts) + desc.GetName(), desc.GetID(), modTime, ts) } } +// Table is a replacement for GetTable() which seeks to ensure that clients +// which unmarshal Descriptor structs properly set the ModificationTime on +// tables based on the MVCC timestamp at which the descriptor was read. +// +// A linter should ensure that GetTable() is not called. +func (desc *Descriptor) Table(ts hlc.Timestamp) *TableDescriptor { + t := desc.GetTable() + if t != nil { + desc.MaybeSetModificationTimeFromMVCCTimestamp(context.TODO(), ts) + } + return t +} + // IsSet returns whether or not the foreign key actually references a table. func (f ForeignKeyReference) IsSet() bool { return f.Table != 0 @@ -4192,12 +4187,6 @@ func (desc *TableDescriptor) SetAuditMode(mode tree.AuditMode) (bool, error) { return prev != desc.AuditMode, nil } -// GetAuditMode is part of the DescriptorProto interface. -// This is a stub until per-database auditing is enabled. -func (desc *DatabaseDescriptor) GetAuditMode() TableDescriptor_AuditMode { - return TableDescriptor_DISABLED -} - // FindAllReferences returns all the references from a table. func (desc *TableDescriptor) FindAllReferences() (map[ID]struct{}, error) { refs := map[ID]struct{}{} @@ -4325,260 +4314,6 @@ func (desc *ImmutableTableDescriptor) TypeDesc() *TypeDescriptor { return nil } -// MutableTypeDescriptor is a custom type for TypeDescriptors undergoing -// any types of modifications. 
-type MutableTypeDescriptor struct { - TypeDescriptor - - // ClusterVersion represents the version of the type descriptor read - // from the store. - ClusterVersion TypeDescriptor -} - -// ImmutableTypeDescriptor is a custom type for wrapping TypeDescriptors -// when used in a read only way. -type ImmutableTypeDescriptor struct { - TypeDescriptor -} - -// Avoid linter unused warnings. -var _ = NewMutableCreatedTypeDescriptor - -// NewMutableCreatedTypeDescriptor returns a MutableTypeDescriptor from the -// given type descriptor with the cluster version being the zero type. This -// is for a type that is created in the same transaction. -func NewMutableCreatedTypeDescriptor(desc TypeDescriptor) *MutableTypeDescriptor { - return &MutableTypeDescriptor{TypeDescriptor: desc} -} - -// NewMutableExistingTypeDescriptor returns a MutableTypeDescriptor from the -// given type descriptor with the cluster version also set to the descriptor. -// This is for types that already exist. -func NewMutableExistingTypeDescriptor(desc TypeDescriptor) *MutableTypeDescriptor { - return &MutableTypeDescriptor{TypeDescriptor: desc, ClusterVersion: desc} -} - -// NewImmutableTypeDescriptor returns an ImmutableTypeDescriptor from the -// given TypeDescriptor. -func NewImmutableTypeDescriptor(desc TypeDescriptor) *ImmutableTypeDescriptor { - return &ImmutableTypeDescriptor{TypeDescriptor: desc} -} - -// DatabaseDesc implements the ObjectDescriptor interface. -func (desc *TypeDescriptor) DatabaseDesc() *DatabaseDescriptor { - return nil -} - -// SchemaDesc implements the ObjectDescriptor interface. -func (desc *TypeDescriptor) SchemaDesc() *SchemaDescriptor { - return nil -} - -// TableDesc implements the ObjectDescriptor interface. -func (desc *TypeDescriptor) TableDesc() *TableDescriptor { - return nil -} - -// TypeDesc implements the ObjectDescriptor interface. 
-func (desc *TypeDescriptor) TypeDesc() *TypeDescriptor { - return desc -} - -// GetAuditMode implements the DescriptorProto interface. -func (desc *TypeDescriptor) GetAuditMode() TableDescriptor_AuditMode { - return TableDescriptor_DISABLED -} - -// GetPrivileges implements the DescriptorProto interface. -func (desc *TypeDescriptor) GetPrivileges() *PrivilegeDescriptor { - return nil -} - -// SetID implements the DescriptorProto interface. -func (desc *TypeDescriptor) SetID(id ID) { - desc.ID = id -} - -// TypeName implements the DescriptorProto interface. -func (desc *TypeDescriptor) TypeName() string { - return "type" -} - -// SetName implements the DescriptorProto interface. -func (desc *TypeDescriptor) SetName(name string) { - desc.Name = name -} - -// HydrateTypeInfoWithName fills in user defined type metadata for -// a type and sets the name in the metadata to the passed in name. -// This is used when hydrating a type with a known qualified name. -// TODO (rohany): This method should eventually be defined on an -// ImmutableTypeDescriptor so that pointers to the cached info -// can be shared among callers. 
-func (desc *TypeDescriptor) HydrateTypeInfoWithName( - typ *types.T, name *tree.TypeName, typeLookup TypeLookupFunc, -) error { - typ.TypeMeta.Name = types.MakeUserDefinedTypeName(name.Catalog(), name.Schema(), name.Object()) - switch desc.Kind { - case TypeDescriptor_ENUM: - if typ.Family() != types.EnumFamily { - return errors.New("cannot hydrate a non-enum type with an enum type descriptor") - } - logical := make([]string, len(desc.EnumMembers)) - physical := make([][]byte, len(desc.EnumMembers)) - for i := range desc.EnumMembers { - member := &desc.EnumMembers[i] - logical[i] = member.LogicalRepresentation - physical[i] = member.PhysicalRepresentation - } - typ.TypeMeta.EnumData = &types.EnumMetadata{ - LogicalRepresentations: logical, - PhysicalRepresentations: physical, - } - return nil - case TypeDescriptor_ALIAS: - if typ.UserDefined() { - switch typ.Family() { - case types.ArrayFamily: - // Hydrate the element type. - elemType := typ.ArrayContents() - elemTypName, elemTypDesc, err := typeLookup(ID(elemType.StableTypeID())) - if err != nil { - return err - } - if err := elemTypDesc.HydrateTypeInfoWithName(elemType, elemTypName, typeLookup); err != nil { - return err - } - return nil - default: - return errors.AssertionFailedf("only array types aliases can be user defined") - } - } - return nil - default: - return errors.AssertionFailedf("unknown type descriptor kind %s", desc.Kind) - } -} - -// TypeLookupFunc is a type alias for a function that looks up a type by ID. -type TypeLookupFunc func(id ID) (*tree.TypeName, *TypeDescriptor, error) - -// MakeTypesT creates a types.T from the input type descriptor. 
-func (desc *TypeDescriptor) MakeTypesT( - name *tree.TypeName, typeLookup TypeLookupFunc, -) (*types.T, error) { - switch t := desc.Kind; t { - case TypeDescriptor_ENUM: - typ := types.MakeEnum(uint32(desc.ID), uint32(desc.ArrayTypeID)) - if err := desc.HydrateTypeInfoWithName(typ, name, typeLookup); err != nil { - return nil, err - } - return typ, nil - case TypeDescriptor_ALIAS: - // Hydrate the alias and return it. - if err := desc.HydrateTypeInfoWithName(desc.Alias, name, typeLookup); err != nil { - return nil, err - } - return desc.Alias, nil - default: - return nil, errors.AssertionFailedf("unknown type kind %s", t.String()) - } -} - -// HydrateTypesInTableDescriptor uses typeLookup to install metadata in the -// types present in a table descriptor. typeLookup retrieves the fully -// qualified name and descriptor for a particular ID. -func HydrateTypesInTableDescriptor(desc *TableDescriptor, typeLookup TypeLookupFunc) error { - for i := range desc.Columns { - col := &desc.Columns[i] - if col.Type.UserDefined() { - // Look up its type descriptor. - name, typDesc, err := typeLookup(ID(col.Type.StableTypeID())) - if err != nil { - return err - } - // TODO (rohany): This should be a noop if the hydrated type - // information present in the descriptor has the same version as - // the resolved type descriptor we found here. - if err := typDesc.HydrateTypeInfoWithName(col.Type, name, typeLookup); err != nil { - return err - } - } - } - return nil -} - -// MakeSimpleAliasTypeDescriptor creates a type descriptor that is an alias -// for the input type. It is intended to be used as an intermediate for name -// resolution, and should not be serialized and stored on disk. -func MakeSimpleAliasTypeDescriptor(typ *types.T) *TypeDescriptor { - return &TypeDescriptor{ - ParentID: InvalidID, - ParentSchemaID: InvalidID, - Name: typ.Name(), - ID: InvalidID, - Kind: TypeDescriptor_ALIAS, - Alias: typ, - } -} - -// MakeTypeDescriptor creates a type descriptor. 
It does not fill in kind -// specific information about the type. -func MakeTypeDescriptor(parentID, parentSchemaID, id ID, name string) TypeDescriptor { - return TypeDescriptor{ - ParentID: parentID, - ParentSchemaID: parentSchemaID, - Name: name, - ID: id, - } -} - -// NameResolutionResult implements the NameResolutionResult interface. -func (desc *TypeDescriptor) NameResolutionResult() {} - -// GetAuditMode implements the DescriptorProto interface. -func (desc *SchemaDescriptor) GetAuditMode() TableDescriptor_AuditMode { - return TableDescriptor_DISABLED -} - -// SetID implements the DescriptorProto interface. -func (desc *SchemaDescriptor) SetID(id ID) { - desc.ID = id -} - -// TypeName implements the DescriptorProto interface. -func (desc *SchemaDescriptor) TypeName() string { - return "schema" -} - -// SetName implements the DescriptorProto interface. -func (desc *SchemaDescriptor) SetName(name string) { - desc.Name = name -} - -// DatabaseDesc implements the ObjectDescriptor interface. -func (desc *SchemaDescriptor) DatabaseDesc() *DatabaseDescriptor { - return nil -} - -// SchemaDesc implements the ObjectDescriptor interface. -func (desc *SchemaDescriptor) SchemaDesc() *SchemaDescriptor { - return desc -} - -// TableDesc implements the ObjectDescriptor interface. -func (desc *SchemaDescriptor) TableDesc() *TableDescriptor { - return nil -} - -// TypeDesc implements the ObjectDescriptor interface. -func (desc *SchemaDescriptor) TypeDesc() *TypeDescriptor { - return nil -} - -// NameResolutionResult implements the ObjectDescriptor interface. -func (desc *SchemaDescriptor) NameResolutionResult() {} - // DatabaseKey implements DescriptorKey. 
type DatabaseKey struct { name string diff --git a/pkg/sql/sqlbase/structured.pb.go b/pkg/sql/sqlbase/structured.pb.go index 87e00f583e36..78b04a98d47a 100644 --- a/pkg/sql/sqlbase/structured.pb.go +++ b/pkg/sql/sqlbase/structured.pb.go @@ -74,7 +74,7 @@ func (x *ConstraintValidity) UnmarshalJSON(data []byte) error { return nil } func (ConstraintValidity) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{0} } type ForeignKeyReference_Action int32 @@ -119,7 +119,7 @@ func (x *ForeignKeyReference_Action) UnmarshalJSON(data []byte) error { return nil } func (ForeignKeyReference_Action) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{0, 0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{0, 0} } // Match is the algorithm used to compare composite keys. @@ -159,7 +159,7 @@ func (x *ForeignKeyReference_Match) UnmarshalJSON(data []byte) error { return nil } func (ForeignKeyReference_Match) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{0, 1} + return fileDescriptor_structured_8ef81714cff0efcf, []int{0, 1} } // The direction of a column in the index. @@ -196,7 +196,7 @@ func (x *IndexDescriptor_Direction) UnmarshalJSON(data []byte) error { return nil } func (IndexDescriptor_Direction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{7, 0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{7, 0} } // The type of the index. 
@@ -233,7 +233,7 @@ func (x *IndexDescriptor_Type) UnmarshalJSON(data []byte) error { return nil } func (IndexDescriptor_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{7, 1} + return fileDescriptor_structured_8ef81714cff0efcf, []int{7, 1} } type ConstraintToUpdate_ConstraintType int32 @@ -276,7 +276,7 @@ func (x *ConstraintToUpdate_ConstraintType) UnmarshalJSON(data []byte) error { return nil } func (ConstraintToUpdate_ConstraintType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{8, 0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{8, 0} } // A descriptor within a mutation is unavailable for reads, writes @@ -341,7 +341,7 @@ func (x *DescriptorMutation_State) UnmarshalJSON(data []byte) error { return nil } func (DescriptorMutation_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{11, 0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{11, 0} } // Direction of mutation. @@ -384,7 +384,7 @@ func (x *DescriptorMutation_Direction) UnmarshalJSON(data []byte) error { return nil } func (DescriptorMutation_Direction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{11, 1} + return fileDescriptor_structured_8ef81714cff0efcf, []int{11, 1} } // State is set if this TableDescriptor is in the process of being added or deleted. @@ -435,7 +435,7 @@ func (x *TableDescriptor_State) UnmarshalJSON(data []byte) error { return nil } func (TableDescriptor_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12, 0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13, 0} } // AuditMode indicates which auditing actions to take when this table is used. 
@@ -472,7 +472,7 @@ func (x *TableDescriptor_AuditMode) UnmarshalJSON(data []byte) error { return nil } func (TableDescriptor_AuditMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12, 1} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13, 1} } // Represents the kind of type that this type descriptor represents. @@ -512,7 +512,7 @@ func (x *TypeDescriptor_Kind) UnmarshalJSON(data []byte) error { return nil } func (TypeDescriptor_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{14, 0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{15, 0} } // ForeignKeyReference is deprecated, replaced by ForeignKeyConstraint in v19.2 @@ -542,7 +542,7 @@ func (m *ForeignKeyReference) Reset() { *m = ForeignKeyReference{} } func (m *ForeignKeyReference) String() string { return proto.CompactTextString(m) } func (*ForeignKeyReference) ProtoMessage() {} func (*ForeignKeyReference) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{0} } func (m *ForeignKeyReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -604,7 +604,7 @@ func (m *ForeignKeyConstraint) Reset() { *m = ForeignKeyConstraint{} } func (m *ForeignKeyConstraint) String() string { return proto.CompactTextString(m) } func (*ForeignKeyConstraint) ProtoMessage() {} func (*ForeignKeyConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{1} + return fileDescriptor_structured_8ef81714cff0efcf, []int{1} } func (m *ForeignKeyConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -666,7 +666,7 @@ func (m *ColumnDescriptor) Reset() { *m = ColumnDescriptor{} } func (m *ColumnDescriptor) String() string { return proto.CompactTextString(m) } func (*ColumnDescriptor) ProtoMessage() {} func (*ColumnDescriptor) Descriptor() ([]byte, []int) { - 
return fileDescriptor_structured_6e223c312147c376, []int{2} + return fileDescriptor_structured_8ef81714cff0efcf, []int{2} } func (m *ColumnDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -722,7 +722,7 @@ func (m *ColumnFamilyDescriptor) Reset() { *m = ColumnFamilyDescriptor{} func (m *ColumnFamilyDescriptor) String() string { return proto.CompactTextString(m) } func (*ColumnFamilyDescriptor) ProtoMessage() {} func (*ColumnFamilyDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{3} + return fileDescriptor_structured_8ef81714cff0efcf, []int{3} } func (m *ColumnFamilyDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -768,7 +768,7 @@ func (m *InterleaveDescriptor) Reset() { *m = InterleaveDescriptor{} } func (m *InterleaveDescriptor) String() string { return proto.CompactTextString(m) } func (*InterleaveDescriptor) ProtoMessage() {} func (*InterleaveDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{4} + return fileDescriptor_structured_8ef81714cff0efcf, []int{4} } func (m *InterleaveDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -812,7 +812,7 @@ func (m *InterleaveDescriptor_Ancestor) Reset() { *m = InterleaveDescrip func (m *InterleaveDescriptor_Ancestor) String() string { return proto.CompactTextString(m) } func (*InterleaveDescriptor_Ancestor) ProtoMessage() {} func (*InterleaveDescriptor_Ancestor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{4, 0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{4, 0} } func (m *InterleaveDescriptor_Ancestor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -867,7 +867,7 @@ func (m *ShardedDescriptor) Reset() { *m = ShardedDescriptor{} } func (m *ShardedDescriptor) String() string { return proto.CompactTextString(m) } func (*ShardedDescriptor) ProtoMessage() {} func (*ShardedDescriptor) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_structured_6e223c312147c376, []int{5} + return fileDescriptor_structured_8ef81714cff0efcf, []int{5} } func (m *ShardedDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -912,7 +912,7 @@ func (m *PartitioningDescriptor) Reset() { *m = PartitioningDescriptor{} func (m *PartitioningDescriptor) String() string { return proto.CompactTextString(m) } func (*PartitioningDescriptor) ProtoMessage() {} func (*PartitioningDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{6} + return fileDescriptor_structured_8ef81714cff0efcf, []int{6} } func (m *PartitioningDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -955,7 +955,7 @@ func (m *PartitioningDescriptor_List) Reset() { *m = PartitioningDescrip func (m *PartitioningDescriptor_List) String() string { return proto.CompactTextString(m) } func (*PartitioningDescriptor_List) ProtoMessage() {} func (*PartitioningDescriptor_List) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{6, 0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{6, 0} } func (m *PartitioningDescriptor_List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1000,7 +1000,7 @@ func (m *PartitioningDescriptor_Range) Reset() { *m = PartitioningDescri func (m *PartitioningDescriptor_Range) String() string { return proto.CompactTextString(m) } func (*PartitioningDescriptor_Range) ProtoMessage() {} func (*PartitioningDescriptor_Range) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{6, 1} + return fileDescriptor_structured_8ef81714cff0efcf, []int{6, 1} } func (m *PartitioningDescriptor_Range) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1164,7 +1164,7 @@ func (m *IndexDescriptor) Reset() { *m = IndexDescriptor{} } func (m *IndexDescriptor) String() string { return proto.CompactTextString(m) } func (*IndexDescriptor) ProtoMessage() {} func 
(*IndexDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{7} + return fileDescriptor_structured_8ef81714cff0efcf, []int{7} } func (m *IndexDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1215,7 +1215,7 @@ func (m *ConstraintToUpdate) Reset() { *m = ConstraintToUpdate{} } func (m *ConstraintToUpdate) String() string { return proto.CompactTextString(m) } func (*ConstraintToUpdate) ProtoMessage() {} func (*ConstraintToUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{8} + return fileDescriptor_structured_8ef81714cff0efcf, []int{8} } func (m *ConstraintToUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1258,7 +1258,7 @@ func (m *PrimaryKeySwap) Reset() { *m = PrimaryKeySwap{} } func (m *PrimaryKeySwap) String() string { return proto.CompactTextString(m) } func (*PrimaryKeySwap) ProtoMessage() {} func (*PrimaryKeySwap) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{9} + return fileDescriptor_structured_8ef81714cff0efcf, []int{9} } func (m *PrimaryKeySwap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1295,7 +1295,7 @@ func (m *ComputedColumnSwap) Reset() { *m = ComputedColumnSwap{} } func (m *ComputedColumnSwap) String() string { return proto.CompactTextString(m) } func (*ComputedColumnSwap) ProtoMessage() {} func (*ComputedColumnSwap) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{10} + return fileDescriptor_structured_8ef81714cff0efcf, []int{10} } func (m *ComputedColumnSwap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1351,7 +1351,7 @@ func (m *DescriptorMutation) Reset() { *m = DescriptorMutation{} } func (m *DescriptorMutation) String() string { return proto.CompactTextString(m) } func (*DescriptorMutation) ProtoMessage() {} func (*DescriptorMutation) Descriptor() ([]byte, []int) { - return 
fileDescriptor_structured_6e223c312147c376, []int{11} + return fileDescriptor_structured_8ef81714cff0efcf, []int{11} } func (m *DescriptorMutation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1578,6 +1578,112 @@ func _DescriptorMutation_OneofSizer(msg proto.Message) (n int) { return n } +// A table descriptor is named through a name map stored in the +// system.namespace table: a map from {parent_id, table_name} -> id. +// This name map can be cached for performance on a node in the cluster +// making reassigning a name complicated. In particular, since a +// name cannot be withdrawn across a cluster in a transaction at +// timestamp T, we have to worry about the following: +// +// 1. A table is dropped at T, and the name and descriptor are still +// cached and used by transactions at timestamps >= T. +// 2. A table is renamed from foo to bar at T, and both names foo and bar +// can be used by transactions at timestamps >= T. +// 3. A name foo is reassigned from one table to another at T, and the name +// foo can reference two different tables at timestamps >= T. +// +// The system ensures that a name can be resolved only to a single +// descriptor at a timestamp thereby permitting 1 and 2, but not 3 +// (the name references two tables). +// +// The transaction at T is followed by a time period when names no longer +// a part of the namespace are drained from the system. Once the old name +// is drained from the system another transaction at timestamp S is +// executed to release the name for future use. The interval from T to S +// is called the name drain interval: If the T transaction is removing +// the name foo then, at timestamps above S, foo can no longer be resolved. +// +// Consider a transaction at T in which name B is dropped, a new name C is +// created. Name C is viable as soon as the transaction commits. +// When the transaction at S commits, the name B is released for reuse. 
+// +// The transaction at S runs through the schema changer, with the system +// returning a response to the client initiating transaction T only after +// transaction at S is committed. So effectively the SQL transaction once +// it returns can be followed by SQL transactions that do not observe +// old name mappings. +// +// Note: an exception to this is #19925 which needs to be fixed. +// +// In order for transaction at S to act properly the system.namespace +// table entry for an old name references the descriptor who was the +// prior owner of the name requiring draining. +// +// Before T: B -> Desc B +// +// After T and before S: B -> Desc B, C -> Desc C +// +// After S: C -> Desc C +// +// Between T and S the name B is drained and the system is unable +// to assign it to another descriptor. +// +// BEGIN; +// RENAME foo TO bar; +// CREATE foo; +// +// will fail because CREATE foo is executed at T. +// +// RENAME foo TO bar; +// CREATE foo; +// +// will succeed because the RENAME returns after S and CREATE foo is +// executed after S. +// +// The above scheme suffers from the problem that a transaction can observe +// the partial effect of a committed transaction during the drain interval. +// For instance during the drain interval a transaction can see the correct +// assignment for C, and the old assignments for B. +// +type NameInfo struct { + // The database that the table belonged to before the rename (tables can be + // renamed from one db to another). + ParentID ID `protobuf:"varint,1,opt,name=parent_id,json=parentId,casttype=ID" json:"parent_id"` + // The schemaID of the schema the table belongs to before the rename/drop. + // Required to correctly identify which namespace entry to reclaim. 
+ ParentSchemaID ID `protobuf:"varint,3,opt,name=parent_schema_id,json=parentSchemaId,casttype=ID" json:"parent_schema_id"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name"` +} + +func (m *NameInfo) Reset() { *m = NameInfo{} } +func (m *NameInfo) String() string { return proto.CompactTextString(m) } +func (*NameInfo) ProtoMessage() {} +func (*NameInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_structured_8ef81714cff0efcf, []int{12} +} +func (m *NameInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NameInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (dst *NameInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_NameInfo.Merge(dst, src) +} +func (m *NameInfo) XXX_Size() int { + return m.Size() +} +func (m *NameInfo) XXX_DiscardUnknown() { + xxx_messageInfo_NameInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_NameInfo proto.InternalMessageInfo + // A TableDescriptor represents a table or view and is stored in a // structured metadata key. The TableDescriptor has a globally-unique ID, // while its member {Column,Index}Descriptors have locally-unique IDs. @@ -1586,14 +1692,6 @@ type TableDescriptor struct { // comparing it. Name string `protobuf:"bytes,1,opt,name=name" json:"name"` ID ID `protobuf:"varint,3,opt,name=id,casttype=ID" json:"id"` - // ID of the parent database. - ParentID ID `protobuf:"varint,4,opt,name=parent_id,json=parentId,casttype=ID" json:"parent_id"` - // ID of the parent schema. For backwards compatibility, 0 means the table is - // scoped under the public physical schema (id 29). Because of this backward - // compatibility issue, this field should not be accessed directly or through - // the generated getter. Instead, use GetParentSchemaID() which is defined in - // structured.go. 
- UnexposedParentSchemaID ID `protobuf:"varint,40,opt,name=unexposed_parent_schema_id,json=unexposedParentSchemaId,casttype=ID" json:"unexposed_parent_schema_id"` // Monotonically increasing version of the table descriptor. // // The design maintains two invariants: @@ -1620,8 +1718,22 @@ type TableDescriptor struct { // should live inside of a Descriptor. The Descriptor.Table() method takes an // hlc timestamp to ensure that this field is set properly when extracted from // a Descriptor. - ModificationTime hlc.Timestamp `protobuf:"bytes,7,opt,name=modification_time,json=modificationTime" json:"modification_time"` - Columns []ColumnDescriptor `protobuf:"bytes,8,rep,name=columns" json:"columns"` + ModificationTime hlc.Timestamp `protobuf:"bytes,7,opt,name=modification_time,json=modificationTime" json:"modification_time"` + // A list of draining names. The draining name entries are drained from + // the cluster wide name caches by incrementing the version for this + // descriptor and ensuring that there are no leases on prior + // versions of the descriptor. This field is then cleared and the version + // of the descriptor incremented. + DrainingNames []NameInfo `protobuf:"bytes,21,rep,name=draining_names,json=drainingNames" json:"draining_names"` + // ID of the parent database. + ParentID ID `protobuf:"varint,4,opt,name=parent_id,json=parentId,casttype=ID" json:"parent_id"` + // ID of the parent schema. For backwards compatibility, 0 means the table is + // scoped under the public physical schema (id 29). Because of this backward + // compatibility issue, this field should not be accessed directly or through + // the generated getter. Instead, use GetParentSchemaID() which is defined in + // structured.go. 
+ UnexposedParentSchemaID ID `protobuf:"varint,40,opt,name=unexposed_parent_schema_id,json=unexposedParentSchemaId,casttype=ID" json:"unexposed_parent_schema_id"` + Columns []ColumnDescriptor `protobuf:"bytes,8,rep,name=columns" json:"columns"` // next_column_id is used to ensure that deleted column ids are not reused. NextColumnID ColumnID `protobuf:"varint,9,opt,name=next_column_id,json=nextColumnId,casttype=ColumnID" json:"next_column_id"` // families holds information about the column families of this table. @@ -1647,12 +1759,6 @@ type TableDescriptor struct { State TableDescriptor_State `protobuf:"varint,19,opt,name=state,enum=cockroach.sql.sqlbase.TableDescriptor_State" json:"state"` OfflineReason string `protobuf:"bytes,38,opt,name=offline_reason,json=offlineReason" json:"offline_reason"` Checks []*TableDescriptor_CheckConstraint `protobuf:"bytes,20,rep,name=checks" json:"checks,omitempty"` - // A list of draining names. The draining name entries are drained from - // the cluster wide name caches by incrementing the version for this - // descriptor and ensuring that there are no leases on prior - // versions of the descriptor. This field is then cleared and the version - // of the descriptor incremented. - DrainingNames []TableDescriptor_NameInfo `protobuf:"bytes,21,rep,name=draining_names,json=drainingNames" json:"draining_names"` // The TableDescriptor is used for views in addition to tables. Views // use mostly the same fields as tables, but need to track the actual // query from the view definition as well. 
@@ -1730,7 +1836,7 @@ func (m *TableDescriptor) Reset() { *m = TableDescriptor{} } func (m *TableDescriptor) String() string { return proto.CompactTextString(m) } func (*TableDescriptor) ProtoMessage() {} func (*TableDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13} } func (m *TableDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1769,32 +1875,39 @@ func (m *TableDescriptor) GetID() ID { return 0 } -func (m *TableDescriptor) GetParentID() ID { +func (m *TableDescriptor) GetVersion() DescriptorVersion { if m != nil { - return m.ParentID + return m.Version } return 0 } -func (m *TableDescriptor) GetUnexposedParentSchemaID() ID { +func (m *TableDescriptor) GetModificationTime() hlc.Timestamp { if m != nil { - return m.UnexposedParentSchemaID + return m.ModificationTime } - return 0 + return hlc.Timestamp{} } -func (m *TableDescriptor) GetVersion() DescriptorVersion { +func (m *TableDescriptor) GetDrainingNames() []NameInfo { if m != nil { - return m.Version + return m.DrainingNames + } + return nil +} + +func (m *TableDescriptor) GetParentID() ID { + if m != nil { + return m.ParentID } return 0 } -func (m *TableDescriptor) GetModificationTime() hlc.Timestamp { +func (m *TableDescriptor) GetUnexposedParentSchemaID() ID { if m != nil { - return m.ModificationTime + return m.UnexposedParentSchemaID } - return hlc.Timestamp{} + return 0 } func (m *TableDescriptor) GetColumns() []ColumnDescriptor { @@ -1903,13 +2016,6 @@ func (m *TableDescriptor) GetChecks() []*TableDescriptor_CheckConstraint { return nil } -func (m *TableDescriptor) GetDrainingNames() []TableDescriptor_NameInfo { - if m != nil { - return m.DrainingNames - } - return nil -} - func (m *TableDescriptor) GetViewQuery() string { if m != nil { return m.ViewQuery @@ -2030,7 +2136,7 @@ func (m *TableDescriptor_SchemaChangeLease) Reset() { *m = TableDescript func (m 
*TableDescriptor_SchemaChangeLease) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_SchemaChangeLease) ProtoMessage() {} func (*TableDescriptor_SchemaChangeLease) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12, 0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13, 0} } func (m *TableDescriptor_SchemaChangeLease) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2076,7 +2182,7 @@ func (m *TableDescriptor_CheckConstraint) Reset() { *m = TableDescriptor func (m *TableDescriptor_CheckConstraint) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_CheckConstraint) ProtoMessage() {} func (*TableDescriptor_CheckConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12, 1} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13, 1} } func (m *TableDescriptor_CheckConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2101,112 +2207,6 @@ func (m *TableDescriptor_CheckConstraint) XXX_DiscardUnknown() { var xxx_messageInfo_TableDescriptor_CheckConstraint proto.InternalMessageInfo -// A table descriptor is named through a name map stored in the -// system.namespace table: a map from {parent_id, table_name} -> id. -// This name map can be cached for performance on a node in the cluster -// making reassigning a name complicated. In particular, since a -// name cannot be withdrawn across a cluster in a transaction at -// timestamp T, we have to worry about the following: -// -// 1. A table is dropped at T, and the name and descriptor are still -// cached and used by transactions at timestamps >= T. -// 2. A table is renamed from foo to bar at T, and both names foo and bar -// can be used by transactions at timestamps >= T. -// 3. A name foo is reassigned from one table to another at T, and the name -// foo can reference two different tables at timestamps >= T. 
-// -// The system ensures that a name can be resolved only to a single -// descriptor at a timestamp thereby permitting 1 and 2, but not 3 -// (the name references two tables). -// -// The transaction at T is followed by a time period when names no longer -// a part of the namespace are drained from the system. Once the old name -// is drained from the system another transaction at timestamp S is -// executed to release the name for future use. The interval from T to S -// is called the name drain interval: If the T transaction is removing -// the name foo then, at timestamps above S, foo can no longer be resolved. -// -// Consider a transaction at T in which name B is dropped, a new name C is -// created. Name C is viable as soon as the transaction commits. -// When the transaction at S commits, the name B is released for reuse. -// -// The transaction at S runs through the schema changer, with the system -// returning a response to the client initiating transaction T only after -// transaction at S is committed. So effectively the SQL transaction once -// it returns can be followed by SQL transactions that do not observe -// old name mappings. -// -// Note: an exception to this is #19925 which needs to be fixed. -// -// In order for transaction at S to act properly the system.namespace -// table entry for an old name references the descriptor who was the -// prior owner of the name requiring draining. -// -// Before T: B -> Desc B -// -// After T and before S: B -> Desc B, C -> Desc C -// -// After S: C -> Desc C -// -// Between T and S the name B is drained and the system is unable -// to assign it to another descriptor. -// -// BEGIN; -// RENAME foo TO bar; -// CREATE foo; -// -// will fail because CREATE foo is executed at T. -// -// RENAME foo TO bar; -// CREATE foo; -// -// will succeed because the RENAME returns after S and CREATE foo is -// executed after S. 
-// -// The above scheme suffers from the problem that a transaction can observe -// the partial effect of a committed transaction during the drain interval. -// For instance during the drain interval a transaction can see the correct -// assignment for C, and the old assignments for B. -// -type TableDescriptor_NameInfo struct { - // The database that the table belonged to before the rename (tables can be - // renamed from one db to another). - ParentID ID `protobuf:"varint,1,opt,name=parent_id,json=parentId,casttype=ID" json:"parent_id"` - // The schemaID of the schema the table belongs to before the rename/drop. - // Required to correctly identify which namespace entry to reclaim. - ParentSchemaID ID `protobuf:"varint,3,opt,name=parent_schema_id,json=parentSchemaId,casttype=ID" json:"parent_schema_id"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name"` -} - -func (m *TableDescriptor_NameInfo) Reset() { *m = TableDescriptor_NameInfo{} } -func (m *TableDescriptor_NameInfo) String() string { return proto.CompactTextString(m) } -func (*TableDescriptor_NameInfo) ProtoMessage() {} -func (*TableDescriptor_NameInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12, 2} -} -func (m *TableDescriptor_NameInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TableDescriptor_NameInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (dst *TableDescriptor_NameInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_TableDescriptor_NameInfo.Merge(dst, src) -} -func (m *TableDescriptor_NameInfo) XXX_Size() int { - return m.Size() -} -func (m *TableDescriptor_NameInfo) XXX_DiscardUnknown() { - xxx_messageInfo_TableDescriptor_NameInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_TableDescriptor_NameInfo proto.InternalMessageInfo - type TableDescriptor_Reference struct { // The ID of the 
relation that depends on this one. ID ID `protobuf:"varint,1,opt,name=id,casttype=ID" json:"id"` @@ -2222,7 +2222,7 @@ func (m *TableDescriptor_Reference) Reset() { *m = TableDescriptor_Refer func (m *TableDescriptor_Reference) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_Reference) ProtoMessage() {} func (*TableDescriptor_Reference) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12, 3} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13, 2} } func (m *TableDescriptor_Reference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2259,7 +2259,7 @@ func (m *TableDescriptor_MutationJob) Reset() { *m = TableDescriptor_Mut func (m *TableDescriptor_MutationJob) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_MutationJob) ProtoMessage() {} func (*TableDescriptor_MutationJob) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12, 4} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13, 3} } func (m *TableDescriptor_MutationJob) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2302,7 +2302,7 @@ func (m *TableDescriptor_SequenceOpts) Reset() { *m = TableDescriptor_Se func (m *TableDescriptor_SequenceOpts) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_SequenceOpts) ProtoMessage() {} func (*TableDescriptor_SequenceOpts) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12, 5} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13, 4} } func (m *TableDescriptor_SequenceOpts) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2342,7 +2342,7 @@ func (m *TableDescriptor_SequenceOpts_SequenceOwner) String() string { } func (*TableDescriptor_SequenceOpts_SequenceOwner) ProtoMessage() {} func (*TableDescriptor_SequenceOpts_SequenceOwner) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, 
[]int{12, 5, 0} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13, 4, 0} } func (m *TableDescriptor_SequenceOpts_SequenceOwner) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2382,7 +2382,7 @@ func (m *TableDescriptor_Replacement) Reset() { *m = TableDescriptor_Rep func (m *TableDescriptor_Replacement) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_Replacement) ProtoMessage() {} func (*TableDescriptor_Replacement) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12, 6} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13, 5} } func (m *TableDescriptor_Replacement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2419,7 +2419,7 @@ func (m *TableDescriptor_GCDescriptorMutation) Reset() { *m = TableDescr func (m *TableDescriptor_GCDescriptorMutation) String() string { return proto.CompactTextString(m) } func (*TableDescriptor_GCDescriptorMutation) ProtoMessage() {} func (*TableDescriptor_GCDescriptorMutation) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{12, 7} + return fileDescriptor_structured_8ef81714cff0efcf, []int{13, 6} } func (m *TableDescriptor_GCDescriptorMutation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2449,16 +2449,20 @@ var xxx_messageInfo_TableDescriptor_GCDescriptorMutation proto.InternalMessageIn // shared with other Descriptors. // Permissions are applied to all tables in the namespace. type DatabaseDescriptor struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name"` - ID ID `protobuf:"varint,2,opt,name=id,casttype=ID" json:"id"` - Privileges *PrivilegeDescriptor `protobuf:"bytes,3,opt,name=privileges" json:"privileges,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name" json:"name"` + ID ID `protobuf:"varint,2,opt,name=id,casttype=ID" json:"id"` + // Last modification time of the descriptor. 
+ ModificationTime hlc.Timestamp `protobuf:"bytes,4,opt,name=modification_time,json=modificationTime" json:"modification_time"` + Version DescriptorVersion `protobuf:"varint,5,opt,name=version,casttype=DescriptorVersion" json:"version"` + DrainingNames []NameInfo `protobuf:"bytes,6,rep,name=draining_names,json=drainingNames" json:"draining_names"` + Privileges *PrivilegeDescriptor `protobuf:"bytes,3,opt,name=privileges" json:"privileges,omitempty"` } func (m *DatabaseDescriptor) Reset() { *m = DatabaseDescriptor{} } func (m *DatabaseDescriptor) String() string { return proto.CompactTextString(m) } func (*DatabaseDescriptor) ProtoMessage() {} func (*DatabaseDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{13} + return fileDescriptor_structured_8ef81714cff0efcf, []int{14} } func (m *DatabaseDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2497,6 +2501,27 @@ func (m *DatabaseDescriptor) GetID() ID { return 0 } +func (m *DatabaseDescriptor) GetModificationTime() hlc.Timestamp { + if m != nil { + return m.ModificationTime + } + return hlc.Timestamp{} +} + +func (m *DatabaseDescriptor) GetVersion() DescriptorVersion { + if m != nil { + return m.Version + } + return 0 +} + +func (m *DatabaseDescriptor) GetDrainingNames() []NameInfo { + if m != nil { + return m.DrainingNames + } + return nil +} + func (m *DatabaseDescriptor) GetPrivileges() *PrivilegeDescriptor { if m != nil { return m.Privileges @@ -2508,14 +2533,18 @@ func (m *DatabaseDescriptor) GetPrivileges() *PrivilegeDescriptor { // metadata key. The TypeDescriptor has a globally-unique ID shared with other // Descriptors. type TypeDescriptor struct { + // name is the current name of this user defined type. + Name string `protobuf:"bytes,3,opt,name=name" json:"name"` + // id is the globally unique ID for this type. 
+ ID ID `protobuf:"varint,4,opt,name=id,casttype=ID" json:"id"` + Version DescriptorVersion `protobuf:"varint,9,opt,name=version,casttype=DescriptorVersion" json:"version"` + // Last modification time of the descriptor. + ModificationTime hlc.Timestamp `protobuf:"bytes,10,opt,name=modification_time,json=modificationTime" json:"modification_time"` + DrainingNames []NameInfo `protobuf:"bytes,11,rep,name=draining_names,json=drainingNames" json:"draining_names"` // parent_id represents the ID of the database that this type resides in. ParentID ID `protobuf:"varint,1,opt,name=parent_id,json=parentId,casttype=ID" json:"parent_id"` // parent_schema_id represents the ID of the schema that this type resides in. ParentSchemaID ID `protobuf:"varint,2,opt,name=parent_schema_id,json=parentSchemaId,casttype=ID" json:"parent_schema_id"` - // name is the current name of this user defined type. - Name string `protobuf:"bytes,3,opt,name=name" json:"name"` - // id is the globally unique ID for this type. - ID ID `protobuf:"varint,4,opt,name=id,casttype=ID" json:"id"` // array_type_id is the globally unique ID for the implicitly created array // type for this type. It is only set when the type descriptor points to a // non-array type. 
@@ -2531,7 +2560,7 @@ func (m *TypeDescriptor) Reset() { *m = TypeDescriptor{} } func (m *TypeDescriptor) String() string { return proto.CompactTextString(m) } func (*TypeDescriptor) ProtoMessage() {} func (*TypeDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{14} + return fileDescriptor_structured_8ef81714cff0efcf, []int{15} } func (m *TypeDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2556,30 +2585,51 @@ func (m *TypeDescriptor) XXX_DiscardUnknown() { var xxx_messageInfo_TypeDescriptor proto.InternalMessageInfo -func (m *TypeDescriptor) GetParentID() ID { +func (m *TypeDescriptor) GetName() string { if m != nil { - return m.ParentID + return m.Name + } + return "" +} + +func (m *TypeDescriptor) GetID() ID { + if m != nil { + return m.ID } return 0 } -func (m *TypeDescriptor) GetParentSchemaID() ID { +func (m *TypeDescriptor) GetVersion() DescriptorVersion { if m != nil { - return m.ParentSchemaID + return m.Version } return 0 } -func (m *TypeDescriptor) GetName() string { +func (m *TypeDescriptor) GetModificationTime() hlc.Timestamp { if m != nil { - return m.Name + return m.ModificationTime } - return "" + return hlc.Timestamp{} } -func (m *TypeDescriptor) GetID() ID { +func (m *TypeDescriptor) GetDrainingNames() []NameInfo { if m != nil { - return m.ID + return m.DrainingNames + } + return nil +} + +func (m *TypeDescriptor) GetParentID() ID { + if m != nil { + return m.ParentID + } + return 0 +} + +func (m *TypeDescriptor) GetParentSchemaID() ID { + if m != nil { + return m.ParentSchemaID } return 0 } @@ -2622,7 +2672,7 @@ func (m *TypeDescriptor_EnumMember) Reset() { *m = TypeDescriptor_EnumMe func (m *TypeDescriptor_EnumMember) String() string { return proto.CompactTextString(m) } func (*TypeDescriptor_EnumMember) ProtoMessage() {} func (*TypeDescriptor_EnumMember) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{14, 0} + return 
fileDescriptor_structured_8ef81714cff0efcf, []int{15, 0} } func (m *TypeDescriptor_EnumMember) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2650,12 +2700,16 @@ var xxx_messageInfo_TypeDescriptor_EnumMember proto.InternalMessageInfo // SchemaDescriptor represents a physical schema and is stored in a structured // metadata key. type SchemaDescriptor struct { - // parent_id refers to the database the schema is in. - ParentID ID `protobuf:"varint,1,opt,name=parent_id,json=parentId,casttype=ID" json:"parent_id"` // name is the name of the schema. Name string `protobuf:"bytes,2,opt,name=name" json:"name"` // id is the schema ID, globally unique across all descriptors. ID ID `protobuf:"varint,3,opt,name=id,casttype=ID" json:"id"` + // Last modification time of the descriptor. + ModificationTime hlc.Timestamp `protobuf:"bytes,5,opt,name=modification_time,json=modificationTime" json:"modification_time"` + Version DescriptorVersion `protobuf:"varint,6,opt,name=version,casttype=DescriptorVersion" json:"version"` + DrainingNames []NameInfo `protobuf:"bytes,7,rep,name=draining_names,json=drainingNames" json:"draining_names"` + // parent_id refers to the database the schema is in. + ParentID ID `protobuf:"varint,1,opt,name=parent_id,json=parentId,casttype=ID" json:"parent_id"` // privileges contains the privileges for the schema. 
Privileges *PrivilegeDescriptor `protobuf:"bytes,4,opt,name=privileges" json:"privileges,omitempty"` } @@ -2664,7 +2718,7 @@ func (m *SchemaDescriptor) Reset() { *m = SchemaDescriptor{} } func (m *SchemaDescriptor) String() string { return proto.CompactTextString(m) } func (*SchemaDescriptor) ProtoMessage() {} func (*SchemaDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{15} + return fileDescriptor_structured_8ef81714cff0efcf, []int{16} } func (m *SchemaDescriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2689,13 +2743,6 @@ func (m *SchemaDescriptor) XXX_DiscardUnknown() { var xxx_messageInfo_SchemaDescriptor proto.InternalMessageInfo -func (m *SchemaDescriptor) GetParentID() ID { - if m != nil { - return m.ParentID - } - return 0 -} - func (m *SchemaDescriptor) GetName() string { if m != nil { return m.Name @@ -2710,6 +2757,34 @@ func (m *SchemaDescriptor) GetID() ID { return 0 } +func (m *SchemaDescriptor) GetModificationTime() hlc.Timestamp { + if m != nil { + return m.ModificationTime + } + return hlc.Timestamp{} +} + +func (m *SchemaDescriptor) GetVersion() DescriptorVersion { + if m != nil { + return m.Version + } + return 0 +} + +func (m *SchemaDescriptor) GetDrainingNames() []NameInfo { + if m != nil { + return m.DrainingNames + } + return nil +} + +func (m *SchemaDescriptor) GetParentID() ID { + if m != nil { + return m.ParentID + } + return 0 +} + func (m *SchemaDescriptor) GetPrivileges() *PrivilegeDescriptor { if m != nil { return m.Privileges @@ -2732,7 +2807,7 @@ func (m *Descriptor) Reset() { *m = Descriptor{} } func (m *Descriptor) String() string { return proto.CompactTextString(m) } func (*Descriptor) ProtoMessage() {} func (*Descriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_structured_6e223c312147c376, []int{16} + return fileDescriptor_structured_8ef81714cff0efcf, []int{17} } func (m *Descriptor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2945,10 
+3020,10 @@ func init() { proto.RegisterType((*PrimaryKeySwap)(nil), "cockroach.sql.sqlbase.PrimaryKeySwap") proto.RegisterType((*ComputedColumnSwap)(nil), "cockroach.sql.sqlbase.ComputedColumnSwap") proto.RegisterType((*DescriptorMutation)(nil), "cockroach.sql.sqlbase.DescriptorMutation") + proto.RegisterType((*NameInfo)(nil), "cockroach.sql.sqlbase.NameInfo") proto.RegisterType((*TableDescriptor)(nil), "cockroach.sql.sqlbase.TableDescriptor") proto.RegisterType((*TableDescriptor_SchemaChangeLease)(nil), "cockroach.sql.sqlbase.TableDescriptor.SchemaChangeLease") proto.RegisterType((*TableDescriptor_CheckConstraint)(nil), "cockroach.sql.sqlbase.TableDescriptor.CheckConstraint") - proto.RegisterType((*TableDescriptor_NameInfo)(nil), "cockroach.sql.sqlbase.TableDescriptor.NameInfo") proto.RegisterType((*TableDescriptor_Reference)(nil), "cockroach.sql.sqlbase.TableDescriptor.Reference") proto.RegisterType((*TableDescriptor_MutationJob)(nil), "cockroach.sql.sqlbase.TableDescriptor.MutationJob") proto.RegisterType((*TableDescriptor_SequenceOpts)(nil), "cockroach.sql.sqlbase.TableDescriptor.SequenceOpts") @@ -3808,6 +3883,36 @@ func (this *DescriptorMutation_ComputedColumnSwap) Equal(that interface{}) bool } return true } +func (this *NameInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*NameInfo) + if !ok { + that2, ok := that.(NameInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ParentID != that1.ParentID { + return false + } + if this.ParentSchemaID != that1.ParentSchemaID { + return false + } + if this.Name != that1.Name { + return false + } + return true +} func (this *TableDescriptor) Equal(that interface{}) bool { if that == nil { return this == nil @@ -3833,16 +3938,24 @@ func (this *TableDescriptor) Equal(that interface{}) bool { if this.ID != that1.ID { return false } - if this.ParentID != 
that1.ParentID { + if this.Version != that1.Version { return false } - if this.UnexposedParentSchemaID != that1.UnexposedParentSchemaID { + if !this.ModificationTime.Equal(&that1.ModificationTime) { return false } - if this.Version != that1.Version { + if len(this.DrainingNames) != len(that1.DrainingNames) { return false } - if !this.ModificationTime.Equal(&that1.ModificationTime) { + for i := range this.DrainingNames { + if !this.DrainingNames[i].Equal(&that1.DrainingNames[i]) { + return false + } + } + if this.ParentID != that1.ParentID { + return false + } + if this.UnexposedParentSchemaID != that1.UnexposedParentSchemaID { return false } if len(this.Columns) != len(that1.Columns) { @@ -3915,14 +4028,6 @@ func (this *TableDescriptor) Equal(that interface{}) bool { return false } } - if len(this.DrainingNames) != len(that1.DrainingNames) { - return false - } - for i := range this.DrainingNames { - if !this.DrainingNames[i].Equal(&that1.DrainingNames[i]) { - return false - } - } if this.ViewQuery != that1.ViewQuery { return false } @@ -4071,36 +4176,6 @@ func (this *TableDescriptor_CheckConstraint) Equal(that interface{}) bool { } return true } -func (this *TableDescriptor_NameInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TableDescriptor_NameInfo) - if !ok { - that2, ok := that.(TableDescriptor_NameInfo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ParentID != that1.ParentID { - return false - } - if this.ParentSchemaID != that1.ParentSchemaID { - return false - } - if this.Name != that1.Name { - return false - } - return true -} func (this *TableDescriptor_Reference) Equal(that interface{}) bool { if that == nil { return this == nil @@ -4311,6 +4386,20 @@ func (this *DatabaseDescriptor) Equal(that interface{}) bool { if this.ID != that1.ID { return false } + if 
!this.ModificationTime.Equal(&that1.ModificationTime) { + return false + } + if this.Version != that1.Version { + return false + } + if len(this.DrainingNames) != len(that1.DrainingNames) { + return false + } + for i := range this.DrainingNames { + if !this.DrainingNames[i].Equal(&that1.DrainingNames[i]) { + return false + } + } if !this.Privileges.Equal(that1.Privileges) { return false } @@ -4335,16 +4424,30 @@ func (this *TypeDescriptor) Equal(that interface{}) bool { } else if this == nil { return false } - if this.ParentID != that1.ParentID { + if this.Name != that1.Name { return false } - if this.ParentSchemaID != that1.ParentSchemaID { + if this.ID != that1.ID { return false } - if this.Name != that1.Name { + if this.Version != that1.Version { return false } - if this.ID != that1.ID { + if !this.ModificationTime.Equal(&that1.ModificationTime) { + return false + } + if len(this.DrainingNames) != len(that1.DrainingNames) { + return false + } + for i := range this.DrainingNames { + if !this.DrainingNames[i].Equal(&that1.DrainingNames[i]) { + return false + } + } + if this.ParentID != that1.ParentID { + return false + } + if this.ParentSchemaID != that1.ParentSchemaID { return false } if this.ArrayTypeID != that1.ArrayTypeID { @@ -4412,15 +4515,29 @@ func (this *SchemaDescriptor) Equal(that interface{}) bool { } else if this == nil { return false } - if this.ParentID != that1.ParentID { - return false - } if this.Name != that1.Name { return false } if this.ID != that1.ID { return false } + if !this.ModificationTime.Equal(&that1.ModificationTime) { + return false + } + if this.Version != that1.Version { + return false + } + if len(this.DrainingNames) != len(that1.DrainingNames) { + return false + } + for i := range this.DrainingNames { + if !this.DrainingNames[i].Equal(&that1.DrainingNames[i]) { + return false + } + } + if this.ParentID != that1.ParentID { + return false + } if !this.Privileges.Equal(that1.Privileges) { return false } @@ -5440,6 +5557,34 @@ func 
(m *DescriptorMutation_ComputedColumnSwap) MarshalTo(dAtA []byte) (int, err } return i, nil } +func (m *NameInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NameInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintStructured(dAtA, i, uint64(m.ParentID)) + dAtA[i] = 0x12 + i++ + i = encodeVarintStructured(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x18 + i++ + i = encodeVarintStructured(dAtA, i, uint64(m.ParentSchemaID)) + return i, nil +} + func (m *TableDescriptor) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5846,34 +5991,6 @@ func (m *TableDescriptor_CheckConstraint) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *TableDescriptor_NameInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TableDescriptor_NameInfo) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintStructured(dAtA, i, uint64(m.ParentID)) - dAtA[i] = 0x12 - i++ - i = encodeVarintStructured(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x18 - i++ - i = encodeVarintStructured(dAtA, i, uint64(m.ParentSchemaID)) - return i, nil -} - func (m *TableDescriptor_Reference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6087,6 +6204,29 @@ func (m *DatabaseDescriptor) MarshalTo(dAtA []byte) (int, error) { } i += n25 } + dAtA[i] = 0x22 + i++ + i = encodeVarintStructured(dAtA, i, uint64(m.ModificationTime.Size())) + n26, err := m.ModificationTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x28 + i++ + i = 
encodeVarintStructured(dAtA, i, uint64(m.Version)) + if len(m.DrainingNames) > 0 { + for _, msg := range m.DrainingNames { + dAtA[i] = 0x32 + i++ + i = encodeVarintStructured(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -6137,15 +6277,38 @@ func (m *TypeDescriptor) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintStructured(dAtA, i, uint64(m.Alias.Size())) - n26, err := m.Alias.MarshalTo(dAtA[i:]) + n27, err := m.Alias.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 } dAtA[i] = 0x40 i++ i = encodeVarintStructured(dAtA, i, uint64(m.ArrayTypeID)) + dAtA[i] = 0x48 + i++ + i = encodeVarintStructured(dAtA, i, uint64(m.Version)) + dAtA[i] = 0x52 + i++ + i = encodeVarintStructured(dAtA, i, uint64(m.ModificationTime.Size())) + n28, err := m.ModificationTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + if len(m.DrainingNames) > 0 { + for _, msg := range m.DrainingNames { + dAtA[i] = 0x5a + i++ + i = encodeVarintStructured(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -6206,11 +6369,34 @@ func (m *SchemaDescriptor) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintStructured(dAtA, i, uint64(m.Privileges.Size())) - n27, err := m.Privileges.MarshalTo(dAtA[i:]) + n29, err := m.Privileges.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n29 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintStructured(dAtA, i, uint64(m.ModificationTime.Size())) + n30, err := m.ModificationTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + dAtA[i] = 0x30 + i++ + i = encodeVarintStructured(dAtA, i, uint64(m.Version)) + if len(m.DrainingNames) > 0 { + for _, msg := range m.DrainingNames { + dAtA[i] = 0x3a + i++ + i = encodeVarintStructured(dAtA, i, uint64(msg.Size())) + n, err 
:= msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } return i, nil } @@ -6231,11 +6417,11 @@ func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) { var l int _ = l if m.Union != nil { - nn28, err := m.Union.MarshalTo(dAtA[i:]) + nn31, err := m.Union.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += nn28 + i += nn31 } return i, nil } @@ -6246,11 +6432,11 @@ func (m *Descriptor_Table) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintStructured(dAtA, i, uint64(m.Table.Size())) - n29, err := m.Table.MarshalTo(dAtA[i:]) + n32, err := m.Table.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n32 } return i, nil } @@ -6260,11 +6446,11 @@ func (m *Descriptor_Database) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintStructured(dAtA, i, uint64(m.Database.Size())) - n30, err := m.Database.MarshalTo(dAtA[i:]) + n33, err := m.Database.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n33 } return i, nil } @@ -6274,11 +6460,11 @@ func (m *Descriptor_Type) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintStructured(dAtA, i, uint64(m.Type.Size())) - n31, err := m.Type.MarshalTo(dAtA[i:]) + n34, err := m.Type.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n34 } return i, nil } @@ -6288,11 +6474,11 @@ func (m *Descriptor_Schema) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintStructured(dAtA, i, uint64(m.Schema.Size())) - n32, err := m.Schema.MarshalTo(dAtA[i:]) + n35, err := m.Schema.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n35 } return i, nil } @@ -6724,6 +6910,19 @@ func (m *DescriptorMutation_ComputedColumnSwap) Size() (n int) { } return n } +func (m *NameInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovStructured(uint64(m.ParentID)) + l = len(m.Name) + n += 1 + l + sovStructured(uint64(l)) + n += 1 + 
sovStructured(uint64(m.ParentSchemaID)) + return n +} + func (m *TableDescriptor) Size() (n int) { if m == nil { return 0 @@ -6878,19 +7077,6 @@ func (m *TableDescriptor_CheckConstraint) Size() (n int) { return n } -func (m *TableDescriptor_NameInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovStructured(uint64(m.ParentID)) - l = len(m.Name) - n += 1 + l + sovStructured(uint64(l)) - n += 1 + sovStructured(uint64(m.ParentSchemaID)) - return n -} - func (m *TableDescriptor_Reference) Size() (n int) { if m == nil { return 0 @@ -6982,6 +7168,15 @@ func (m *DatabaseDescriptor) Size() (n int) { l = m.Privileges.Size() n += 1 + l + sovStructured(uint64(l)) } + l = m.ModificationTime.Size() + n += 1 + l + sovStructured(uint64(l)) + n += 1 + sovStructured(uint64(m.Version)) + if len(m.DrainingNames) > 0 { + for _, e := range m.DrainingNames { + l = e.Size() + n += 1 + l + sovStructured(uint64(l)) + } + } return n } @@ -7003,11 +7198,20 @@ func (m *TypeDescriptor) Size() (n int) { n += 1 + l + sovStructured(uint64(l)) } } - if m.Alias != nil { - l = m.Alias.Size() - n += 1 + l + sovStructured(uint64(l)) - } - n += 1 + sovStructured(uint64(m.ArrayTypeID)) + if m.Alias != nil { + l = m.Alias.Size() + n += 1 + l + sovStructured(uint64(l)) + } + n += 1 + sovStructured(uint64(m.ArrayTypeID)) + n += 1 + sovStructured(uint64(m.Version)) + l = m.ModificationTime.Size() + n += 1 + l + sovStructured(uint64(l)) + if len(m.DrainingNames) > 0 { + for _, e := range m.DrainingNames { + l = e.Size() + n += 1 + l + sovStructured(uint64(l)) + } + } return n } @@ -7040,6 +7244,15 @@ func (m *SchemaDescriptor) Size() (n int) { l = m.Privileges.Size() n += 1 + l + sovStructured(uint64(l)) } + l = m.ModificationTime.Size() + n += 1 + l + sovStructured(uint64(l)) + n += 1 + sovStructured(uint64(m.Version)) + if len(m.DrainingNames) > 0 { + for _, e := range m.DrainingNames { + l = e.Size() + n += 1 + l + sovStructured(uint64(l)) + } + } return n } @@ -10753,6 
+10966,123 @@ func (m *DescriptorMutation) Unmarshal(dAtA []byte) error { } return nil } +func (m *NameInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NameInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NameInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentID", wireType) + } + m.ParentID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ParentID |= (ID(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStructured + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentSchemaID", wireType) + } + m.ParentSchemaID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ParentSchemaID |= (ID(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipStructured(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStructured + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TableDescriptor) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -11239,7 +11569,7 @@ func (m *TableDescriptor) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DrainingNames = append(m.DrainingNames, TableDescriptor_NameInfo{}) + m.DrainingNames = append(m.DrainingNames, NameInfo{}) if err := m.DrainingNames[len(m.DrainingNames)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12097,138 +12427,20 @@ func (m *TableDescriptor_CheckConstraint) Unmarshal(dAtA []byte) error { } if iNdEx >= l { return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.IsNonNullConstraint = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hidden", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStructured - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Hidden = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipStructured(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStructured - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TableDescriptor_NameInfo) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStructured - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NameInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NameInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentID", wireType) - } - m.ParentID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStructured - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ParentID |= (ID(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStructured - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStructured - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + m.IsNonNullConstraint = bool(v != 0) + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentSchemaID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hidden", wireType) } - 
m.ParentSchemaID = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStructured @@ -12238,11 +12450,12 @@ func (m *TableDescriptor_NameInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ParentSchemaID |= (ID(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + m.Hidden = bool(v != 0) default: iNdEx = preIndex skippy, err := skipStructured(dAtA[iNdEx:]) @@ -13093,6 +13306,86 @@ func (m *DatabaseDescriptor) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStructured + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ModificationTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (DescriptorVersion(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DrainingNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthStructured + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DrainingNames = append(m.DrainingNames, NameInfo{}) + if err := m.DrainingNames[len(m.DrainingNames)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipStructured(dAtA[iNdEx:]) @@ -13331,6 +13624,86 @@ func (m *TypeDescriptor) Unmarshal(dAtA []byte) error { break } } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (DescriptorVersion(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStructured + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ModificationTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DrainingNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStructured + } + postIndex := iNdEx + msglen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.DrainingNames = append(m.DrainingNames, NameInfo{}) + if err := m.DrainingNames[len(m.DrainingNames)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipStructured(dAtA[iNdEx:]) @@ -13591,6 +13964,86 @@ func (m *SchemaDescriptor) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ModificationTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStructured + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ModificationTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (DescriptorVersion(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DrainingNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStructured + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStructured + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DrainingNames = 
append(m.DrainingNames, NameInfo{}) + if err := m.DrainingNames[len(m.DrainingNames)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipStructured(dAtA[iNdEx:]) @@ -13896,272 +14349,275 @@ var ( ) func init() { - proto.RegisterFile("sql/sqlbase/structured.proto", fileDescriptor_structured_6e223c312147c376) -} - -var fileDescriptor_structured_6e223c312147c376 = []byte{ - // 4195 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x3a, 0x49, 0x6f, 0x23, 0x57, - 0x7a, 0x2a, 0xee, 0xfc, 0xb8, 0xa8, 0xf8, 0xb4, 0x34, 0x5b, 0xf6, 0x48, 0x6a, 0xb6, 0xdb, 0xd6, - 0x8c, 0x6d, 0xa9, 0xad, 0x9e, 0x64, 0x7a, 0xec, 0x64, 0x60, 0x8a, 0xa4, 0x5a, 0x6c, 0xa9, 0x49, - 0xb9, 0xa4, 0x76, 0xcf, 0x24, 0x93, 0x54, 0x4a, 0xac, 0x47, 0xaa, 0xdc, 0xc5, 0x2a, 0x76, 0x55, - 0xb1, 0x5b, 0x04, 0x72, 0x4a, 0x80, 0x60, 0x2e, 0x09, 0x72, 0x49, 0x6e, 0x01, 0x8c, 0xc4, 0x48, - 0xe6, 0x9a, 0x4b, 0x72, 0x0b, 0x90, 0x9b, 0x6f, 0x19, 0x20, 0x97, 0xc9, 0x45, 0x48, 0xe4, 0x4b, - 0x7e, 0x41, 0x02, 0xf8, 0x14, 0xbc, 0xad, 0x16, 0x2e, 0x1a, 0xaa, 0xdb, 0xb9, 0x08, 0xaa, 0x6f, - 0x7b, 0xef, 0x7d, 0xef, 0xdb, 0x1f, 0xe1, 0x6d, 0xf7, 0x85, 0xb9, 0xe3, 0xbe, 0x30, 0xcf, 0x34, - 0x17, 0xef, 0xb8, 0x9e, 0x33, 0xec, 0x78, 0x43, 0x07, 0xeb, 0xdb, 0x03, 0xc7, 0xf6, 0x6c, 0xb4, - 0xd2, 0xb1, 0x3b, 0xcf, 0x1d, 0x5b, 0xeb, 0x9c, 0x6f, 0xbb, 0x2f, 0xcc, 0x6d, 0x4e, 0xb7, 0x56, - 0x1e, 0x7a, 0x86, 0xb9, 0x73, 0x6e, 0x76, 0x76, 0x3c, 0xa3, 0x8f, 0x5d, 0x4f, 0xeb, 0x0f, 0x18, - 0xc3, 0xda, 0x5b, 0x61, 0x71, 0x03, 0xc7, 0x78, 0x69, 0x98, 0xb8, 0x87, 0x39, 0x72, 0x85, 0x20, - 0xbd, 0xd1, 0x00, 0xbb, 0xec, 0x2f, 0x07, 0xdf, 0xee, 0x61, 0x7b, 0xa7, 0x87, 0x6d, 0xc3, 0xd2, - 0xf1, 0xc5, 0x4e, 0xc7, 0xb6, 0xba, 0x46, 0x8f, 0xa3, 0x96, 0x7b, 0x76, 0xcf, 0xa6, 0xff, 0xee, - 0x90, 0xff, 0x18, 0xb4, 0xf2, 0x27, 0x49, 0x58, 0xda, 0xb7, 0x1d, 0x6c, 0xf4, 0xac, 0x43, 0x3c, - 0x52, 0x70, 0x17, 0x3b, 
0xd8, 0xea, 0x60, 0xb4, 0x09, 0x49, 0x4f, 0x3b, 0x33, 0x71, 0x59, 0xda, - 0x94, 0xb6, 0x0a, 0x7b, 0xf0, 0xf5, 0xe5, 0xc6, 0xc2, 0xb7, 0x97, 0x1b, 0xb1, 0x66, 0x5d, 0x61, - 0x08, 0x74, 0x0f, 0x92, 0x74, 0x95, 0x72, 0x8c, 0x52, 0x2c, 0x72, 0x8a, 0x74, 0x93, 0x00, 0x09, - 0x19, 0xc5, 0xa2, 0x32, 0x24, 0x2c, 0xad, 0x8f, 0xcb, 0xf1, 0x4d, 0x69, 0x2b, 0xbb, 0x97, 0x20, - 0x54, 0x0a, 0x85, 0xa0, 0x43, 0xc8, 0xbc, 0xd4, 0x4c, 0x43, 0x37, 0xbc, 0x51, 0x39, 0xb1, 0x29, - 0x6d, 0x15, 0x77, 0xbf, 0xbf, 0x3d, 0x55, 0x47, 0xdb, 0x35, 0xdb, 0x72, 0x3d, 0x47, 0x33, 0x2c, - 0xef, 0x73, 0xce, 0xc0, 0x05, 0xf9, 0x02, 0xd0, 0x7d, 0x28, 0xb9, 0xe7, 0x9a, 0x83, 0x75, 0x75, - 0xe0, 0xe0, 0xae, 0x71, 0xa1, 0x9a, 0xd8, 0x2a, 0x27, 0x37, 0xa5, 0xad, 0x24, 0x27, 0x5d, 0x64, - 0xe8, 0x63, 0x8a, 0x3d, 0xc2, 0x16, 0x3a, 0x85, 0xac, 0x6d, 0xa9, 0x3a, 0x36, 0xb1, 0x87, 0xcb, - 0x29, 0xba, 0xfe, 0x47, 0x33, 0xd6, 0x9f, 0xa2, 0xa0, 0xed, 0x6a, 0xc7, 0x33, 0x6c, 0x4b, 0xec, - 0xc3, 0xb6, 0xea, 0x54, 0x10, 0x97, 0x3a, 0x1c, 0xe8, 0x9a, 0x87, 0xcb, 0xe9, 0x37, 0x96, 0xfa, - 0x94, 0x0a, 0x42, 0x47, 0x90, 0xec, 0x6b, 0x5e, 0xe7, 0xbc, 0x9c, 0xa1, 0x12, 0xef, 0xdf, 0x40, - 0xe2, 0x13, 0xc2, 0xc7, 0x05, 0x32, 0x21, 0x95, 0x67, 0x90, 0x62, 0xeb, 0xa0, 0x02, 0x64, 0x5b, - 0x6d, 0xb5, 0x5a, 0x3b, 0x6d, 0xb6, 0x5b, 0xf2, 0x02, 0xca, 0x43, 0x46, 0x69, 0x9c, 0x9c, 0x2a, - 0xcd, 0xda, 0xa9, 0x2c, 0x91, 0xaf, 0x93, 0xc6, 0xa9, 0xda, 0x7a, 0x7a, 0x74, 0x24, 0xc7, 0xd0, - 0x22, 0xe4, 0xc8, 0x57, 0xbd, 0xb1, 0x5f, 0x7d, 0x7a, 0x74, 0x2a, 0xc7, 0x51, 0x0e, 0xd2, 0xb5, - 0xea, 0x49, 0xad, 0x5a, 0x6f, 0xc8, 0x89, 0xb5, 0xc4, 0x2f, 0xbf, 0x5a, 0x5f, 0xa8, 0xdc, 0x87, - 0x24, 0x5d, 0x0e, 0x01, 0xa4, 0x4e, 0x9a, 0x4f, 0x8e, 0x8f, 0x1a, 0xf2, 0x02, 0xca, 0x40, 0x62, - 0x9f, 0x88, 0x90, 0x08, 0xc7, 0x71, 0x55, 0x39, 0x6d, 0x56, 0x8f, 0xe4, 0x18, 0xe3, 0xf8, 0x38, - 0xf1, 0xdf, 0x5f, 0x6e, 0x48, 0x95, 0x7f, 0x4f, 0xc1, 0x72, 0xb0, 0xf7, 0xe0, 0xb6, 0x51, 0x0d, - 0x16, 0x6d, 0xc7, 0xe8, 0x19, 0x96, 0x4a, 0x6d, 0x4e, 0x35, 
0x74, 0x6e, 0x8f, 0x6f, 0x91, 0xf3, - 0x5c, 0x5d, 0x6e, 0x14, 0xda, 0x14, 0x7d, 0x4a, 0xb0, 0xcd, 0x3a, 0x37, 0xd0, 0x82, 0x1d, 0x02, - 0xea, 0xe8, 0x10, 0x4a, 0x5c, 0x48, 0xc7, 0x36, 0x87, 0x7d, 0x4b, 0x35, 0x74, 0xb7, 0x1c, 0xdb, - 0x8c, 0x6f, 0x15, 0xf6, 0x36, 0xae, 0x2e, 0x37, 0x16, 0x99, 0x88, 0x1a, 0xc5, 0x35, 0xeb, 0xee, - 0xb7, 0x97, 0x1b, 0x19, 0xf1, 0xa1, 0xf0, 0xe5, 0xf9, 0xb7, 0xee, 0xa2, 0x67, 0xb0, 0xe2, 0x08, - 0xdd, 0xea, 0x61, 0x81, 0x71, 0x2a, 0xf0, 0xee, 0xd5, 0xe5, 0xc6, 0x92, 0xaf, 0x7c, 0x7d, 0xba, - 0xd0, 0x25, 0x67, 0x9c, 0x40, 0x77, 0x51, 0x1b, 0x42, 0xe0, 0xe0, 0xb8, 0x09, 0x7a, 0xdc, 0x0d, - 0x7e, 0xdc, 0x52, 0x20, 0x3a, 0x7a, 0xe4, 0x92, 0x33, 0x86, 0xd0, 0x7d, 0xc7, 0x4b, 0x5e, 0xeb, - 0x78, 0xa9, 0x37, 0x75, 0xbc, 0x88, 0x1b, 0xa5, 0xff, 0x5f, 0xdc, 0x28, 0xf3, 0x9d, 0xbb, 0x51, - 0xf6, 0x3b, 0x70, 0x23, 0x54, 0x85, 0x25, 0x13, 0xf7, 0xb4, 0xce, 0x48, 0xe5, 0xe6, 0xc5, 0xc2, - 0x21, 0xd0, 0x1b, 0x2b, 0x8d, 0x85, 0xc3, 0xb2, 0xa4, 0x94, 0x18, 0x35, 0x33, 0x37, 0x0a, 0x46, - 0x4d, 0xb8, 0xc5, 0x45, 0x84, 0xee, 0x9e, 0x89, 0xc9, 0xcd, 0x12, 0xb3, 0xc2, 0x38, 0x02, 0x4b, - 0xa0, 0x28, 0xe6, 0x49, 0x8f, 0x13, 0x99, 0xbc, 0x5c, 0x78, 0x9c, 0xc8, 0x14, 0xe4, 0x62, 0xe5, - 0xaf, 0x13, 0x20, 0x33, 0xfb, 0xaa, 0x63, 0xb7, 0xe3, 0x18, 0x03, 0xcf, 0x76, 0x7c, 0xab, 0x90, - 0x26, 0xac, 0xe2, 0x5d, 0x88, 0x19, 0x3a, 0x0f, 0xe6, 0xab, 0xdc, 0xde, 0x62, 0xd4, 0xc0, 0x02, - 0xcb, 0x8d, 0x19, 0x3a, 0xda, 0x86, 0x04, 0xc9, 0x38, 0x34, 0xa0, 0xe7, 0x76, 0xd7, 0xc6, 0x75, - 0x88, 0xfb, 0xdb, 0x2c, 0x21, 0x9d, 0x2a, 0x94, 0x0e, 0x6d, 0x42, 0xc6, 0x1a, 0x9a, 0x26, 0x4d, - 0x26, 0xc4, 0x9a, 0x33, 0xe2, 0x5a, 0x04, 0x14, 0xdd, 0x81, 0xbc, 0x8e, 0xbb, 0xda, 0xd0, 0xf4, - 0x54, 0x7c, 0x31, 0x70, 0x98, 0xc5, 0x2a, 0x39, 0x0e, 0x6b, 0x5c, 0x0c, 0x1c, 0xf4, 0x36, 0xa4, - 0xce, 0x0d, 0x5d, 0xc7, 0x16, 0x35, 0x58, 0x21, 0x82, 0xc3, 0xd0, 0x2e, 0x94, 0x86, 0x2e, 0x76, - 0x55, 0x17, 0xbf, 0x18, 0x12, 0x95, 0x50, 0x87, 0x04, 0xea, 0x90, 0x29, 0xee, 0x20, 0x8b, 0x84, - 
0xe0, 0x84, 0xe3, 0x89, 0xbf, 0xdd, 0x81, 0x7c, 0xc7, 0xee, 0x0f, 0x86, 0x1e, 0x66, 0x8b, 0xe6, - 0xd8, 0xa2, 0x1c, 0x46, 0x17, 0xdd, 0x85, 0x92, 0xfd, 0xca, 0x1a, 0x13, 0x9b, 0x8f, 0x8a, 0x25, - 0x04, 0x61, 0xb1, 0x7b, 0x00, 0xa6, 0xdd, 0x33, 0x3a, 0x9a, 0x49, 0xbc, 0xb7, 0x40, 0xb5, 0x79, - 0x97, 0x6b, 0x73, 0xf1, 0x88, 0x61, 0x84, 0x3a, 0x23, 0xaa, 0xcd, 0x72, 0xb6, 0xa6, 0x8e, 0xf6, - 0xe1, 0x7b, 0x9a, 0xe9, 0x61, 0x47, 0x84, 0x17, 0xa2, 0x46, 0xd5, 0xb0, 0xd4, 0x81, 0x63, 0xf7, - 0x1c, 0xec, 0xba, 0xe5, 0x62, 0x48, 0x07, 0xb7, 0x29, 0x29, 0x13, 0x73, 0x3a, 0x1a, 0xe0, 0xa6, - 0x75, 0xcc, 0xc9, 0x7c, 0x93, 0xc8, 0xc8, 0xd9, 0xc7, 0x89, 0x4c, 0x56, 0x86, 0xc7, 0x89, 0x4c, - 0x5a, 0xce, 0x54, 0xfe, 0x22, 0x06, 0xab, 0x8c, 0x61, 0x5f, 0xeb, 0x1b, 0xe6, 0xe8, 0x4d, 0xcd, - 0x83, 0x49, 0xe1, 0xe6, 0x41, 0xf5, 0x4a, 0xb7, 0x4d, 0xd8, 0x58, 0x5c, 0xa4, 0x7a, 0x25, 0xb0, - 0x16, 0x01, 0xa1, 0x87, 0x00, 0xa1, 0xc0, 0x99, 0xa0, 0x0a, 0xbd, 0x7d, 0x75, 0xb9, 0x91, 0x9d, - 0x1e, 0x2e, 0xb3, 0x9d, 0x50, 0x90, 0x2c, 0x09, 0x4b, 0xf1, 0x25, 0x50, 0x73, 0x09, 0x29, 0xb9, - 0xce, 0x08, 0xa6, 0x2a, 0x79, 0x51, 0x8f, 0x20, 0x75, 0x9e, 0x7f, 0xfe, 0x39, 0x06, 0xcb, 0x4d, - 0xcb, 0xc3, 0x8e, 0x89, 0xb5, 0x97, 0x38, 0xa4, 0x8e, 0x9f, 0x42, 0x56, 0xb3, 0x3a, 0xd8, 0xf5, - 0x6c, 0xc7, 0x2d, 0x4b, 0x9b, 0xf1, 0xad, 0xdc, 0xee, 0x0f, 0x67, 0x04, 0x8d, 0x69, 0xfc, 0xdb, - 0x55, 0xce, 0xcc, 0x35, 0x19, 0x08, 0x5b, 0xfb, 0x17, 0x09, 0x32, 0x02, 0x8b, 0xee, 0x43, 0x66, - 0x2c, 0xbf, 0xad, 0xf0, 0xd3, 0xa4, 0xa3, 0x61, 0x3e, 0xed, 0xf1, 0xe0, 0xfe, 0x5b, 0x90, 0xa1, - 0x61, 0x42, 0xf5, 0xef, 0x64, 0x4d, 0x70, 0xf0, 0x48, 0x11, 0x2e, 0xc5, 0xd2, 0x94, 0xb6, 0xa9, - 0xa3, 0xda, 0xb4, 0x2a, 0x29, 0x4e, 0xf9, 0x6f, 0x09, 0xfd, 0x9d, 0x44, 0xeb, 0xa4, 0x89, 0xc2, - 0x89, 0xe9, 0x8c, 0x6b, 0xee, 0x9f, 0x24, 0x28, 0x11, 0x06, 0x1d, 0xeb, 0x21, 0xb5, 0xdd, 0x05, - 0x30, 0x5c, 0xd5, 0x65, 0x70, 0x7a, 0x22, 0x61, 0xad, 0x59, 0xc3, 0xe5, 0xe4, 0xbe, 0xa9, 0xc5, - 0x26, 0x4c, 0xed, 0xc7, 0x50, 0xa0, 
0xbc, 0xea, 0xd9, 0xb0, 0xf3, 0x1c, 0x7b, 0x2e, 0xdd, 0x61, - 0x72, 0x6f, 0x99, 0xef, 0x30, 0x4f, 0x25, 0xec, 0x31, 0x9c, 0x92, 0x77, 0x43, 0x5f, 0x13, 0xd6, - 0x97, 0x98, 0xb0, 0x3e, 0xbe, 0xf1, 0xff, 0x8d, 0xc3, 0xea, 0xb1, 0xe6, 0x78, 0x06, 0x49, 0x14, - 0x86, 0xd5, 0x0b, 0xed, 0xfe, 0x1e, 0xe4, 0xac, 0x61, 0x9f, 0x1b, 0x98, 0xcb, 0x2f, 0x84, 0xed, - 0x0f, 0xac, 0x61, 0x9f, 0xd9, 0x8e, 0x8b, 0x8e, 0x20, 0x61, 0x1a, 0xae, 0x47, 0x2b, 0x89, 0xdc, - 0xee, 0xee, 0x0c, 0xb3, 0x98, 0xbe, 0xc6, 0xf6, 0x91, 0xe1, 0x7a, 0xe2, 0xcc, 0x44, 0x0a, 0x6a, - 0x43, 0xd2, 0xd1, 0xac, 0x1e, 0xa6, 0xfe, 0x92, 0xdb, 0x7d, 0x70, 0x33, 0x71, 0x0a, 0x61, 0x15, - 0xd9, 0x89, 0xca, 0x59, 0xfb, 0x1b, 0x09, 0x12, 0x64, 0x95, 0x6b, 0x5c, 0x7a, 0x15, 0x52, 0x2f, - 0x35, 0x73, 0x88, 0x59, 0x35, 0x94, 0x57, 0xf8, 0x17, 0xfa, 0x03, 0x58, 0x74, 0x87, 0x67, 0x83, - 0xd0, 0x52, 0x3c, 0xd8, 0x7f, 0x78, 0xa3, 0x5d, 0xf9, 0x85, 0x77, 0x54, 0x16, 0xbb, 0x80, 0xb5, - 0x17, 0x90, 0xa4, 0xbb, 0xbe, 0x66, 0x7f, 0x77, 0x20, 0xef, 0xd9, 0x2a, 0xbe, 0xe8, 0x98, 0x43, - 0xd7, 0x78, 0xc9, 0x2c, 0x25, 0xaf, 0xe4, 0x3c, 0xbb, 0x21, 0x40, 0xe8, 0x1e, 0x14, 0xbb, 0x8e, - 0xdd, 0x57, 0x0d, 0x4b, 0x10, 0xc5, 0x29, 0x51, 0x81, 0x40, 0x9b, 0x02, 0x18, 0x31, 0xd9, 0xbf, - 0xca, 0xc3, 0x22, 0x75, 0x8c, 0xb9, 0xc2, 0xde, 0xbd, 0x50, 0xd8, 0x5b, 0x89, 0x84, 0x3d, 0xdf, - 0xbb, 0x48, 0xd4, 0x7b, 0x1b, 0x52, 0x43, 0xcb, 0x78, 0x31, 0x64, 0xeb, 0xfb, 0xf9, 0x89, 0xc1, - 0xe6, 0xb0, 0x4a, 0xf4, 0x01, 0x20, 0x12, 0x0a, 0xb0, 0x1a, 0x21, 0x4c, 0x52, 0x42, 0x99, 0x62, - 0x6a, 0x33, 0x23, 0x68, 0xea, 0x06, 0x11, 0xf4, 0x00, 0x64, 0x7c, 0xe1, 0x39, 0x5a, 0xb8, 0x74, - 0x4d, 0x53, 0xfe, 0xf5, 0xab, 0xcb, 0x8d, 0x62, 0x83, 0xe0, 0xa6, 0x0b, 0x29, 0xe2, 0x10, 0x4e, - 0x27, 0x56, 0x52, 0xe2, 0x32, 0x74, 0xc3, 0xc1, 0xb4, 0xe0, 0x72, 0xcb, 0x99, 0xcd, 0xf8, 0x35, - 0x85, 0xd5, 0x98, 0xda, 0xb7, 0xeb, 0x82, 0x51, 0x91, 0x99, 0x28, 0x1f, 0xe0, 0xa2, 0x13, 0xc8, - 0x75, 0x59, 0x1d, 0xa6, 0x3e, 0xc7, 0x23, 0x5a, 0xb1, 0xe5, 0x76, 0x7f, 
0x30, 0x7f, 0xc5, 0xb6, - 0x97, 0x22, 0x57, 0x50, 0x96, 0x14, 0xe8, 0xfa, 0x48, 0xf4, 0x0c, 0x0a, 0xa1, 0x42, 0xeb, 0x6c, - 0x44, 0x8b, 0x84, 0xd7, 0x13, 0x9b, 0x0f, 0x04, 0xed, 0x8d, 0xd0, 0x67, 0x00, 0x86, 0x9f, 0x00, - 0x68, 0x2d, 0x91, 0xdb, 0x7d, 0xff, 0x06, 0x99, 0x42, 0xc4, 0x97, 0x40, 0x08, 0x7a, 0x06, 0xc5, - 0xe0, 0x8b, 0x6e, 0x36, 0x7f, 0xe3, 0xcd, 0x32, 0xa9, 0x85, 0x90, 0x9c, 0x3d, 0x52, 0xb1, 0x2f, - 0x93, 0x2a, 0xc7, 0x76, 0x0d, 0x0f, 0x87, 0xcd, 0xa0, 0x40, 0xcd, 0xa0, 0x72, 0x75, 0xb9, 0x81, - 0x6a, 0x02, 0x3f, 0xdd, 0x14, 0x50, 0x67, 0x0c, 0xcf, 0x0c, 0x2b, 0x62, 0xc0, 0x44, 0x62, 0x31, - 0x30, 0xac, 0x93, 0xc0, 0x84, 0x27, 0x0c, 0x2b, 0x64, 0xde, 0xac, 0xc5, 0xca, 0x47, 0x62, 0xcf, - 0xe2, 0xeb, 0xc7, 0x9e, 0x88, 0x20, 0xd4, 0xe0, 0x95, 0xab, 0x4c, 0xab, 0xff, 0xf7, 0xe7, 0x34, - 0x52, 0x52, 0x54, 0x89, 0x90, 0x40, 0x0b, 0xda, 0x07, 0x80, 0x3a, 0x0e, 0xd6, 0x3c, 0xac, 0x93, - 0xca, 0xd1, 0x34, 0x3a, 0x86, 0x67, 0x8e, 0xca, 0xa5, 0x90, 0xdf, 0x97, 0x38, 0xbe, 0xe1, 0xa3, - 0xd1, 0x43, 0x48, 0xbf, 0xc4, 0x8e, 0x6b, 0xd8, 0x56, 0x19, 0xd1, 0x60, 0xb2, 0xce, 0x2b, 0xfb, - 0xd5, 0xb1, 0xf5, 0x3e, 0x67, 0x54, 0x8a, 0x20, 0x47, 0x07, 0x50, 0xc0, 0x56, 0xc7, 0xd6, 0x0d, - 0xab, 0x47, 0x2b, 0xc1, 0xf2, 0x52, 0x50, 0xef, 0x7c, 0x7b, 0xb9, 0xf1, 0xd6, 0x18, 0x7f, 0x83, - 0xd3, 0x92, 0x6d, 0x2b, 0x79, 0x1c, 0xfa, 0x42, 0x07, 0x90, 0x16, 0x39, 0x79, 0x99, 0xea, 0x74, - 0x6b, 0x86, 0x0a, 0x26, 0x32, 0x3a, 0x3f, 0x97, 0x60, 0x27, 0x35, 0xbd, 0x6e, 0xb8, 0xa4, 0x16, - 0xd1, 0xcb, 0x2b, 0xe1, 0x9a, 0x5e, 0x40, 0x51, 0x0d, 0xa0, 0x87, 0x6d, 0x95, 0x4d, 0xa0, 0xca, - 0xab, 0x74, 0xb9, 0xf5, 0xd0, 0x72, 0x3d, 0x6c, 0x6f, 0x8b, 0x39, 0x15, 0x69, 0x33, 0xbb, 0x46, - 0x4f, 0x94, 0x08, 0x3d, 0x6c, 0x33, 0x00, 0xaa, 0x40, 0x76, 0xe0, 0x60, 0xdd, 0xe8, 0x90, 0x2e, - 0xf0, 0x56, 0x28, 0x36, 0x07, 0xe0, 0xca, 0x3a, 0x64, 0xfd, 0xa8, 0x81, 0xd2, 0x10, 0xaf, 0x9e, - 0xd4, 0xd8, 0xd0, 0xa1, 0xde, 0x38, 0xa9, 0xc9, 0x52, 0xe5, 0x0e, 0x24, 0xe8, 0xe1, 0x73, 0x90, - 0xde, 0x6f, 
0x2b, 0xcf, 0xaa, 0x4a, 0x9d, 0x0d, 0x3a, 0x9a, 0xad, 0xcf, 0x1b, 0xca, 0x69, 0xa3, - 0x2e, 0x8b, 0xbc, 0xf0, 0xaf, 0x71, 0x40, 0x41, 0xbf, 0x7b, 0x6a, 0xf3, 0x9e, 0xb1, 0x07, 0x8b, - 0x1d, 0x1f, 0xca, 0x2e, 0x40, 0xda, 0x8c, 0x6d, 0x15, 0x77, 0x1f, 0xfe, 0xc6, 0x9e, 0x59, 0xc8, - 0x08, 0x83, 0x02, 0x63, 0x2a, 0x76, 0x22, 0xd0, 0x50, 0x3d, 0x14, 0x1b, 0xcb, 0x41, 0x0a, 0x24, - 0x3b, 0xe7, 0xb8, 0xf3, 0x9c, 0x67, 0xe1, 0xdf, 0x9e, 0xb1, 0x30, 0x2d, 0x15, 0x43, 0x86, 0x5b, - 0x23, 0x3c, 0xc1, 0xd2, 0xa2, 0x3c, 0xa0, 0xa2, 0x90, 0x12, 0x0d, 0xaf, 0x89, 0x6b, 0x23, 0xd6, - 0xb4, 0xd9, 0x8c, 0x88, 0x58, 0xa1, 0xe8, 0xfa, 0x10, 0x16, 0x2d, 0xdb, 0x53, 0x49, 0x5f, 0xc7, - 0xa3, 0x00, 0xed, 0xd6, 0x0a, 0x7b, 0x32, 0xb7, 0xd5, 0xc0, 0xe7, 0x0b, 0x96, 0xed, 0xb5, 0x86, - 0x26, 0x6f, 0x85, 0x2a, 0x1f, 0x43, 0x31, 0xaa, 0x23, 0x94, 0x85, 0x64, 0xed, 0xa0, 0x51, 0x3b, - 0x94, 0x17, 0xd0, 0x22, 0xe4, 0xf6, 0xdb, 0x4a, 0xa3, 0xf9, 0xa8, 0xa5, 0x1e, 0x36, 0x7e, 0xc6, - 0x06, 0x53, 0xad, 0xb6, 0x18, 0x4c, 0xf9, 0x5d, 0x4e, 0x52, 0x4e, 0x55, 0xfe, 0x47, 0x82, 0xe2, - 0xb1, 0x63, 0xf4, 0x35, 0x67, 0x74, 0x88, 0x47, 0x27, 0xaf, 0xb4, 0x01, 0xfa, 0x14, 0x96, 0x2d, - 0xfc, 0x4a, 0x1d, 0x30, 0xa8, 0xea, 0x57, 0xcd, 0xd2, 0xf4, 0xa9, 0x65, 0xc9, 0xc2, 0xaf, 0xb8, - 0x84, 0x26, 0x2f, 0x9a, 0x3f, 0x80, 0x9c, 0x6d, 0xf2, 0xb6, 0x1c, 0x8b, 0xc9, 0x51, 0x2e, 0xcc, - 0x04, 0xb6, 0xc9, 0xba, 0x70, 0x9a, 0xc8, 0x73, 0x64, 0x3d, 0x41, 0x1d, 0x9f, 0x42, 0x6d, 0xe1, - 0x57, 0x82, 0xfa, 0x53, 0x58, 0x26, 0xb2, 0x27, 0x76, 0x97, 0x98, 0xb1, 0x3b, 0xdb, 0xd4, 0xa3, - 0xbb, 0xe3, 0xc6, 0xfb, 0x67, 0x12, 0xd0, 0xa0, 0x3d, 0xf4, 0xc4, 0x4c, 0x89, 0x1e, 0xfe, 0x87, - 0x50, 0x20, 0x9b, 0x09, 0x7a, 0x25, 0x69, 0xc6, 0x7d, 0x90, 0x3d, 0x8b, 0x08, 0x4c, 0xb8, 0xc8, - 0xa6, 0x02, 0xae, 0xd8, 0x2c, 0x2e, 0xdb, 0xd4, 0xc7, 0x5a, 0xa9, 0xbf, 0x4b, 0x01, 0x0a, 0x6c, - 0xf0, 0xc9, 0xd0, 0xd3, 0xa8, 0x63, 0x56, 0x21, 0xc5, 0x2d, 0x42, 0xa2, 0x96, 0xf6, 0xde, 0x4c, - 0xe7, 0x89, 0xce, 0x2b, 0x0e, 0x16, 0x14, 0xce, 
0x88, 0x7e, 0x12, 0x9e, 0x37, 0xe7, 0x76, 0xdf, - 0x9d, 0x2f, 0x7c, 0x1f, 0x2c, 0x88, 0x41, 0xf4, 0x21, 0x24, 0x5d, 0x8f, 0x04, 0x92, 0x38, 0x0d, - 0xff, 0x3b, 0x33, 0xf8, 0x27, 0x37, 0xbf, 0x7d, 0x42, 0xd8, 0x84, 0xfb, 0x50, 0x19, 0xe8, 0x19, - 0x64, 0xfd, 0xaa, 0x87, 0x0f, 0xaf, 0x1f, 0xcc, 0x2f, 0xd0, 0x0f, 0x58, 0x22, 0x9c, 0xf9, 0xb2, - 0x50, 0x15, 0x72, 0x7d, 0x4e, 0x16, 0xf4, 0xb6, 0x9b, 0xbc, 0xf0, 0x04, 0x21, 0x81, 0x16, 0xa0, - 0xa1, 0x2f, 0x05, 0x04, 0x53, 0x93, 0x06, 0x67, 0xc7, 0x36, 0xcd, 0x33, 0xad, 0xf3, 0x9c, 0x0e, - 0xe4, 0xfc, 0xe0, 0x2c, 0xa0, 0xe8, 0x90, 0x94, 0x8f, 0xc2, 0xdd, 0xe8, 0x78, 0x2d, 0x37, 0xc7, - 0x08, 0x50, 0x84, 0xb3, 0x83, 0x05, 0x25, 0xc4, 0x8e, 0xda, 0x50, 0x1c, 0x44, 0x5c, 0x8e, 0xd7, - 0x6a, 0xf7, 0x66, 0x25, 0xec, 0x08, 0xf1, 0xc1, 0x82, 0x32, 0xc6, 0x8e, 0x7e, 0x1f, 0x50, 0x67, - 0xc2, 0x94, 0xe9, 0x58, 0xed, 0xba, 0x5d, 0x8e, 0x33, 0x1c, 0x2c, 0x28, 0x53, 0xc4, 0x54, 0x3e, - 0x85, 0x24, 0xbd, 0x4e, 0x92, 0x0f, 0x9e, 0xb6, 0x0e, 0x5b, 0xed, 0x67, 0x2d, 0x16, 0x62, 0xea, - 0x8d, 0xa3, 0xc6, 0x69, 0x43, 0x6d, 0xb7, 0x8e, 0x48, 0x88, 0xb9, 0x0d, 0x2b, 0x1c, 0x50, 0x6d, - 0xd5, 0xd5, 0x67, 0x4a, 0x53, 0xa0, 0x62, 0x95, 0xad, 0x70, 0xc2, 0xc9, 0x40, 0xa2, 0xd5, 0x6e, - 0x35, 0xe4, 0x05, 0x9a, 0x7a, 0xea, 0x75, 0x59, 0xa2, 0xa9, 0x47, 0x69, 0x1f, 0x8b, 0xc8, 0xb4, - 0x97, 0x07, 0xd0, 0x7d, 0x13, 0x78, 0x9c, 0xc8, 0xa4, 0xe4, 0x74, 0xe5, 0x4f, 0xef, 0xc2, 0xe2, - 0x58, 0xb8, 0xbe, 0xa6, 0x07, 0xd9, 0xa4, 0x3d, 0x48, 0x3c, 0x70, 0x42, 0xbf, 0x07, 0x89, 0xf1, - 0xf6, 0xe3, 0x01, 0x64, 0x07, 0x9a, 0x83, 0x2d, 0x2f, 0x88, 0x1d, 0x62, 0x46, 0x93, 0x39, 0xa6, - 0x08, 0x9f, 0x3c, 0xc3, 0x08, 0x9b, 0x84, 0xc9, 0x2f, 0x49, 0x98, 0x99, 0xdd, 0xe6, 0x0e, 0x5e, - 0xba, 0xa6, 0x1a, 0x39, 0x86, 0x52, 0xdf, 0xd6, 0x8d, 0x2e, 0x49, 0xbe, 0xc4, 0x46, 0x3d, 0xa3, - 0xcf, 0xc6, 0xbe, 0xb9, 0xdd, 0xef, 0x85, 0xee, 0x66, 0xe8, 0x19, 0xe6, 0xf6, 0xb9, 0xd9, 0xd9, - 0x3e, 0x15, 0x8f, 0x5a, 0xfc, 0x44, 0x72, 0x98, 0x9b, 0x20, 0xd1, 0x23, 0x48, 0x8b, 
0x56, 0x3b, - 0x43, 0x0b, 0xdc, 0x79, 0x63, 0x83, 0x28, 0x4a, 0x38, 0x37, 0xda, 0x87, 0xa2, 0x85, 0x2f, 0xc2, - 0x93, 0xa1, 0x6c, 0xc4, 0x7b, 0xf2, 0x2d, 0x7c, 0x31, 0x7d, 0x2c, 0x94, 0xb7, 0x02, 0x8c, 0x8e, - 0x3e, 0x83, 0x42, 0x24, 0x1e, 0x73, 0xd3, 0x9b, 0x33, 0xe0, 0xf8, 0x95, 0x67, 0x28, 0x4c, 0xa3, - 0x7d, 0x48, 0x8b, 0x84, 0x90, 0xa3, 0x67, 0xbc, 0x99, 0x30, 0xc1, 0x8c, 0xf6, 0x48, 0x3c, 0xbf, - 0xf0, 0x82, 0x3c, 0x91, 0x0f, 0x6a, 0xc9, 0xab, 0xcb, 0x8d, 0x1c, 0x39, 0xe1, 0x94, 0xf9, 0x4f, - 0xce, 0xf2, 0xe1, 0x3a, 0x7a, 0x0c, 0xe0, 0x3f, 0x26, 0xba, 0x74, 0x42, 0x39, 0xbb, 0xa7, 0x38, - 0x16, 0x84, 0xc1, 0x96, 0x94, 0x10, 0x37, 0x7a, 0x02, 0x59, 0x11, 0x78, 0x58, 0xb5, 0x3f, 0xdb, - 0x43, 0x27, 0xc3, 0xa0, 0x08, 0x7e, 0xbe, 0x04, 0x52, 0xe8, 0x98, 0x58, 0x73, 0x31, 0x2f, 0xf9, - 0x1f, 0xce, 0x59, 0xe8, 0x9c, 0x74, 0xce, 0x71, 0x5f, 0xab, 0x9d, 0x6b, 0x56, 0x0f, 0x1f, 0x11, - 0xfe, 0xbd, 0x58, 0x59, 0x52, 0x98, 0x28, 0xd4, 0x02, 0x99, 0xaa, 0x2c, 0x1c, 0x55, 0x65, 0xaa, - 0xb5, 0x77, 0xb8, 0xd6, 0x8a, 0x44, 0x6b, 0x33, 0x23, 0x2b, 0xb5, 0xa9, 0x27, 0x41, 0x74, 0xfd, - 0x1d, 0x28, 0x76, 0x6d, 0xa7, 0xaf, 0x79, 0xaa, 0x70, 0x9e, 0x52, 0x30, 0x1c, 0xf8, 0xf6, 0x72, - 0xa3, 0xb0, 0x4f, 0xb1, 0xc2, 0x71, 0x0a, 0xdd, 0xf0, 0x27, 0x3a, 0x10, 0x49, 0x68, 0x89, 0xe6, - 0x8c, 0x0f, 0xe6, 0x3d, 0xe1, 0x64, 0x06, 0x6a, 0x41, 0x8a, 0x56, 0x72, 0x6e, 0x79, 0x99, 0xea, - 0xfd, 0x35, 0xab, 0x42, 0x85, 0x4b, 0x41, 0x3f, 0x87, 0xa2, 0x4e, 0x20, 0xa4, 0xcd, 0x60, 0xc3, - 0x87, 0x15, 0x2a, 0x77, 0x67, 0x4e, 0xb9, 0x2d, 0xad, 0x8f, 0x9b, 0x56, 0xd7, 0x16, 0x3d, 0xa7, - 0x10, 0xc6, 0x06, 0x16, 0x6d, 0xc8, 0x74, 0xb5, 0xbe, 0x61, 0x1a, 0xd8, 0x2d, 0xaf, 0x52, 0xb9, - 0x1f, 0x5e, 0xeb, 0xe5, 0xe3, 0x83, 0x69, 0x91, 0xc2, 0x84, 0x10, 0xdf, 0xd9, 0x29, 0x60, 0x44, - 0x2e, 0xf5, 0xd6, 0xa4, 0xb3, 0x8b, 0xc1, 0x74, 0x64, 0x48, 0x4d, 0x9d, 0x9d, 0x7f, 0xe9, 0xe8, - 0x2e, 0xc0, 0x4b, 0x03, 0xbf, 0x52, 0x5f, 0x0c, 0xb1, 0x33, 0x2a, 0x97, 0xc3, 0x3d, 0x06, 0x81, - 0x7f, 0x46, 0xc0, 0xe8, 
0x23, 0xc8, 0xea, 0x78, 0x80, 0x2d, 0xdd, 0x6d, 0x5b, 0xe5, 0xdb, 0xb4, - 0xa2, 0x5b, 0xba, 0xba, 0xdc, 0xc8, 0xd6, 0x05, 0x90, 0xc7, 0xd6, 0x80, 0x0a, 0x7d, 0x01, 0x79, - 0xf6, 0x81, 0xf5, 0xb6, 0xb5, 0x37, 0x2a, 0xaf, 0xd1, 0x43, 0xdf, 0x9f, 0x53, 0x99, 0x41, 0x07, - 0xef, 0x0f, 0x3d, 0xeb, 0x21, 0x69, 0x4a, 0x44, 0x36, 0xfa, 0x39, 0xe4, 0x85, 0x75, 0x3f, 0xb6, - 0xcf, 0xdc, 0xf2, 0x5b, 0xd7, 0x4e, 0x24, 0xc7, 0xd7, 0x7a, 0x12, 0xb0, 0x8a, 0xd8, 0x15, 0x96, - 0x86, 0x7e, 0x0a, 0x05, 0xff, 0x01, 0xc4, 0x1e, 0x78, 0x6e, 0xf9, 0x6d, 0xea, 0x9c, 0x0f, 0xe6, - 0x35, 0x5d, 0xce, 0xdb, 0x1e, 0xd0, 0x61, 0x6d, 0xe8, 0x0b, 0xdd, 0x81, 0xac, 0xee, 0xd8, 0x03, - 0x96, 0x43, 0xbe, 0xb7, 0x29, 0x6d, 0xc5, 0xfd, 0x36, 0xd2, 0xb1, 0x07, 0x34, 0x39, 0xa8, 0x50, - 0x74, 0xf0, 0xc0, 0xd4, 0x3a, 0xb8, 0x4f, 0xb2, 0x9b, 0xdd, 0x2d, 0xaf, 0xd3, 0xd5, 0x77, 0xe7, - 0x56, 0xa4, 0xcf, 0x2c, 0x0c, 0x33, 0x24, 0xaf, 0xdd, 0x45, 0x4f, 0x01, 0xb4, 0xa1, 0x6e, 0x78, - 0x6a, 0xdf, 0xd6, 0x71, 0x79, 0xe3, 0xda, 0x77, 0xc1, 0x71, 0xe1, 0x55, 0xc2, 0xf8, 0xc4, 0xd6, - 0xb1, 0x3f, 0xde, 0x17, 0x00, 0xf4, 0x11, 0xe4, 0xe8, 0xd1, 0xbe, 0xb0, 0xcf, 0x88, 0x6d, 0x6e, - 0xd2, 0xc3, 0x95, 0xf8, 0x5d, 0x66, 0xeb, 0x8e, 0x3d, 0x78, 0x6c, 0x9f, 0x51, 0x8b, 0xe1, 0xff, - 0xea, 0xc8, 0x85, 0x7c, 0xaf, 0xa3, 0x06, 0xe1, 0xf4, 0x0e, 0xbd, 0xc5, 0x4f, 0xe6, 0xdc, 0xcb, - 0xa3, 0xda, 0x94, 0x00, 0xbb, 0x24, 0xf2, 0xc2, 0xa3, 0x9a, 0x80, 0xb9, 0x4a, 0xae, 0xd7, 0xf1, - 0x3f, 0xd0, 0x7b, 0x90, 0x67, 0xb3, 0x0a, 0xee, 0x00, 0x95, 0x90, 0x03, 0xe4, 0x18, 0x86, 0xb9, - 0x40, 0x0b, 0xf8, 0x50, 0x43, 0xd5, 0x5c, 0xd5, 0xee, 0xb2, 0x3b, 0xbb, 0x3b, 0x7f, 0xde, 0x2f, - 0x32, 0xee, 0xaa, 0xdb, 0xee, 0xd2, 0x8b, 0xed, 0x40, 0xde, 0x1e, 0x7a, 0x67, 0xf6, 0xd0, 0xd2, - 0xd5, 0xee, 0x73, 0xb7, 0xfc, 0x0e, 0x3d, 0xed, 0x8d, 0x1a, 0x50, 0xff, 0x74, 0x6d, 0x2e, 0x68, - 0xff, 0xd0, 0x55, 0x72, 0x42, 0xea, 0xfe, 0x73, 0x17, 0xfd, 0x11, 0xe4, 0x0c, 0x2b, 0x58, 0xe3, - 0xde, 0xcd, 0xd7, 0x40, 0xa2, 0xf2, 0x6e, 0x5a, 0xfe, 0x12, 
0xc0, 0x65, 0x92, 0x15, 0xde, 0x87, - 0xa2, 0xdd, 0xed, 0x9a, 0x86, 0x85, 0x55, 0x07, 0x6b, 0xae, 0x6d, 0x95, 0xdf, 0x0d, 0x69, 0xb0, - 0xc0, 0x71, 0x0a, 0x45, 0xa1, 0x0a, 0x64, 0x3d, 0xdc, 0x1f, 0xd8, 0x8e, 0xe6, 0x8c, 0xca, 0xef, - 0x85, 0x5f, 0x45, 0x7c, 0x30, 0x3a, 0x83, 0xb5, 0xa1, 0x85, 0x2f, 0x06, 0xb6, 0x8b, 0x75, 0x95, - 0xd7, 0x74, 0x2e, 0xcd, 0x6f, 0xc4, 0x8e, 0xb6, 0x68, 0x8c, 0xbb, 0xc7, 0x37, 0x75, 0xeb, 0xa9, - 0xa0, 0x64, 0x35, 0x1e, 0xcb, 0x83, 0x7e, 0xa5, 0x77, 0x6b, 0x38, 0x15, 0xad, 0xaf, 0xfd, 0x52, - 0x82, 0xd2, 0x44, 0xce, 0x44, 0x7f, 0x08, 0x69, 0xcb, 0xd6, 0x43, 0x6f, 0x50, 0x0d, 0xbe, 0x4c, - 0xaa, 0x65, 0xeb, 0xec, 0x09, 0xea, 0x41, 0xcf, 0xf0, 0xce, 0x87, 0x67, 0xdb, 0x1d, 0xbb, 0xbf, - 0xe3, 0x2b, 0x51, 0x3f, 0x0b, 0xfe, 0xdf, 0x19, 0x3c, 0xef, 0xed, 0xd0, 0xff, 0x06, 0x67, 0xdb, - 0x8c, 0x4d, 0x49, 0x11, 0xa9, 0x4d, 0x1d, 0x7d, 0x08, 0x8b, 0xf8, 0x62, 0x60, 0x38, 0xa1, 0xba, - 0x31, 0x16, 0xf2, 0xf9, 0x62, 0x80, 0x24, 0x06, 0xc2, 0x5f, 0x09, 0xfe, 0x31, 0x06, 0x8b, 0x63, - 0x19, 0x8b, 0x14, 0xca, 0xf4, 0xc5, 0x36, 0x52, 0x28, 0x13, 0xc8, 0x35, 0x4f, 0x4a, 0xe1, 0x9f, - 0x3c, 0xc4, 0xdf, 0xf4, 0x27, 0x0f, 0xd1, 0xe9, 0x7b, 0xf2, 0x06, 0xd3, 0xf7, 0x1f, 0xc3, 0xaa, - 0xe1, 0xaa, 0x96, 0x6d, 0x89, 0x21, 0x89, 0xdf, 0x84, 0x85, 0x9f, 0xb5, 0x97, 0x0c, 0xb7, 0x65, - 0x5b, 0x6c, 0x3c, 0xe2, 0x9f, 0x3a, 0x78, 0x01, 0x4f, 0x4f, 0xbe, 0x80, 0xfb, 0x43, 0x90, 0x84, - 0x9c, 0x5c, 0xfb, 0x07, 0x09, 0x32, 0x22, 0x1b, 0x47, 0x3b, 0x03, 0x69, 0xce, 0xce, 0x60, 0xb6, - 0x1e, 0xf7, 0x41, 0x9e, 0x30, 0x4a, 0xd6, 0x98, 0xbc, 0x2d, 0xaa, 0xa9, 0xa9, 0xb6, 0x58, 0x1c, - 0x44, 0x4c, 0x90, 0xdf, 0xee, 0x57, 0x12, 0x64, 0xc3, 0x3f, 0x39, 0x8b, 0x45, 0x27, 0x14, 0x13, - 0x6d, 0xce, 0x6b, 0xbe, 0x7a, 0x46, 0xef, 0x2b, 0x3e, 0xff, 0x7d, 0xf1, 0x6d, 0xfe, 0x31, 0xe4, - 0x42, 0x49, 0x72, 0xbc, 0x45, 0x97, 0x5e, 0xa3, 0x45, 0x7f, 0x07, 0x52, 0x3c, 0x33, 0x30, 0x17, - 0x28, 0x70, 0xee, 0x24, 0xcb, 0x0a, 0xc9, 0x2f, 0x48, 0x46, 0xe0, 0xab, 0xff, 0x5b, 0x1c, 0xf2, - 
0xe1, 0x24, 0x4a, 0xc2, 0x88, 0x61, 0x75, 0x1c, 0x9a, 0xc1, 0xe8, 0xea, 0x71, 0xff, 0x71, 0x55, - 0x80, 0x49, 0x6a, 0xed, 0x1b, 0x96, 0x4a, 0x1f, 0xf4, 0x22, 0x6e, 0x96, 0xe9, 0x1b, 0xd6, 0xe7, - 0x04, 0x4a, 0x49, 0xb4, 0x0b, 0x4e, 0x12, 0x8f, 0x90, 0x68, 0x17, 0x8c, 0x64, 0x8d, 0x56, 0xab, - 0x8e, 0x47, 0x5b, 0xca, 0x78, 0xa8, 0xfe, 0x74, 0x3c, 0xb4, 0x0e, 0xe9, 0x97, 0x86, 0xe3, 0x0d, - 0x35, 0x93, 0x76, 0x8f, 0xc2, 0x20, 0x05, 0x10, 0x59, 0x50, 0x0c, 0xca, 0x86, 0x57, 0x16, 0x76, - 0xa8, 0x89, 0xe7, 0x76, 0xab, 0xaf, 0x51, 0x37, 0x04, 0x1f, 0x44, 0x90, 0x08, 0xae, 0x6e, 0x18, - 0xb8, 0xf6, 0xb7, 0x12, 0x14, 0x22, 0x64, 0xa8, 0x09, 0x8b, 0x74, 0xe1, 0x89, 0xf1, 0xd7, 0x1d, - 0xff, 0xc7, 0x63, 0x04, 0x3d, 0xb5, 0x23, 0x2c, 0xd8, 0x21, 0x94, 0x8e, 0x3e, 0x85, 0x22, 0x13, - 0xe5, 0x3f, 0xd3, 0x47, 0xcd, 0x2f, 0x4f, 0x25, 0x45, 0xdf, 0xea, 0xf3, 0x76, 0x00, 0xd3, 0xc3, - 0x2f, 0x90, 0x6b, 0x16, 0xe4, 0x42, 0x75, 0xc9, 0x1c, 0x76, 0xff, 0x23, 0x48, 0xf8, 0xf1, 0x72, - 0xce, 0x7c, 0x4b, 0x19, 0xf8, 0x7a, 0x5f, 0x4a, 0xb0, 0x3c, 0xad, 0x3e, 0x88, 0xf8, 0x13, 0x33, - 0xa4, 0xb9, 0xfc, 0xe9, 0x6e, 0xb8, 0x6e, 0x63, 0xc6, 0x25, 0x5e, 0xc5, 0x82, 0xca, 0xed, 0x5d, - 0xdf, 0xc4, 0x99, 0x6d, 0x2d, 0x46, 0x4c, 0x9c, 0xf4, 0x67, 0x21, 0x23, 0xaf, 0x3c, 0x10, 0x63, - 0x19, 0x80, 0xd4, 0xf1, 0xd3, 0xbd, 0xa3, 0x66, 0x6d, 0xea, 0x48, 0x05, 0xe5, 0x20, 0xdd, 0xde, - 0xdf, 0x3f, 0x6a, 0xb6, 0x1a, 0x72, 0xbc, 0xb2, 0x05, 0x59, 0xbf, 0x04, 0x43, 0x79, 0xc8, 0xd4, - 0x9b, 0x27, 0xd5, 0xbd, 0xa3, 0x46, 0x5d, 0x5e, 0x40, 0x05, 0xc8, 0x2a, 0x8d, 0x6a, 0x9d, 0x0e, - 0x6e, 0x64, 0xe9, 0xe3, 0xcc, 0x2f, 0xbe, 0xdc, 0x90, 0x78, 0x88, 0x4c, 0xc9, 0xe9, 0xc7, 0x89, - 0x0c, 0x92, 0x97, 0x2a, 0x7f, 0x2f, 0x01, 0xaa, 0x6b, 0x9e, 0x46, 0xec, 0xef, 0x06, 0x83, 0x98, - 0xd8, 0x35, 0x37, 0x15, 0x6d, 0xae, 0xe3, 0x6f, 0xd2, 0x5c, 0x07, 0x9b, 0xae, 0xfc, 0x79, 0x12, - 0x8a, 0xa7, 0xa3, 0x41, 0x78, 0x93, 0xaf, 0x15, 0xd7, 0xa7, 0x45, 0xef, 0xd8, 0xcd, 0xa3, 0xf7, - 0x35, 0xbf, 0xe9, 0x65, 0x1a, 0x4a, 
0x5c, 0xa3, 0xa1, 0x3a, 0x24, 0x9e, 0x1b, 0x16, 0x9b, 0x6c, - 0x16, 0x67, 0xea, 0x26, 0x7a, 0xda, 0xed, 0x43, 0xc3, 0xd2, 0xc5, 0x3a, 0x84, 0x1b, 0xfd, 0x0c, - 0xf2, 0xd8, 0x1a, 0xf6, 0xd5, 0x3e, 0xee, 0x9f, 0x61, 0x87, 0x3d, 0x81, 0x5f, 0xd3, 0x5e, 0x45, - 0xa5, 0x35, 0xac, 0x61, 0xff, 0x09, 0x65, 0x14, 0x95, 0x2e, 0xf6, 0x21, 0x2e, 0xba, 0x0f, 0x49, - 0xcd, 0x34, 0x34, 0x97, 0x4f, 0xb5, 0xae, 0xfb, 0x81, 0x1b, 0x23, 0x44, 0xbf, 0x0b, 0x05, 0xcd, - 0x71, 0xb4, 0x11, 0xff, 0xa1, 0x96, 0x4e, 0x27, 0xaa, 0x7c, 0x9c, 0x46, 0xea, 0xd3, 0x2a, 0x41, - 0xd2, 0xdf, 0x66, 0x09, 0x45, 0xe4, 0x34, 0x1f, 0xa4, 0xaf, 0xfd, 0x42, 0x02, 0x08, 0xb6, 0x84, - 0x7e, 0x04, 0xb7, 0x06, 0xe7, 0x23, 0x97, 0xfe, 0x84, 0xcc, 0xc1, 0x03, 0x07, 0xbb, 0xd8, 0x62, - 0xfe, 0x4a, 0xef, 0x39, 0xaf, 0xac, 0x0a, 0xb4, 0x12, 0xc1, 0xa2, 0x4f, 0x60, 0x55, 0xfc, 0xf4, - 0x6c, 0x8c, 0x2f, 0x9c, 0xc7, 0x57, 0x38, 0x4d, 0x94, 0x99, 0xbb, 0xe1, 0x5b, 0x90, 0x20, 0xaa, - 0x26, 0x0e, 0xd7, 0x68, 0x3d, 0x7d, 0x22, 0x2f, 0xa0, 0x2c, 0x24, 0xab, 0x47, 0xcd, 0xea, 0x49, - 0xd8, 0x89, 0x2a, 0xff, 0x21, 0x81, 0xcc, 0x8c, 0xe1, 0x4d, 0x2d, 0x72, 0x76, 0xa5, 0xf1, 0x9b, - 0x87, 0x9e, 0x51, 0x5f, 0x4b, 0x7c, 0x47, 0xbe, 0xf6, 0x55, 0x0c, 0x20, 0x74, 0xaa, 0x9f, 0x84, - 0x7f, 0x07, 0x3f, 0x7b, 0x6e, 0x37, 0x96, 0xbd, 0x0e, 0x16, 0xc4, 0xaf, 0xe4, 0x1f, 0x41, 0x46, - 0xe7, 0x21, 0x86, 0x87, 0xef, 0x99, 0x03, 0xb2, 0x89, 0x48, 0x74, 0x40, 0x3a, 0x61, 0x0e, 0x45, - 0x9f, 0x44, 0x7e, 0x76, 0x79, 0x6f, 0x2e, 0x4b, 0x3f, 0x10, 0x4f, 0xd6, 0x55, 0x48, 0x31, 0x8f, - 0xe7, 0x6a, 0x9a, 0x35, 0x62, 0x1d, 0xbf, 0xd4, 0x83, 0x05, 0x85, 0x33, 0xf2, 0x61, 0x76, 0x1a, - 0x92, 0x43, 0xcb, 0xb0, 0xad, 0x1f, 0x28, 0xe1, 0xc7, 0x52, 0x51, 0x29, 0x93, 0xb0, 0x4b, 0xff, - 0xd7, 0x3c, 0xac, 0xb3, 0xb1, 0xfa, 0x53, 0xeb, 0xa5, 0x0f, 0x90, 0x50, 0x11, 0x80, 0xe3, 0x0d, - 0xab, 0x27, 0xc7, 0x68, 0xd0, 0x76, 0xec, 0xc1, 0x80, 0x7c, 0xc5, 0xf7, 0xbe, 0xff, 0xf5, 0x7f, - 0xad, 0x2f, 0x7c, 0x7d, 0xb5, 0x2e, 0xfd, 0xea, 0x6a, 0x5d, 0xfa, 0xf5, 
0xd5, 0xba, 0xf4, 0x9f, - 0x57, 0xeb, 0xd2, 0x5f, 0x7e, 0xb3, 0xbe, 0xf0, 0xab, 0x6f, 0xd6, 0x17, 0x7e, 0xfd, 0xcd, 0xfa, - 0xc2, 0xef, 0xa5, 0xf9, 0x46, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x5c, 0xaa, 0x6e, 0x6b, - 0x31, 0x00, 0x00, + proto.RegisterFile("sql/sqlbase/structured.proto", fileDescriptor_structured_8ef81714cff0efcf) +} + +var fileDescriptor_structured_8ef81714cff0efcf = []byte{ + // 4241 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x5b, 0xc9, 0x6f, 0x23, 0x67, + 0x76, 0x67, 0x71, 0xe7, 0xe3, 0xa2, 0xd2, 0xa7, 0xa5, 0xd9, 0xb2, 0x2d, 0xa9, 0xd9, 0x6e, 0x5b, + 0x33, 0xb6, 0xa5, 0xb6, 0x7a, 0x92, 0xf1, 0xd8, 0xc9, 0xc0, 0x14, 0x49, 0xb5, 0xd8, 0x52, 0x93, + 0x72, 0x49, 0xed, 0x9e, 0xc9, 0x56, 0x29, 0xb1, 0x3e, 0x52, 0xe5, 0x2e, 0x56, 0xb1, 0xab, 0x8a, + 0xdd, 0x12, 0x90, 0x53, 0x0e, 0xc1, 0x9c, 0x82, 0x5c, 0x92, 0x5b, 0x00, 0x23, 0x31, 0x90, 0x39, + 0x05, 0xc8, 0x25, 0xb9, 0x05, 0xc8, 0xcd, 0x39, 0x65, 0x80, 0x5c, 0xe6, 0x24, 0x24, 0xf2, 0x25, + 0x7f, 0x41, 0x02, 0x38, 0x97, 0xe0, 0xdb, 0x6a, 0xe1, 0xa2, 0xa1, 0xa4, 0xce, 0xa5, 0xa1, 0x7a, + 0xef, 0x7b, 0xef, 0xdb, 0xde, 0xfb, 0xbd, 0xe5, 0x63, 0xc3, 0xdb, 0xee, 0x4b, 0x73, 0xcb, 0x7d, + 0x69, 0x9e, 0x68, 0x2e, 0xde, 0x72, 0x3d, 0x67, 0xd8, 0xf1, 0x86, 0x0e, 0xd6, 0x37, 0x07, 0x8e, + 0xed, 0xd9, 0x68, 0xa9, 0x63, 0x77, 0x5e, 0x38, 0xb6, 0xd6, 0x39, 0xdd, 0x74, 0x5f, 0x9a, 0x9b, + 0x7c, 0xdc, 0x4a, 0x79, 0xe8, 0x19, 0xe6, 0xd6, 0xa9, 0xd9, 0xd9, 0xf2, 0x8c, 0x3e, 0x76, 0x3d, + 0xad, 0x3f, 0x60, 0x02, 0x2b, 0x6f, 0x85, 0xd5, 0x0d, 0x1c, 0xe3, 0x95, 0x61, 0xe2, 0x1e, 0xe6, + 0xcc, 0x25, 0xc2, 0xf4, 0xce, 0x07, 0xd8, 0x65, 0xff, 0x72, 0xf2, 0xdd, 0x1e, 0xb6, 0xb7, 0x7a, + 0xd8, 0x36, 0x2c, 0x1d, 0x9f, 0x6d, 0x75, 0x6c, 0xab, 0x6b, 0xf4, 0x38, 0x6b, 0xb1, 0x67, 0xf7, + 0x6c, 0xfa, 0xe7, 0x16, 0xf9, 0x8b, 0x51, 0x2b, 0x7f, 0x9a, 0x82, 0x85, 0x5d, 0xdb, 0xc1, 0x46, + 0xcf, 0xda, 0xc7, 0xe7, 0x0a, 0xee, 0x62, 0x07, 0x5b, 0x1d, 0x8c, 0xd6, 0x21, 
0xe5, 0x69, 0x27, + 0x26, 0x2e, 0x4b, 0xeb, 0xd2, 0x46, 0x71, 0x07, 0xbe, 0xbd, 0x58, 0x8b, 0x7d, 0x7f, 0xb1, 0x16, + 0x6f, 0xd6, 0x15, 0xc6, 0x40, 0x0f, 0x20, 0x45, 0x67, 0x29, 0xc7, 0xe9, 0x88, 0x39, 0x3e, 0x22, + 0xd3, 0x24, 0x44, 0x32, 0x8c, 0x72, 0x51, 0x19, 0x92, 0x96, 0xd6, 0xc7, 0xe5, 0xc4, 0xba, 0xb4, + 0x91, 0xdb, 0x49, 0x92, 0x51, 0x0a, 0xa5, 0xa0, 0x7d, 0xc8, 0xbe, 0xd2, 0x4c, 0x43, 0x37, 0xbc, + 0xf3, 0x72, 0x72, 0x5d, 0xda, 0x28, 0x6d, 0xff, 0x60, 0x73, 0xe2, 0x19, 0x6d, 0xd6, 0x6c, 0xcb, + 0xf5, 0x1c, 0xcd, 0xb0, 0xbc, 0x2f, 0xb9, 0x00, 0x57, 0xe4, 0x2b, 0x40, 0x0f, 0x61, 0xde, 0x3d, + 0xd5, 0x1c, 0xac, 0xab, 0x03, 0x07, 0x77, 0x8d, 0x33, 0xd5, 0xc4, 0x56, 0x39, 0xb5, 0x2e, 0x6d, + 0xa4, 0xf8, 0xd0, 0x39, 0xc6, 0x3e, 0xa4, 0xdc, 0x03, 0x6c, 0xa1, 0x63, 0xc8, 0xd9, 0x96, 0xaa, + 0x63, 0x13, 0x7b, 0xb8, 0x9c, 0xa6, 0xf3, 0x7f, 0x3c, 0x65, 0xfe, 0x09, 0x07, 0xb4, 0x59, 0xed, + 0x78, 0x86, 0x6d, 0x89, 0x75, 0xd8, 0x56, 0x9d, 0x2a, 0xe2, 0x5a, 0x87, 0x03, 0x5d, 0xf3, 0x70, + 0x39, 0x73, 0x6b, 0xad, 0xcf, 0xa8, 0x22, 0x74, 0x00, 0xa9, 0xbe, 0xe6, 0x75, 0x4e, 0xcb, 0x59, + 0xaa, 0xf1, 0xe1, 0x35, 0x34, 0x3e, 0x25, 0x72, 0x5c, 0x21, 0x53, 0x52, 0x79, 0x0e, 0x69, 0x36, + 0x0f, 0x2a, 0x42, 0xae, 0xd5, 0x56, 0xab, 0xb5, 0xe3, 0x66, 0xbb, 0x25, 0xc7, 0x50, 0x01, 0xb2, + 0x4a, 0xe3, 0xe8, 0x58, 0x69, 0xd6, 0x8e, 0x65, 0x89, 0x7c, 0x1d, 0x35, 0x8e, 0xd5, 0xd6, 0xb3, + 0x83, 0x03, 0x39, 0x8e, 0xe6, 0x20, 0x4f, 0xbe, 0xea, 0x8d, 0xdd, 0xea, 0xb3, 0x83, 0x63, 0x39, + 0x81, 0xf2, 0x90, 0xa9, 0x55, 0x8f, 0x6a, 0xd5, 0x7a, 0x43, 0x4e, 0xae, 0x24, 0x7f, 0xf9, 0xcd, + 0x6a, 0xac, 0xf2, 0x10, 0x52, 0x74, 0x3a, 0x04, 0x90, 0x3e, 0x6a, 0x3e, 0x3d, 0x3c, 0x68, 0xc8, + 0x31, 0x94, 0x85, 0xe4, 0x2e, 0x51, 0x21, 0x11, 0x89, 0xc3, 0xaa, 0x72, 0xdc, 0xac, 0x1e, 0xc8, + 0x71, 0x26, 0xf1, 0x69, 0xf2, 0xbf, 0xbe, 0x5e, 0x93, 0x2a, 0xff, 0x9e, 0x86, 0xc5, 0x60, 0xed, + 0xc1, 0x6d, 0xa3, 0x1a, 0xcc, 0xd9, 0x8e, 0xd1, 0x33, 0x2c, 0x95, 0xda, 0x9c, 0x6a, 0xe8, 0xdc, + 0x1e, 0xdf, 0x22, 
0xfb, 0xb9, 0xbc, 0x58, 0x2b, 0xb6, 0x29, 0xfb, 0x98, 0x70, 0x9b, 0x75, 0x6e, + 0xa0, 0x45, 0x3b, 0x44, 0xd4, 0xd1, 0x3e, 0xcc, 0x73, 0x25, 0x1d, 0xdb, 0x1c, 0xf6, 0x2d, 0xd5, + 0xd0, 0xdd, 0x72, 0x7c, 0x3d, 0xb1, 0x51, 0xdc, 0x59, 0xbb, 0xbc, 0x58, 0x9b, 0x63, 0x2a, 0x6a, + 0x94, 0xd7, 0xac, 0xbb, 0xdf, 0x5f, 0xac, 0x65, 0xc5, 0x87, 0xc2, 0xa7, 0xe7, 0xdf, 0xba, 0x8b, + 0x9e, 0xc3, 0x92, 0x23, 0xce, 0x56, 0x0f, 0x2b, 0x4c, 0x50, 0x85, 0xf7, 0x2f, 0x2f, 0xd6, 0x16, + 0xfc, 0xc3, 0xd7, 0x27, 0x2b, 0x5d, 0x70, 0x46, 0x07, 0xe8, 0x2e, 0x6a, 0x43, 0x88, 0x1c, 0x6c, + 0x37, 0x49, 0xb7, 0xbb, 0xc6, 0xb7, 0x3b, 0x1f, 0xa8, 0x8e, 0x6e, 0x79, 0xde, 0x19, 0x61, 0xe8, + 0xbe, 0xe3, 0xa5, 0xae, 0x74, 0xbc, 0xf4, 0x6d, 0x1d, 0x2f, 0xe2, 0x46, 0x99, 0xff, 0x17, 0x37, + 0xca, 0xbe, 0x71, 0x37, 0xca, 0xbd, 0x01, 0x37, 0x42, 0x55, 0x58, 0x30, 0x71, 0x4f, 0xeb, 0x9c, + 0xab, 0xdc, 0xbc, 0x18, 0x1c, 0x02, 0xbd, 0xb1, 0xf9, 0x11, 0x38, 0x2c, 0x4b, 0xca, 0x3c, 0x1b, + 0xcd, 0xcc, 0x8d, 0x92, 0x51, 0x13, 0xee, 0x70, 0x15, 0xa1, 0xbb, 0x67, 0x6a, 0xf2, 0xd3, 0xd4, + 0x2c, 0x31, 0x89, 0xc0, 0x12, 0x28, 0x8b, 0x79, 0xd2, 0x93, 0x64, 0xb6, 0x20, 0x17, 0x9f, 0x24, + 0xb3, 0x45, 0xb9, 0x54, 0xf9, 0xab, 0x24, 0xc8, 0xcc, 0xbe, 0xea, 0xd8, 0xed, 0x38, 0xc6, 0xc0, + 0xb3, 0x1d, 0xdf, 0x2a, 0xa4, 0x31, 0xab, 0x78, 0x0f, 0xe2, 0x86, 0xce, 0xc1, 0x7c, 0x99, 0xdb, + 0x5b, 0x9c, 0x1a, 0x58, 0x60, 0xb9, 0x71, 0x43, 0x47, 0x9b, 0x90, 0x24, 0x11, 0x87, 0x02, 0x7a, + 0x7e, 0x7b, 0x65, 0xf4, 0x0c, 0x71, 0x7f, 0x93, 0x05, 0xa4, 0x63, 0x85, 0x8e, 0x43, 0xeb, 0x90, + 0xb5, 0x86, 0xa6, 0x49, 0x83, 0x09, 0xb1, 0xe6, 0xac, 0xb8, 0x16, 0x41, 0x45, 0xf7, 0xa0, 0xa0, + 0xe3, 0xae, 0x36, 0x34, 0x3d, 0x15, 0x9f, 0x0d, 0x1c, 0x66, 0xb1, 0x4a, 0x9e, 0xd3, 0x1a, 0x67, + 0x03, 0x07, 0xbd, 0x0d, 0xe9, 0x53, 0x43, 0xd7, 0xb1, 0x45, 0x0d, 0x56, 0xa8, 0xe0, 0x34, 0xb4, + 0x0d, 0xf3, 0x43, 0x17, 0xbb, 0xaa, 0x8b, 0x5f, 0x0e, 0xc9, 0x91, 0x50, 0x87, 0x04, 0xea, 0x90, + 0x69, 0xee, 0x20, 0x73, 0x64, 0xc0, 0x11, 0xe7, 0x13, 
0x7f, 0xbb, 0x07, 0x85, 0x8e, 0xdd, 0x1f, + 0x0c, 0x3d, 0xcc, 0x26, 0xcd, 0xb3, 0x49, 0x39, 0x8d, 0x4e, 0xba, 0x0d, 0xf3, 0xf6, 0x6b, 0x6b, + 0x44, 0x6d, 0x21, 0xaa, 0x96, 0x0c, 0x08, 0xab, 0xdd, 0x01, 0x30, 0xed, 0x9e, 0xd1, 0xd1, 0x4c, + 0xe2, 0xbd, 0x45, 0x7a, 0x9a, 0xf7, 0xf9, 0x69, 0xce, 0x1d, 0x30, 0x8e, 0x38, 0xce, 0xc8, 0xd1, + 0xe6, 0xb8, 0x58, 0x53, 0x47, 0xbb, 0xf0, 0x8e, 0x66, 0x7a, 0xd8, 0x11, 0xf0, 0x42, 0x8e, 0x51, + 0x35, 0x2c, 0x75, 0xe0, 0xd8, 0x3d, 0x07, 0xbb, 0x6e, 0xb9, 0x14, 0x3a, 0x83, 0xbb, 0x74, 0x28, + 0x53, 0x73, 0x7c, 0x3e, 0xc0, 0x4d, 0xeb, 0x90, 0x0f, 0xf3, 0x4d, 0x22, 0x2b, 0xe7, 0x9e, 0x24, + 0xb3, 0x39, 0x19, 0x9e, 0x24, 0xb3, 0x19, 0x39, 0x5b, 0xf9, 0xf3, 0x38, 0x2c, 0x33, 0x81, 0x5d, + 0xad, 0x6f, 0x98, 0xe7, 0xb7, 0x35, 0x0f, 0xa6, 0x85, 0x9b, 0x07, 0x3d, 0x57, 0xba, 0x6c, 0x22, + 0xc6, 0x70, 0x91, 0x9e, 0x2b, 0xa1, 0xb5, 0x08, 0x09, 0x7d, 0x02, 0x10, 0x02, 0xce, 0x24, 0x3d, + 0xd0, 0xbb, 0x97, 0x17, 0x6b, 0xb9, 0xc9, 0x70, 0x99, 0xeb, 0x84, 0x40, 0x72, 0x5e, 0x58, 0x8a, + 0xaf, 0x81, 0x9a, 0x4b, 0xe8, 0x90, 0xeb, 0x6c, 0xc0, 0xc4, 0x43, 0x9e, 0xd3, 0x23, 0x4c, 0x9d, + 0xc7, 0x9f, 0x7f, 0x8a, 0xc3, 0x62, 0xd3, 0xf2, 0xb0, 0x63, 0x62, 0xed, 0x15, 0x0e, 0x1d, 0xc7, + 0xcf, 0x20, 0xa7, 0x59, 0x1d, 0xec, 0x7a, 0xb6, 0xe3, 0x96, 0xa5, 0xf5, 0xc4, 0x46, 0x7e, 0xfb, + 0x47, 0x53, 0x40, 0x63, 0x92, 0xfc, 0x66, 0x95, 0x0b, 0xf3, 0x93, 0x0c, 0x94, 0xad, 0xfc, 0xb3, + 0x04, 0x59, 0xc1, 0x45, 0x0f, 0x21, 0x3b, 0x12, 0xdf, 0x96, 0xf8, 0x6e, 0x32, 0x51, 0x98, 0xcf, + 0x78, 0x1c, 0xdc, 0x7f, 0x0b, 0xb2, 0x14, 0x26, 0x54, 0xff, 0x4e, 0x56, 0x84, 0x04, 0x47, 0x8a, + 0x70, 0x2a, 0x96, 0xa1, 0x63, 0x9b, 0x3a, 0xaa, 0x4d, 0xca, 0x92, 0x12, 0x54, 0xfe, 0x8e, 0x38, + 0xbf, 0xa3, 0x68, 0x9e, 0x34, 0x96, 0x38, 0xb1, 0x33, 0xe3, 0x27, 0xf7, 0x8f, 0x12, 0xcc, 0x13, + 0x01, 0x1d, 0xeb, 0xa1, 0x63, 0xbb, 0x0f, 0x60, 0xb8, 0xaa, 0xcb, 0xe8, 0x74, 0x47, 0xc2, 0x5a, + 0x73, 0x86, 0xcb, 0x87, 0xfb, 0xa6, 0x16, 0x1f, 0x33, 0xb5, 0x9f, 0x40, 0x91, 0xca, 0xaa, 
0x27, + 0xc3, 0xce, 0x0b, 0xec, 0xb9, 0x74, 0x85, 0xa9, 0x9d, 0x45, 0xbe, 0xc2, 0x02, 0xd5, 0xb0, 0xc3, + 0x78, 0x4a, 0xc1, 0x0d, 0x7d, 0x8d, 0x59, 0x5f, 0x72, 0xcc, 0xfa, 0xf8, 0xc2, 0xff, 0x27, 0x01, + 0xcb, 0x87, 0x9a, 0xe3, 0x19, 0x24, 0x50, 0x18, 0x56, 0x2f, 0xb4, 0xfa, 0x07, 0x90, 0xb7, 0x86, + 0x7d, 0x6e, 0x60, 0x2e, 0xbf, 0x10, 0xb6, 0x3e, 0xb0, 0x86, 0x7d, 0x66, 0x3b, 0x2e, 0x3a, 0x80, + 0xa4, 0x69, 0xb8, 0x1e, 0xcd, 0x24, 0xf2, 0xdb, 0xdb, 0x53, 0xcc, 0x62, 0xf2, 0x1c, 0x9b, 0x07, + 0x86, 0xeb, 0x89, 0x3d, 0x13, 0x2d, 0xa8, 0x0d, 0x29, 0x47, 0xb3, 0x7a, 0x98, 0xfa, 0x4b, 0x7e, + 0xfb, 0xd1, 0xf5, 0xd4, 0x29, 0x44, 0x54, 0x44, 0x27, 0xaa, 0x67, 0xe5, 0xaf, 0x25, 0x48, 0x92, + 0x59, 0xae, 0x70, 0xe9, 0x65, 0x48, 0xbf, 0xd2, 0xcc, 0x21, 0x66, 0xd9, 0x50, 0x41, 0xe1, 0x5f, + 0xe8, 0x0f, 0x61, 0xce, 0x1d, 0x9e, 0x0c, 0x42, 0x53, 0x71, 0xb0, 0xff, 0xe8, 0x5a, 0xab, 0xf2, + 0x13, 0xef, 0xa8, 0x2e, 0x76, 0x01, 0x2b, 0x2f, 0x21, 0x45, 0x57, 0x7d, 0xc5, 0xfa, 0xee, 0x41, + 0xc1, 0xb3, 0x55, 0x7c, 0xd6, 0x31, 0x87, 0xae, 0xf1, 0x8a, 0x59, 0x4a, 0x41, 0xc9, 0x7b, 0x76, + 0x43, 0x90, 0xd0, 0x03, 0x28, 0x75, 0x1d, 0xbb, 0xaf, 0x1a, 0x96, 0x18, 0x94, 0xa0, 0x83, 0x8a, + 0x84, 0xda, 0x14, 0xc4, 0x88, 0xc9, 0xfe, 0x65, 0x01, 0xe6, 0xa8, 0x63, 0xcc, 0x04, 0x7b, 0x0f, + 0x42, 0xb0, 0xb7, 0x14, 0x81, 0x3d, 0xdf, 0xbb, 0x08, 0xea, 0xbd, 0x0d, 0xe9, 0xa1, 0x65, 0xbc, + 0x1c, 0xb2, 0xf9, 0xfd, 0xf8, 0xc4, 0x68, 0x33, 0x58, 0x25, 0xfa, 0x10, 0x10, 0x81, 0x02, 0xac, + 0x46, 0x06, 0xa6, 0xe8, 0x40, 0x99, 0x72, 0x6a, 0x53, 0x11, 0x34, 0x7d, 0x0d, 0x04, 0xdd, 0x03, + 0x19, 0x9f, 0x79, 0x8e, 0x16, 0x4e, 0x5d, 0x33, 0x54, 0x7e, 0xf5, 0xf2, 0x62, 0xad, 0xd4, 0x20, + 0xbc, 0xc9, 0x4a, 0x4a, 0x38, 0xc4, 0xd3, 0x89, 0x95, 0xcc, 0x73, 0x1d, 0xba, 0xe1, 0x60, 0x9a, + 0x70, 0xb9, 0xe5, 0xec, 0x7a, 0xe2, 0x8a, 0xc4, 0x6a, 0xe4, 0xd8, 0x37, 0xeb, 0x42, 0x50, 0x91, + 0x99, 0x2a, 0x9f, 0xe0, 0xa2, 0x23, 0xc8, 0x77, 0x59, 0x1e, 0xa6, 0xbe, 0xc0, 0xe7, 0x34, 0x63, + 0xcb, 0x6f, 0xff, 0x70, 0xf6, 
0x8c, 0x6d, 0x27, 0x4d, 0xae, 0xa0, 0x2c, 0x29, 0xd0, 0xf5, 0x99, + 0xe8, 0x39, 0x14, 0x43, 0x89, 0xd6, 0xc9, 0x39, 0x4d, 0x12, 0x6e, 0xa6, 0xb6, 0x10, 0x28, 0xda, + 0x39, 0x47, 0x5f, 0x00, 0x18, 0x7e, 0x00, 0xa0, 0xb9, 0x44, 0x7e, 0xfb, 0x83, 0x6b, 0x44, 0x0a, + 0x81, 0x2f, 0x81, 0x12, 0xf4, 0x1c, 0x4a, 0xc1, 0x17, 0x5d, 0x6c, 0xe1, 0xda, 0x8b, 0x65, 0x5a, + 0x8b, 0x21, 0x3d, 0x3b, 0x24, 0x63, 0x5f, 0x24, 0x59, 0x8e, 0xed, 0x1a, 0x1e, 0x0e, 0x9b, 0x41, + 0x91, 0x9a, 0x41, 0xe5, 0xf2, 0x62, 0x0d, 0xd5, 0x04, 0x7f, 0xb2, 0x29, 0xa0, 0xce, 0x08, 0x9f, + 0x19, 0x56, 0xc4, 0x80, 0x89, 0xc6, 0x52, 0x60, 0x58, 0x47, 0x81, 0x09, 0x8f, 0x19, 0x56, 0xc8, + 0xbc, 0x59, 0x89, 0x55, 0x88, 0x60, 0xcf, 0xdc, 0xcd, 0xb1, 0x27, 0xa2, 0x08, 0x35, 0x78, 0xe6, + 0x2a, 0xd3, 0xec, 0xff, 0x83, 0x19, 0x8d, 0x94, 0x24, 0x55, 0x02, 0x12, 0x68, 0x42, 0xfb, 0x08, + 0x50, 0xc7, 0xc1, 0x9a, 0x87, 0x75, 0x92, 0x39, 0x9a, 0x46, 0xc7, 0xf0, 0xcc, 0xf3, 0xf2, 0x7c, + 0xc8, 0xef, 0xe7, 0x39, 0xbf, 0xe1, 0xb3, 0xd1, 0x27, 0x90, 0x79, 0x85, 0x1d, 0xd7, 0xb0, 0xad, + 0x32, 0xa2, 0x60, 0xb2, 0xca, 0x33, 0xfb, 0xe5, 0x91, 0xf9, 0xbe, 0x64, 0xa3, 0x14, 0x31, 0x1c, + 0xed, 0x41, 0x11, 0x5b, 0x1d, 0x5b, 0x37, 0xac, 0x1e, 0xcd, 0x04, 0xcb, 0x0b, 0x41, 0xbe, 0xf3, + 0xfd, 0xc5, 0xda, 0x5b, 0x23, 0xf2, 0x0d, 0x3e, 0x96, 0x2c, 0x5b, 0x29, 0xe0, 0xd0, 0x17, 0xda, + 0x83, 0x8c, 0x88, 0xc9, 0x8b, 0xf4, 0x4c, 0x37, 0xa6, 0x1c, 0xc1, 0x58, 0x44, 0xe7, 0xfb, 0x12, + 0xe2, 0x24, 0xa7, 0xd7, 0x0d, 0x97, 0xe4, 0x22, 0x7a, 0x79, 0x29, 0x9c, 0xd3, 0x0b, 0x2a, 0xaa, + 0x01, 0xf4, 0xb0, 0xad, 0xb2, 0x0e, 0x54, 0x79, 0x99, 0x4e, 0xb7, 0x1a, 0x9a, 0xae, 0x87, 0xed, + 0x4d, 0xd1, 0xa7, 0x22, 0x65, 0x66, 0xd7, 0xe8, 0x89, 0x14, 0xa1, 0x87, 0x6d, 0x46, 0x40, 0x15, + 0xc8, 0x0d, 0x1c, 0xac, 0x1b, 0x1d, 0x52, 0x05, 0xde, 0x09, 0x61, 0x73, 0x40, 0xae, 0xac, 0x42, + 0xce, 0x47, 0x0d, 0x94, 0x81, 0x44, 0xf5, 0xa8, 0xc6, 0x9a, 0x0e, 0xf5, 0xc6, 0x51, 0x4d, 0x96, + 0x2a, 0xf7, 0x20, 0x49, 0x37, 0x9f, 0x87, 0xcc, 0x6e, 0x5b, 0x79, 
0x5e, 0x55, 0xea, 0xac, 0xd1, + 0xd1, 0x6c, 0x7d, 0xd9, 0x50, 0x8e, 0x1b, 0x75, 0x59, 0xc4, 0x85, 0x7f, 0x49, 0x00, 0x0a, 0xea, + 0xdd, 0x63, 0x9b, 0xd7, 0x8c, 0x3d, 0x98, 0xeb, 0xf8, 0x54, 0x76, 0x01, 0xd2, 0x7a, 0x7c, 0xa3, + 0xb4, 0xfd, 0xc9, 0x6f, 0xac, 0x99, 0x85, 0x8e, 0x30, 0x29, 0x30, 0xa6, 0x52, 0x27, 0x42, 0x0d, + 0xe5, 0x43, 0xf1, 0x91, 0x18, 0xa4, 0x40, 0xaa, 0x73, 0x8a, 0x3b, 0x2f, 0x78, 0x14, 0xfe, 0xed, + 0x29, 0x13, 0xd3, 0x54, 0x31, 0x64, 0xb8, 0x35, 0x22, 0x13, 0x4c, 0x2d, 0xd2, 0x03, 0xaa, 0x0a, + 0x29, 0x51, 0x78, 0x4d, 0x5e, 0x89, 0x58, 0x93, 0x7a, 0x33, 0x02, 0xb1, 0x42, 0xe8, 0xfa, 0x09, + 0xcc, 0x59, 0xb6, 0xa7, 0x92, 0xba, 0x8e, 0xa3, 0x00, 0xad, 0xd6, 0x8a, 0x3b, 0x32, 0xb7, 0xd5, + 0xc0, 0xe7, 0x8b, 0x96, 0xed, 0xb5, 0x86, 0x26, 0x2f, 0x85, 0x2a, 0x9f, 0x42, 0x29, 0x7a, 0x46, + 0x28, 0x07, 0xa9, 0xda, 0x5e, 0xa3, 0xb6, 0x2f, 0xc7, 0xd0, 0x1c, 0xe4, 0x77, 0xdb, 0x4a, 0xa3, + 0xf9, 0xb8, 0xa5, 0xee, 0x37, 0x7e, 0xce, 0x1a, 0x53, 0xad, 0xb6, 0x68, 0x4c, 0xf9, 0x55, 0x4e, + 0x4a, 0x4e, 0x57, 0xfe, 0x5b, 0x82, 0xd2, 0xa1, 0x63, 0xf4, 0x35, 0xe7, 0x7c, 0x1f, 0x9f, 0x1f, + 0xbd, 0xd6, 0x06, 0xe8, 0x73, 0x58, 0xb4, 0xf0, 0x6b, 0x75, 0xc0, 0xa8, 0xaa, 0x9f, 0x35, 0x4b, + 0x93, 0xbb, 0x96, 0xf3, 0x16, 0x7e, 0xcd, 0x35, 0x34, 0x79, 0xd2, 0xfc, 0x21, 0xe4, 0x6d, 0x93, + 0x97, 0xe5, 0x58, 0x74, 0x8e, 0xf2, 0x61, 0x21, 0xb0, 0x4d, 0x56, 0x85, 0xd3, 0x40, 0x9e, 0x27, + 0xf3, 0x89, 0xd1, 0x89, 0x09, 0xa3, 0x2d, 0xfc, 0x5a, 0x8c, 0xfe, 0x1c, 0x16, 0x89, 0xee, 0xb1, + 0xd5, 0x25, 0xa7, 0xac, 0xce, 0x36, 0xf5, 0xe8, 0xea, 0xb8, 0xf1, 0xfe, 0x99, 0x04, 0x14, 0xb4, + 0x87, 0x9e, 0xe8, 0x29, 0xd1, 0xcd, 0xff, 0x08, 0x8a, 0x64, 0x31, 0x41, 0xad, 0x24, 0x4d, 0xb9, + 0x0f, 0xb2, 0x66, 0x81, 0xc0, 0x44, 0x8a, 0x2c, 0x2a, 0x90, 0x8a, 0x4f, 0x93, 0xb2, 0x4d, 0x7d, + 0xa4, 0x94, 0xfa, 0xdb, 0x34, 0xa0, 0xc0, 0x06, 0x9f, 0x0e, 0x3d, 0x8d, 0x3a, 0x66, 0x15, 0xd2, + 0xdc, 0x22, 0x24, 0x6a, 0x69, 0xef, 0x4f, 0x75, 0x9e, 0x68, 0xbf, 0x62, 0x2f, 0xa6, 0x70, 0x41, + 0xf4, 
0xd3, 0x70, 0xbf, 0x39, 0xbf, 0xfd, 0xde, 0x6c, 0xf0, 0xbd, 0x17, 0x13, 0x8d, 0xe8, 0x7d, + 0x48, 0xb9, 0x1e, 0x01, 0x92, 0x04, 0x85, 0xff, 0xad, 0x29, 0xf2, 0xe3, 0x8b, 0xdf, 0x3c, 0x22, + 0x62, 0xc2, 0x7d, 0xa8, 0x0e, 0xf4, 0x1c, 0x72, 0x7e, 0xd6, 0xc3, 0x9b, 0xd7, 0x8f, 0x66, 0x57, + 0xe8, 0x03, 0x96, 0x80, 0x33, 0x5f, 0x17, 0xaa, 0x42, 0xbe, 0xcf, 0x87, 0x05, 0xb5, 0xed, 0x3a, + 0x4f, 0x3c, 0x41, 0x68, 0xa0, 0x09, 0x68, 0xe8, 0x4b, 0x01, 0x21, 0xd4, 0xa4, 0xe0, 0xec, 0xd8, + 0xa6, 0x79, 0xa2, 0x75, 0x5e, 0xd0, 0x86, 0x9c, 0x0f, 0xce, 0x82, 0x8a, 0xf6, 0x49, 0xfa, 0x28, + 0xdc, 0x8d, 0xb6, 0xd7, 0xf2, 0x33, 0xb4, 0x00, 0x05, 0x9c, 0xed, 0xc5, 0x94, 0x90, 0x38, 0x6a, + 0x43, 0x69, 0x10, 0x71, 0x39, 0x9e, 0xab, 0x3d, 0x98, 0x16, 0xb0, 0x23, 0x83, 0xf7, 0x62, 0xca, + 0x88, 0x38, 0xfa, 0x7d, 0x40, 0x9d, 0x31, 0x53, 0xa6, 0x6d, 0xb5, 0xab, 0x56, 0x39, 0x2a, 0xb0, + 0x17, 0x53, 0x26, 0xa8, 0xa9, 0x7c, 0x0e, 0x29, 0x7a, 0x9d, 0x24, 0x1e, 0x3c, 0x6b, 0xed, 0xb7, + 0xda, 0xcf, 0x5b, 0x0c, 0x62, 0xea, 0x8d, 0x83, 0xc6, 0x71, 0x43, 0x6d, 0xb7, 0x0e, 0x08, 0xc4, + 0xdc, 0x85, 0x25, 0x4e, 0xa8, 0xb6, 0xea, 0xea, 0x73, 0xa5, 0x29, 0x58, 0xf1, 0xca, 0x46, 0x38, + 0xe0, 0x64, 0x21, 0xd9, 0x6a, 0xb7, 0x1a, 0x72, 0x8c, 0x86, 0x9e, 0x7a, 0x5d, 0x96, 0x68, 0xe8, + 0x51, 0xda, 0x87, 0x02, 0x99, 0x76, 0x0a, 0x00, 0xba, 0x6f, 0x02, 0x4f, 0x92, 0xd9, 0xb4, 0x9c, + 0xa9, 0xfc, 0x9d, 0x04, 0x59, 0x92, 0xc9, 0x37, 0xad, 0xae, 0x8d, 0x1e, 0x41, 0x6e, 0xa0, 0x39, + 0xd8, 0xf2, 0x02, 0x07, 0x15, 0x0d, 0x96, 0xec, 0x21, 0x65, 0xf8, 0xf5, 0x7f, 0x96, 0x0d, 0x6c, + 0x5e, 0x55, 0x3d, 0xef, 0x82, 0xcc, 0xd5, 0xb9, 0x9d, 0x53, 0xdc, 0xd7, 0x88, 0x56, 0x56, 0xe2, + 0xbf, 0xcd, 0xb5, 0x96, 0x98, 0xd6, 0x23, 0xca, 0xf6, 0x75, 0x97, 0x06, 0x61, 0xaa, 0xf0, 0xe7, + 0x7f, 0xbd, 0x07, 0x73, 0x23, 0x81, 0xe5, 0x8a, 0x6a, 0x69, 0x9d, 0x56, 0x4b, 0x89, 0x00, 0x2e, + 0xfc, 0x6a, 0x29, 0xce, 0x0b, 0xa5, 0xc8, 0x66, 0x93, 0x33, 0x6e, 0xf6, 0x51, 0x90, 0x3c, 0x31, + 0x87, 0xb8, 0xcb, 0xa1, 0x68, 0xfe, 0x8a, 
0xbc, 0xe9, 0x10, 0xe6, 0xfb, 0xb6, 0x6e, 0x74, 0x49, + 0x9a, 0x40, 0xbc, 0xc9, 0x33, 0xfa, 0xac, 0x41, 0x9d, 0xdf, 0x7e, 0x27, 0x64, 0x45, 0x43, 0xcf, + 0x30, 0x37, 0x4f, 0xcd, 0xce, 0xe6, 0xb1, 0x78, 0x7e, 0xe3, 0x3b, 0x92, 0xc3, 0xd2, 0x84, 0x89, + 0x1e, 0x43, 0x46, 0x34, 0x05, 0xb2, 0x34, 0x15, 0x9f, 0x15, 0xc5, 0x44, 0xfa, 0xc4, 0xa5, 0xd1, + 0x2e, 0x94, 0x2c, 0x7c, 0x16, 0xee, 0x61, 0xe5, 0x22, 0x7e, 0x5e, 0x68, 0xe1, 0xb3, 0xc9, 0x0d, + 0xac, 0x82, 0x15, 0x70, 0x74, 0xf4, 0x05, 0x14, 0x23, 0x91, 0x83, 0x3b, 0xc9, 0x8c, 0xd0, 0xe8, + 0xe7, 0xc8, 0xa1, 0x80, 0x82, 0x76, 0x21, 0x23, 0x42, 0x57, 0x9e, 0xee, 0xf1, 0x7a, 0xca, 0x84, + 0x30, 0xda, 0x21, 0x91, 0xe7, 0xcc, 0x0b, 0x22, 0x5a, 0x21, 0xc8, 0x7a, 0x2f, 0x2f, 0xd6, 0xf2, + 0x64, 0x87, 0x13, 0x3a, 0x55, 0x79, 0xcb, 0xa7, 0xeb, 0xe8, 0x09, 0x80, 0xff, 0xec, 0xe9, 0xd2, + 0x5e, 0xea, 0xf4, 0xea, 0xe7, 0x50, 0x0c, 0x0c, 0x96, 0xa4, 0x84, 0xa4, 0xd1, 0x53, 0xc8, 0x09, + 0x88, 0x64, 0x75, 0xc9, 0x74, 0x2c, 0x19, 0x07, 0x6c, 0x01, 0xd3, 0xbe, 0x06, 0x92, 0x92, 0x99, + 0x58, 0x73, 0x31, 0x2f, 0x4e, 0x3e, 0x99, 0x31, 0x25, 0x63, 0xce, 0x55, 0x3b, 0xd5, 0xac, 0x1e, + 0x3e, 0x20, 0xf2, 0x3b, 0xf1, 0xb2, 0xa4, 0x30, 0x55, 0xa8, 0x05, 0x32, 0x3d, 0xb2, 0x30, 0xfe, + 0xcb, 0xf4, 0xd4, 0xde, 0x15, 0x8e, 0x4b, 0x4e, 0x6d, 0x6a, 0x0c, 0xa0, 0x36, 0xf5, 0x34, 0x88, + 0x03, 0xbf, 0x03, 0xa5, 0xae, 0xed, 0xf4, 0x35, 0x4f, 0x15, 0xce, 0x33, 0x1f, 0xb4, 0x31, 0xbe, + 0xbf, 0x58, 0x2b, 0xee, 0x52, 0xae, 0x70, 0x9c, 0x62, 0x37, 0xfc, 0x89, 0xf6, 0x44, 0xb8, 0x5c, + 0xa0, 0xd1, 0xed, 0xc3, 0x59, 0x77, 0x38, 0x1e, 0x2b, 0x5b, 0x90, 0xa6, 0x39, 0xa7, 0x5b, 0x5e, + 0xa4, 0xe7, 0x7e, 0xc3, 0xfc, 0x55, 0xe1, 0x5a, 0xd0, 0x01, 0x94, 0x74, 0x42, 0x21, 0x05, 0x11, + 0x6b, 0x93, 0x2c, 0x51, 0xbd, 0x6b, 0x53, 0xf4, 0x0a, 0xa0, 0x15, 0xd5, 0xb0, 0x10, 0x66, 0xad, + 0x94, 0x36, 0x64, 0xbb, 0x5a, 0xdf, 0x30, 0x0d, 0xec, 0x96, 0x97, 0xa9, 0x9e, 0x8f, 0xae, 0xf4, + 0xea, 0xd1, 0x96, 0xb9, 0x08, 0xae, 0x42, 0x89, 0xef, 0xdc, 0x94, 0x70, 0x4e, 
0x2e, 0xf1, 0xce, + 0xb8, 0x73, 0x8b, 0x96, 0x79, 0xa4, 0x7d, 0x4e, 0x9d, 0x9b, 0x7f, 0xe9, 0xe8, 0x3e, 0xc0, 0x2b, + 0x03, 0xbf, 0x56, 0x5f, 0x0e, 0xb1, 0x73, 0x5e, 0x2e, 0x87, 0xab, 0x1f, 0x42, 0xff, 0x82, 0x90, + 0xd1, 0xc7, 0x90, 0xd3, 0xf1, 0x00, 0x5b, 0xba, 0xdb, 0xb6, 0xca, 0x77, 0x69, 0xae, 0xb9, 0x70, + 0x79, 0xb1, 0x96, 0xab, 0x0b, 0x22, 0xc7, 0xd2, 0x60, 0x14, 0xfa, 0x0a, 0x0a, 0xec, 0x03, 0xeb, + 0x6d, 0x6b, 0xe7, 0xbc, 0xbc, 0x42, 0x37, 0xfd, 0x70, 0xc6, 0x4b, 0x09, 0x7a, 0x0b, 0x7e, 0x3b, + 0xb6, 0x1e, 0xd2, 0xa6, 0x44, 0x74, 0xa3, 0x3f, 0x80, 0x82, 0xb0, 0xe6, 0x27, 0xf6, 0x89, 0x5b, + 0x7e, 0xeb, 0xca, 0x5e, 0xe9, 0xe8, 0x5c, 0x4f, 0x03, 0x51, 0x81, 0x55, 0x61, 0x6d, 0xe8, 0x67, + 0x50, 0xf4, 0x9f, 0x66, 0xec, 0x81, 0xe7, 0x96, 0xdf, 0xa6, 0xce, 0xf8, 0x68, 0x56, 0x53, 0xe5, + 0xb2, 0xed, 0x01, 0x6d, 0x23, 0x87, 0xbe, 0xd0, 0x3d, 0xc8, 0xe9, 0x8e, 0x3d, 0x60, 0x31, 0xe3, + 0x9d, 0x75, 0x69, 0x23, 0xe1, 0x17, 0xb8, 0x8e, 0x3d, 0xa0, 0xc1, 0x40, 0x85, 0x92, 0x83, 0x07, + 0xa6, 0xd6, 0xc1, 0x7d, 0x12, 0xcd, 0xec, 0x6e, 0x79, 0x95, 0xce, 0xbe, 0x3d, 0xf3, 0x41, 0xfa, + 0xc2, 0xc2, 0x30, 0x43, 0xfa, 0xda, 0x5d, 0xf4, 0x0c, 0x40, 0x1b, 0xea, 0x86, 0xa7, 0xf6, 0x6d, + 0x1d, 0x97, 0xd7, 0xae, 0x7c, 0xb1, 0x1c, 0x55, 0x5e, 0x25, 0x82, 0x4f, 0x6d, 0x1d, 0xfb, 0x0f, + 0x0f, 0x82, 0x80, 0x3e, 0x86, 0x3c, 0xdd, 0xda, 0x57, 0xf6, 0x09, 0xb1, 0xcd, 0x75, 0xba, 0xb9, + 0x79, 0x7e, 0x97, 0xb9, 0xba, 0x63, 0x0f, 0x9e, 0xd8, 0x27, 0xd4, 0x62, 0xf8, 0x9f, 0x3a, 0x72, + 0xa1, 0xd0, 0xeb, 0xa8, 0x01, 0x7c, 0xde, 0xa3, 0xb7, 0xf8, 0xd9, 0x8c, 0x6b, 0x79, 0x5c, 0x9b, + 0x00, 0xa8, 0x0b, 0x22, 0x0e, 0x3c, 0xae, 0x09, 0x9a, 0xab, 0xe4, 0x7b, 0x1d, 0xff, 0x03, 0xbd, + 0x0f, 0x05, 0xd6, 0x45, 0xe1, 0x0e, 0x50, 0x09, 0x39, 0x40, 0x9e, 0x71, 0x98, 0x0b, 0xb4, 0x80, + 0xb7, 0x5b, 0x54, 0xcd, 0x55, 0xed, 0x2e, 0xbb, 0xb3, 0xfb, 0xb3, 0xc7, 0xf9, 0x12, 0x93, 0xae, + 0xba, 0xed, 0x2e, 0xbd, 0xd8, 0x0e, 0x14, 0xec, 0xa1, 0x77, 0x62, 0x0f, 0x2d, 0x5d, 0xed, 0xbe, + 0x70, 0xcb, 0xef, 
0xd2, 0xdd, 0x5e, 0xab, 0x34, 0xf6, 0x77, 0xd7, 0xe6, 0x8a, 0x76, 0xf7, 0x5d, + 0x25, 0x2f, 0xb4, 0xee, 0xbe, 0x70, 0xd1, 0x1f, 0x43, 0xde, 0xb0, 0x82, 0x39, 0x1e, 0x5c, 0x7f, + 0x0e, 0x24, 0x6a, 0x82, 0xa6, 0xe5, 0x4f, 0x01, 0x5c, 0x27, 0x99, 0xe1, 0x03, 0x28, 0xd9, 0xdd, + 0xae, 0x69, 0x58, 0x58, 0x75, 0xb0, 0xe6, 0xda, 0x56, 0xf9, 0xbd, 0xd0, 0x09, 0x16, 0x39, 0x4f, + 0xa1, 0x2c, 0x54, 0x81, 0x9c, 0x87, 0xfb, 0x03, 0xdb, 0xd1, 0x9c, 0xf3, 0xf2, 0xfb, 0xe1, 0xf7, + 0x1a, 0x9f, 0x8c, 0x4e, 0x60, 0x65, 0x68, 0xe1, 0xb3, 0x81, 0xed, 0x62, 0x5d, 0x1d, 0xcb, 0x30, + 0x37, 0x28, 0xc6, 0x3d, 0xe0, 0x8b, 0xba, 0xf3, 0x4c, 0x8c, 0x9c, 0x98, 0x6a, 0xde, 0x19, 0x4e, + 0x64, 0xeb, 0x2b, 0xbf, 0x94, 0x60, 0x7e, 0x2c, 0x46, 0xa2, 0x3f, 0x82, 0x8c, 0x65, 0xeb, 0xa1, + 0xd7, 0xb1, 0x06, 0x9f, 0x26, 0xdd, 0xb2, 0x75, 0xf6, 0x38, 0xf6, 0xa8, 0x67, 0x78, 0xa7, 0xc3, + 0x93, 0xcd, 0x8e, 0xdd, 0xdf, 0xf2, 0x0f, 0x51, 0x3f, 0x09, 0xfe, 0xde, 0x1a, 0xbc, 0xe8, 0x6d, + 0xd1, 0xbf, 0x06, 0x27, 0x9b, 0x4c, 0x4c, 0x49, 0x13, 0xad, 0x4d, 0x1d, 0x7d, 0x04, 0x73, 0xf8, + 0x6c, 0x60, 0x38, 0xa1, 0x3c, 0x31, 0x1e, 0xf2, 0xf9, 0x52, 0xc0, 0x24, 0x06, 0xc2, 0xdf, 0x2f, + 0xfe, 0x21, 0x0e, 0x73, 0x23, 0x11, 0x8a, 0x24, 0xc6, 0xf4, 0x2d, 0x39, 0x92, 0x18, 0x13, 0xca, + 0x15, 0xe9, 0x7a, 0xf8, 0xc7, 0x18, 0x89, 0xdb, 0xfe, 0x18, 0x23, 0xfa, 0x2e, 0x90, 0xba, 0xc6, + 0xbb, 0xc0, 0x4f, 0x60, 0xd9, 0x70, 0x55, 0xcb, 0xb6, 0x44, 0xfb, 0xc6, 0x2f, 0x0f, 0xc3, 0x0f, + 0xee, 0x0b, 0x86, 0xdb, 0xb2, 0x2d, 0xd6, 0xb8, 0xf1, 0x77, 0x1d, 0xbc, 0xcd, 0x67, 0xc6, 0xdf, + 0xe6, 0xfd, 0xf6, 0x4c, 0x52, 0x4e, 0xad, 0x7c, 0x23, 0x41, 0x2e, 0xfc, 0x13, 0xb3, 0x78, 0xb4, + 0x23, 0x31, 0x56, 0x2c, 0xdc, 0xf0, 0x95, 0x33, 0x7a, 0x0a, 0x89, 0xd9, 0x4f, 0x81, 0x5f, 0xed, + 0x9f, 0x40, 0x3e, 0x14, 0x7a, 0x46, 0x4b, 0x72, 0xe9, 0x06, 0x25, 0xf9, 0xbb, 0x90, 0xe6, 0x78, + 0xcb, 0x0c, 0xab, 0xc8, 0xa5, 0x53, 0x0c, 0x6b, 0x53, 0x5f, 0x11, 0x9c, 0xe5, 0xb3, 0xff, 0x5b, + 0x02, 0x0a, 0xe1, 0xd0, 0x44, 0x9c, 0xd3, 0xb0, 0x3a, 
0x0e, 0x8d, 0x0b, 0x74, 0xf6, 0x84, 0xff, + 0x98, 0x2a, 0xc8, 0x24, 0x60, 0xf5, 0x0d, 0x4b, 0xa5, 0x0f, 0x78, 0x11, 0xe3, 0xcd, 0xf6, 0x0d, + 0xeb, 0x4b, 0x42, 0xa5, 0x43, 0xb4, 0x33, 0x3e, 0x24, 0x11, 0x19, 0xa2, 0x9d, 0xb1, 0x21, 0x2b, + 0x34, 0xe7, 0x73, 0x3c, 0x5a, 0x98, 0x25, 0x42, 0x59, 0x9c, 0xe3, 0xa1, 0x55, 0xc8, 0xbc, 0x32, + 0x1c, 0x6f, 0xa8, 0x99, 0xb4, 0x06, 0x13, 0xd7, 0x2c, 0x88, 0xc8, 0x82, 0x52, 0x10, 0x8c, 0x5f, + 0x5b, 0xd8, 0xa1, 0x86, 0x93, 0xdf, 0xae, 0xde, 0x20, 0x1a, 0x07, 0x1f, 0x44, 0x91, 0x80, 0x2c, + 0x37, 0x4c, 0x5c, 0xf9, 0x1b, 0x09, 0x8a, 0x91, 0x61, 0xa8, 0x09, 0x73, 0x74, 0xe2, 0xb1, 0x76, + 0xd7, 0x3d, 0xff, 0xc7, 0x62, 0x84, 0x3d, 0xb1, 0xae, 0x2a, 0xda, 0x21, 0x96, 0x8e, 0x3e, 0x87, + 0x12, 0x53, 0xe5, 0x3f, 0xcb, 0x47, 0xcd, 0xaf, 0x40, 0x35, 0x45, 0xdf, 0xe6, 0x0b, 0x76, 0x40, + 0xd3, 0xc3, 0x2f, 0x8e, 0x2b, 0x16, 0xe4, 0x43, 0xd1, 0x7e, 0x06, 0xbb, 0xff, 0x31, 0x24, 0x7d, + 0x14, 0x9a, 0x31, 0x8a, 0x51, 0x01, 0x3e, 0xdf, 0xd7, 0x12, 0x2c, 0x4e, 0x8a, 0xba, 0x11, 0x7f, + 0x62, 0x86, 0x34, 0x93, 0x3f, 0xdd, 0x0f, 0x67, 0x43, 0xcc, 0xb8, 0xc4, 0x2b, 0x58, 0x90, 0x0f, + 0xbd, 0xe7, 0x9b, 0x38, 0xb3, 0xad, 0xb9, 0x88, 0x89, 0x93, 0x2a, 0x27, 0x64, 0xe4, 0x95, 0x47, + 0xa2, 0x0d, 0x03, 0x90, 0x3e, 0x7c, 0xb6, 0x73, 0xd0, 0xac, 0x4d, 0x6c, 0xa1, 0xa0, 0x3c, 0x64, + 0xda, 0xbb, 0xbb, 0x07, 0xcd, 0x56, 0x43, 0x4e, 0x54, 0x36, 0x20, 0xe7, 0x27, 0x36, 0xa8, 0x00, + 0xd9, 0x7a, 0xf3, 0xa8, 0xba, 0x73, 0xd0, 0xa8, 0xcb, 0x31, 0x54, 0x84, 0x9c, 0xd2, 0xa8, 0xd6, + 0x69, 0xa3, 0x46, 0x96, 0x3e, 0xcd, 0xfe, 0xe2, 0xeb, 0x35, 0x89, 0x03, 0x4f, 0x5a, 0xce, 0x3c, + 0x49, 0x66, 0x91, 0xbc, 0x50, 0xf9, 0xdf, 0x38, 0xa0, 0xba, 0xe6, 0x69, 0xc4, 0xfe, 0xae, 0xd1, + 0xce, 0x88, 0x5f, 0x71, 0x53, 0xd1, 0x12, 0x35, 0x71, 0xab, 0x12, 0x75, 0x62, 0xc3, 0x22, 0x79, + 0x9b, 0x86, 0xc5, 0x8d, 0xfa, 0x26, 0xe3, 0xe5, 0x55, 0xfa, 0xe6, 0xe5, 0x55, 0x70, 0x13, 0x95, + 0xcb, 0x34, 0x94, 0x8e, 0xcf, 0x07, 0xe1, 0x93, 0xbf, 0x51, 0xe7, 0x6b, 0x52, 0x7f, 0x2b, 
0x7e, + 0xfd, 0xfe, 0xd6, 0x15, 0x3f, 0x4c, 0x66, 0xd7, 0x9e, 0xbc, 0xe2, 0xda, 0xeb, 0x90, 0x7c, 0x61, + 0x58, 0xac, 0x3d, 0x5b, 0x9a, 0x7a, 0xe1, 0xd1, 0xdd, 0x6e, 0xee, 0x1b, 0x96, 0x2e, 0xe6, 0x21, + 0xd2, 0xe8, 0xe7, 0x50, 0xc0, 0xd6, 0xb0, 0xaf, 0xf6, 0x71, 0xff, 0x04, 0x3b, 0xe2, 0x9c, 0x1f, + 0xce, 0xa6, 0xad, 0x61, 0x0d, 0xfb, 0x4f, 0xa9, 0xa0, 0x48, 0x8a, 0xb1, 0x4f, 0x71, 0xd1, 0x43, + 0x48, 0x69, 0xa6, 0xa1, 0xb9, 0xbc, 0xe1, 0x75, 0xd5, 0xaf, 0xf4, 0xd8, 0x40, 0xf4, 0xbb, 0x50, + 0xd4, 0x1c, 0x47, 0x3b, 0xe7, 0xbf, 0x36, 0xd3, 0x69, 0x5b, 0x98, 0x5b, 0x0c, 0x49, 0x65, 0xab, + 0x84, 0x49, 0x7f, 0x60, 0x26, 0x0e, 0x22, 0xaf, 0xf9, 0xa4, 0x48, 0x8b, 0x2e, 0x77, 0xbb, 0x16, + 0x1d, 0xdc, 0xc6, 0xe2, 0xc7, 0x8d, 0x37, 0x7f, 0x73, 0xe3, 0x5d, 0xf9, 0x85, 0x04, 0x10, 0x9c, + 0x33, 0xfa, 0x31, 0xdc, 0x19, 0x9c, 0x9e, 0xbb, 0xf4, 0xc7, 0x7d, 0x0e, 0x1e, 0x38, 0xd8, 0xc5, + 0x16, 0x43, 0x56, 0x6a, 0xbc, 0x05, 0x65, 0x59, 0xb0, 0x95, 0x08, 0x17, 0x7d, 0x06, 0xcb, 0xe2, + 0x47, 0x81, 0x23, 0x72, 0xe1, 0x7c, 0x70, 0x89, 0x8f, 0x89, 0x0a, 0x73, 0xc0, 0x7c, 0x0b, 0x92, + 0xc4, 0x7e, 0x08, 0x34, 0x36, 0x5a, 0xcf, 0x9e, 0xca, 0x31, 0x94, 0x83, 0x54, 0xf5, 0xa0, 0x59, + 0x3d, 0x0a, 0xc3, 0x5d, 0xe5, 0xef, 0x13, 0x20, 0x33, 0x0b, 0xbf, 0xad, 0x9b, 0x4d, 0xcf, 0x58, + 0x7f, 0x73, 0x93, 0x37, 0x8a, 0x8a, 0xc9, 0x37, 0x8f, 0x8a, 0xa9, 0x37, 0x84, 0x8a, 0xe9, 0x5b, + 0xa0, 0x62, 0xe6, 0x8d, 0xa0, 0xe2, 0x37, 0x71, 0x80, 0xd0, 0x55, 0xfd, 0x34, 0xfc, 0xdf, 0x2e, + 0xa6, 0x37, 0x5f, 0x47, 0x92, 0xa7, 0xbd, 0x98, 0xf8, 0x4f, 0x19, 0x8f, 0x21, 0xab, 0xf3, 0x08, + 0xc7, 0xb3, 0x87, 0xa9, 0x5d, 0xce, 0xb1, 0x40, 0xb8, 0x17, 0x53, 0x7c, 0x61, 0xf4, 0x59, 0xe4, + 0x57, 0xbe, 0x0f, 0x66, 0xc2, 0xa4, 0x3d, 0xf1, 0x0b, 0x89, 0x2a, 0xa4, 0x19, 0x36, 0xf3, 0xbb, + 0x9f, 0xd6, 0x27, 0x1f, 0xb5, 0xd4, 0xbd, 0x98, 0xc2, 0x05, 0xf9, 0xdb, 0x49, 0x06, 0x52, 0x43, + 0xcb, 0xb0, 0xad, 0x1f, 0x2a, 0xe1, 0xb7, 0x79, 0x51, 0xfe, 0x90, 0xa8, 0x4f, 0xff, 0xd6, 0x3c, + 0xac, 0xb3, 0x57, 0x9c, 0x67, 
0xd6, 0x2b, 0x9f, 0x20, 0xa1, 0x12, 0x00, 0xe7, 0x1b, 0x56, 0x4f, + 0x8e, 0xd3, 0x9c, 0xc1, 0xb1, 0x07, 0x03, 0xf2, 0x95, 0xd8, 0xf9, 0xc1, 0xb7, 0xff, 0xb9, 0x1a, + 0xfb, 0xf6, 0x72, 0x55, 0xfa, 0xd5, 0xe5, 0xaa, 0xf4, 0xeb, 0xcb, 0x55, 0xe9, 0x3f, 0x2e, 0x57, + 0xa5, 0xbf, 0xf8, 0x6e, 0x35, 0xf6, 0xab, 0xef, 0x56, 0x63, 0xbf, 0xfe, 0x6e, 0x35, 0xf6, 0x7b, + 0x19, 0xbe, 0xd0, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x20, 0xad, 0xd0, 0xc9, 0xda, 0x33, 0x00, + 0x00, } diff --git a/pkg/sql/sqlbase/structured.proto b/pkg/sql/sqlbase/structured.proto index 4545f66c2217..534ca839c8db 100644 --- a/pkg/sql/sqlbase/structured.proto +++ b/pkg/sql/sqlbase/structured.proto @@ -611,6 +611,86 @@ message DescriptorMutation { optional bool rollback = 7 [(gogoproto.nullable) = false]; } +// A table descriptor is named through a name map stored in the +// system.namespace table: a map from {parent_id, table_name} -> id. +// This name map can be cached for performance on a node in the cluster +// making reassigning a name complicated. In particular, since a +// name cannot be withdrawn across a cluster in a transaction at +// timestamp T, we have to worry about the following: +// +// 1. A table is dropped at T, and the name and descriptor are still +// cached and used by transactions at timestamps >= T. +// 2. A table is renamed from foo to bar at T, and both names foo and bar +// can be used by transactions at timestamps >= T. +// 3. A name foo is reassigned from one table to another at T, and the name +// foo can reference two different tables at timestamps >= T. +// +// The system ensures that a name can be resolved only to a single +// descriptor at a timestamp thereby permitting 1 and 2, but not 3 +// (the name references two tables). +// +// The transaction at T is followed by a time period when names no longer +// a part of the namespace are drained from the system. 
Once the old name +// is drained from the system another transaction at timestamp S is +// executed to release the name for future use. The interval from T to S +// is called the name drain interval: If the T transaction is removing +// the name foo then, at timestamps above S, foo can no longer be resolved. +// +// Consider a transaction at T in which name B is dropped, a new name C is +// created. Name C is viable as soon as the transaction commits. +// When the transaction at S commits, the name B is released for reuse. +// +// The transaction at S runs through the schema changer, with the system +// returning a response to the client initiating transaction T only after +// transaction at S is committed. So effectively the SQL transaction once +// it returns can be followed by SQL transactions that do not observe +// old name mappings. +// +// Note: an exception to this is #19925 which needs to be fixed. +// +// In order for transaction at S to act properly the system.namespace +// table entry for an old name references the descriptor who was the +// prior owner of the name requiring draining. +// +// Before T: B -> Desc B +// +// After T and before S: B -> Desc B, C -> Desc C +// +// After S: C -> Desc C +// +// Between T and S the name B is drained and the system is unable +// to assign it to another descriptor. +// +// BEGIN; +// RENAME foo TO bar; +// CREATE foo; +// +// will fail because CREATE foo is executed at T. +// +// RENAME foo TO bar; +// CREATE foo; +// +// will succeed because the RENAME returns after S and CREATE foo is +// executed after S. +// +// The above scheme suffers from the problem that a transaction can observe +// the partial effect of a committed transaction during the drain interval. +// For instance during the drain interval a transaction can see the correct +// assignment for C, and the old assignments for B. 
+// +message NameInfo { + option (gogoproto.equal) = true; + // The database that the table belonged to before the rename (tables can be + // renamed from one db to another). + optional uint32 parent_id = 1 [(gogoproto.nullable) = false, + (gogoproto.customname) = "ParentID", (gogoproto.casttype) = "ID"]; + // The schemaID of the schema the table belongs to before the rename/drop. + // Required to correctly identify which namespace entry to reclaim. + optional uint32 parent_schema_id = 3 [(gogoproto.nullable) = false, + (gogoproto.customname) = "ParentSchemaID", (gogoproto.casttype) = "ID"]; + optional string name = 2 [(gogoproto.nullable) = false]; +} + // A TableDescriptor represents a table or view and is stored in a // structured metadata key. The TableDescriptor has a globally-unique ID, // while its member {Column,Index}Descriptors have locally-unique IDs. @@ -619,21 +699,17 @@ message TableDescriptor { // Needed for the descriptorProto interface. option (gogoproto.goproto_getters) = true; + // The following 5 fields: name, id, version, modification_time, and + // draining_names are metadata fields required by all descriptors to be + // leasable. The reason that they do not exist in a message of their own is + // to ease the migration burden as not all descriptors initially had all of + // these fields but many had some. + // The table name. It should be normalized using NormalizeName() before // comparing it. optional string name = 1 [(gogoproto.nullable) = false]; optional uint32 id = 3 [(gogoproto.nullable) = false, - (gogoproto.customname) = "ID", (gogoproto.casttype) = "ID"]; - // ID of the parent database. - optional uint32 parent_id = 4 [(gogoproto.nullable) = false, - (gogoproto.customname) = "ParentID", (gogoproto.casttype) = "ID"]; - // ID of the parent schema. For backwards compatibility, 0 means the table is - // scoped under the public physical schema (id 29). 
Because of this backward - // compatibility issue, this field should not be accessed directly or through - // the generated getter. Instead, use GetParentSchemaID() which is defined in - // structured.go. - optional uint32 unexposed_parent_schema_id = 40 [(gogoproto.nullable) = false, - (gogoproto.customname) = "UnexposedParentSchemaID", (gogoproto.casttype) = "ID"]; + (gogoproto.customname) = "ID", (gogoproto.casttype) = "ID"]; // Monotonically increasing version of the table descriptor. // // The design maintains two invariants: @@ -651,9 +727,6 @@ message TableDescriptor { // Multiple schema change mutations can be grouped together on a // particular version increment. optional uint32 version = 5 [(gogoproto.nullable) = false, (gogoproto.casttype) = "DescriptorVersion"]; - - reserved 6; - // Last modification time of the table descriptor. // Starting in 19.2 this field's value may sometime be zero-valued in which // case the MVCC timestamp of the row containing the value should be used to @@ -664,6 +737,27 @@ message TableDescriptor { // hlc timestamp to ensure that this field is set properly when extracted from // a Descriptor. optional util.hlc.Timestamp modification_time = 7 [(gogoproto.nullable) = false]; + // A list of draining names. The draining name entries are drained from + // the cluster wide name caches by incrementing the version for this + // descriptor and ensuring that there are no leases on prior + // versions of the descriptor. This field is then cleared and the version + // of the descriptor incremented. + repeated NameInfo draining_names = 21 [(gogoproto.nullable) = false]; + + // ID of the parent database. + optional uint32 parent_id = 4 [(gogoproto.nullable) = false, + (gogoproto.customname) = "ParentID", (gogoproto.casttype) = "ID"]; + // ID of the parent schema. For backwards compatibility, 0 means the table is + // scoped under the public physical schema (id 29). 
Because of this backward + // compatibility issue, this field should not be accessed directly or through + // the generated getter. Instead, use GetParentSchemaID() which is defined in + // structured.go. + optional uint32 unexposed_parent_schema_id = 40 [(gogoproto.nullable) = false, + (gogoproto.customname) = "UnexposedParentSchemaID", (gogoproto.casttype) = "ID"]; + + + reserved 6; + repeated ColumnDescriptor columns = 8 [(gogoproto.nullable) = false]; // next_column_id is used to ensure that deleted column ids are not reused. optional uint32 next_column_id = 9 [(gogoproto.nullable) = false, @@ -749,93 +843,6 @@ message TableDescriptor { repeated CheckConstraint checks = 20; - // A table descriptor is named through a name map stored in the - // system.namespace table: a map from {parent_id, table_name} -> id. - // This name map can be cached for performance on a node in the cluster - // making reassigning a name complicated. In particular, since a - // name cannot be withdrawn across a cluster in a transaction at - // timestamp T, we have to worry about the following: - // - // 1. A table is dropped at T, and the name and descriptor are still - // cached and used by transactions at timestamps >= T. - // 2. A table is renamed from foo to bar at T, and both names foo and bar - // can be used by transactions at timestamps >= T. - // 3. A name foo is reassigned from one table to another at T, and the name - // foo can reference two different tables at timestamps >= T. - // - // The system ensures that a name can be resolved only to a single - // descriptor at a timestamp thereby permitting 1 and 2, but not 3 - // (the name references two tables). - // - // The transaction at T is followed by a time period when names no longer - // a part of the namespace are drained from the system. Once the old name - // is drained from the system another transaction at timestamp S is - // executed to release the name for future use. 
The interval from T to S - // is called the name drain interval: If the T transaction is removing - // the name foo then, at timestamps above S, foo can no longer be resolved. - // - // Consider a transaction at T in which name B is dropped, a new name C is - // created. Name C is viable as soon as the transaction commits. - // When the transaction at S commits, the name B is released for reuse. - // - // The transaction at S runs through the schema changer, with the system - // returning a response to the client initiating transaction T only after - // transaction at S is committed. So effectively the SQL transaction once - // it returns can be followed by SQL transactions that do not observe - // old name mappings. - // - // Note: an exception to this is #19925 which needs to be fixed. - // - // In order for transaction at S to act properly the system.namespace - // table entry for an old name references the descriptor who was the - // prior owner of the name requiring draining. - // - // Before T: B -> Desc B - // - // After T and before S: B -> Desc B, C -> Desc C - // - // After S: C -> Desc C - // - // Between T and S the name B is drained and the system is unable - // to assign it to another descriptor. - // - // BEGIN; - // RENAME foo TO bar; - // CREATE foo; - // - // will fail because CREATE foo is executed at T. - // - // RENAME foo TO bar; - // CREATE foo; - // - // will succeed because the RENAME returns after S and CREATE foo is - // executed after S. - // - // The above scheme suffers from the problem that a transaction can observe - // the partial effect of a committed transaction during the drain interval. - // For instance during the drain interval a transaction can see the correct - // assignment for C, and the old assignments for B. - // - message NameInfo { - option (gogoproto.equal) = true; - // The database that the table belonged to before the rename (tables can be - // renamed from one db to another). 
- optional uint32 parent_id = 1 [(gogoproto.nullable) = false, - (gogoproto.customname) = "ParentID", (gogoproto.casttype) = "ID"]; - // The schemaID of the schema the table belongs to before the rename/drop. - // Required to correctly identify which namespace entry to reclaim. - optional uint32 parent_schema_id = 3 [(gogoproto.nullable) = false, - (gogoproto.customname) = "ParentSchemaID", (gogoproto.casttype) = "ID"]; - optional string name = 2 [(gogoproto.nullable) = false]; - } - - // A list of draining names. The draining name entries are drained from - // the cluster wide name caches by incrementing the version for this - // descriptor and ensuring that there are no leases on prior - // versions of the descriptor. This field is then cleared and the version - // of the descriptor incremented. - repeated NameInfo draining_names = 21 [(gogoproto.nullable) = false]; - // The TableDescriptor is used for views in addition to tables. Views // use mostly the same fields as tables, but need to track the actual // query from the view definition as well. @@ -1019,9 +1026,16 @@ message DatabaseDescriptor { // Needed for the descriptorProto interface. option (gogoproto.goproto_getters) = true; + // Shared descriptor fields. See the discussion at the top of TableDescriptor. + optional string name = 1 [(gogoproto.nullable) = false]; optional uint32 id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID", (gogoproto.casttype) = "ID"]; + // Last modification time of the descriptor. + optional util.hlc.Timestamp modification_time = 4 [(gogoproto.nullable) = false]; + optional uint32 version = 5 [(gogoproto.nullable) = false, (gogoproto.casttype) = "DescriptorVersion"]; + repeated NameInfo draining_names = 6 [(gogoproto.nullable) = false]; + optional PrivilegeDescriptor privileges = 3; } @@ -1033,6 +1047,21 @@ message TypeDescriptor { // Needed for the descriptorProto interface. option (gogoproto.goproto_getters) = true; + // Shared descriptor fields. 
See the discussion at the top of TableDescriptor. + + // name is the current name of this user defined type. + optional string name = 3 [(gogoproto.nullable) = false]; + + // id is the globally unique ID for this type. + optional uint32 id = 4 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID", (gogoproto.casttype) = "ID"]; + + + optional uint32 version = 9 [(gogoproto.nullable) = false, (gogoproto.casttype) = "DescriptorVersion"]; + // Last modification time of the descriptor. + optional util.hlc.Timestamp modification_time = 10 [(gogoproto.nullable) = false]; + repeated NameInfo draining_names = 11 [(gogoproto.nullable) = false]; + + // Fields that are shared among all kinds of user defined types. // parent_id represents the ID of the database that this type resides in. @@ -1043,12 +1072,6 @@ message TypeDescriptor { optional uint32 parent_schema_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "ParentSchemaID", (gogoproto.casttype) = "ID"]; - // name is the current name of this user defined type. - optional string name = 3 [(gogoproto.nullable) = false]; - - // id is the globally unique ID for this type. - optional uint32 id = 4 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID", (gogoproto.casttype) = "ID"]; - // array_type_id is the globally unique ID for the implicitly created array // type for this type. It is only set when the type descriptor points to a // non-array type. @@ -1083,7 +1106,6 @@ message TypeDescriptor { // alias is the types.T that this descriptor is an alias for. optional sql.sem.types.T alias = 7; - // TODO (rohany): Do we need a draining names like the table descriptor? } // SchemaDescriptor represents a physical schema and is stored in a structured @@ -1093,9 +1115,7 @@ message SchemaDescriptor { // Needed for the descriptorProto interface. option (gogoproto.goproto_getters) = true; - // parent_id refers to the database the schema is in. 
- optional uint32 parent_id = 1 - [(gogoproto.nullable) = false, (gogoproto.customname) = "ParentID", (gogoproto.casttype) = "ID"]; + // Shared descriptor fields. See the discussion at the top of TableDescriptor. // name is the name of the schema. optional string name = 2 [(gogoproto.nullable) = false]; @@ -1104,6 +1124,15 @@ message SchemaDescriptor { optional uint32 id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID", (gogoproto.casttype) = "ID"]; + // Last modification time of the descriptor. + optional util.hlc.Timestamp modification_time = 5 [(gogoproto.nullable) = false]; + optional uint32 version = 6 [(gogoproto.nullable) = false, (gogoproto.casttype) = "DescriptorVersion"]; + repeated NameInfo draining_names = 7 [(gogoproto.nullable) = false]; + + // parent_id refers to the database the schema is in. + optional uint32 parent_id = 1 + [(gogoproto.nullable) = false, (gogoproto.customname) = "ParentID", (gogoproto.casttype) = "ID"]; + // privileges contains the privileges for the schema. optional PrivilegeDescriptor privileges = 4; } diff --git a/pkg/sql/sqlbase/system.go b/pkg/sql/sqlbase/system.go index b2fd8c90ebf6..d01ed4481c7b 100644 --- a/pkg/sql/sqlbase/system.go +++ b/pkg/sql/sqlbase/system.go @@ -394,15 +394,18 @@ var ( singleID1 = []ColumnID{1} ) +// SystemDatabaseName is the name of the system database. +const SystemDatabaseName = "system" + // MakeSystemDatabaseDesc constructs a copy of the system database // descriptor. -func MakeSystemDatabaseDesc() DatabaseDescriptor { - return DatabaseDescriptor{ - Name: "system", - ID: keys.SystemDatabaseID, +func MakeSystemDatabaseDesc() *ImmutableDatabaseDescriptor { + return NewInitialDatabaseDescriptorWithPrivileges( + keys.SystemDatabaseID, + SystemDatabaseName, // Assign max privileges to root user. 
- Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.SystemDatabaseID]), - } + NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.SystemDatabaseID]), + ) } // These system config TableDescriptor literals should match the descriptor @@ -419,7 +422,7 @@ var ( NamespaceTableName = "namespace" // DeprecatedNamespaceTable is the descriptor for the deprecated namespace table. - DeprecatedNamespaceTable = TableDescriptor{ + DeprecatedNamespaceTable = NewImmutableTableDescriptor(TableDescriptor{ Name: NamespaceTableName, ID: keys.DeprecatedNamespaceTableID, ParentID: keys.SystemDatabaseID, @@ -449,7 +452,7 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.DeprecatedNamespaceTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // NamespaceTable is the descriptor for the namespace table. Note that this // table should only be written to via KV puts, not via the SQL layer. Some @@ -463,7 +466,7 @@ var ( // // TODO(solon): in 20.2, we should change the Name of this descriptor // back to "namespace". - NamespaceTable = TableDescriptor{ + NamespaceTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "namespace2", ID: keys.NamespaceTableID, ParentID: keys.SystemDatabaseID, @@ -494,10 +497,10 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.DeprecatedNamespaceTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // DescriptorTable is the descriptor for the descriptor table. 
- DescriptorTable = TableDescriptor{ + DescriptorTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "descriptor", ID: keys.DescriptorTableID, Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.DescriptorTableID]), @@ -521,13 +524,13 @@ var ( NextIndexID: 2, FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) falseBoolString = "false" trueBoolString = "true" // UsersTable is the descriptor for the users table. - UsersTable = TableDescriptor{ + UsersTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "users", ID: keys.UsersTableID, ParentID: keys.SystemDatabaseID, @@ -550,10 +553,10 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.UsersTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // ZonesTable is the descriptor for the zones table. - ZonesTable = TableDescriptor{ + ZonesTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "zones", ID: keys.ZonesTableID, ParentID: keys.SystemDatabaseID, @@ -583,11 +586,11 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ZonesTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // SettingsTable is the descriptor for the settings table. // It contains all cluster settings for which a value has been set. - SettingsTable = TableDescriptor{ + SettingsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "settings", ID: keys.SettingsTableID, ParentID: keys.SystemDatabaseID, @@ -614,10 +617,10 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.SettingsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // DescIDSequence is the descriptor for the descriptor ID sequence. 
- DescIDSequence = TableDescriptor{ + DescIDSequence = NewImmutableTableDescriptor(TableDescriptor{ Name: "descriptor_id_seq", ID: keys.DescIDSequenceID, ParentID: keys.SystemDatabaseID, @@ -648,9 +651,9 @@ var ( }, Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.DescIDSequenceID]), FormatVersion: InterleavedFormatVersion, - } + }) - TenantsTable = TableDescriptor{ + TenantsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "tenants", ID: keys.TenantsTableID, ParentID: keys.SystemDatabaseID, @@ -679,7 +682,7 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.TenantsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) ) // These system TableDescriptor literals should match the descriptor that @@ -689,7 +692,7 @@ var ( // suggestions on writing and maintaining them. var ( // LeaseTable is the descriptor for the leases table. - LeaseTable = TableDescriptor{ + LeaseTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "lease", ID: keys.LeaseTableID, ParentID: keys.SystemDatabaseID, @@ -719,12 +722,12 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.LeaseTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) uuidV4String = "uuid_v4()" // EventLogTable is the descriptor for the event log table. - EventLogTable = TableDescriptor{ + EventLogTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "eventlog", ID: keys.EventLogTableID, ParentID: keys.SystemDatabaseID, @@ -760,12 +763,12 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.EventLogTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) uniqueRowIDString = "unique_rowid()" // RangeEventTable is the descriptor for the range log table. 
- RangeEventTable = TableDescriptor{ + RangeEventTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "rangelog", ID: keys.RangeEventTableID, ParentID: keys.SystemDatabaseID, @@ -803,10 +806,10 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.RangeEventTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // UITable is the descriptor for the ui table. - UITable = TableDescriptor{ + UITable = NewImmutableTableDescriptor(TableDescriptor{ Name: "ui", ID: keys.UITableID, ParentID: keys.SystemDatabaseID, @@ -829,13 +832,13 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.UITableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) nowString = "now():::TIMESTAMP" nowTZString = "now():::TIMESTAMPTZ" // JobsTable is the descriptor for the jobs table. - JobsTable = TableDescriptor{ + JobsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "jobs", ID: keys.JobsTableID, ParentID: keys.SystemDatabaseID, @@ -899,10 +902,10 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.JobsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // WebSessions table to authenticate sessions over stateless connections. - WebSessionsTable = TableDescriptor{ + WebSessionsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "web_sessions", ID: keys.WebSessionsTableID, ParentID: keys.SystemDatabaseID, @@ -964,10 +967,10 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.WebSessionsTableID]), NextMutationID: 1, FormatVersion: 3, - } + }) // TableStatistics table to hold statistics about columns and column groups. 
- TableStatisticsTable = TableDescriptor{ + TableStatisticsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "table_statistics", ID: keys.TableStatisticsTableID, ParentID: keys.SystemDatabaseID, @@ -1017,12 +1020,12 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.TableStatisticsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) latLonDecimal = types.MakeDecimal(18, 15) // LocationsTable is the descriptor for the locations table. - LocationsTable = TableDescriptor{ + LocationsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "locations", ID: keys.LocationsTableID, ParentID: keys.SystemDatabaseID, @@ -1057,10 +1060,10 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.LocationsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // RoleMembersTable is the descriptor for the role_members table. - RoleMembersTable = TableDescriptor{ + RoleMembersTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "role_members", ID: keys.RoleMembersTableID, ParentID: keys.SystemDatabaseID, @@ -1123,10 +1126,10 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.RoleMembersTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // CommentsTable is the descriptor for the comments table. 
- CommentsTable = TableDescriptor{ + CommentsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "comments", ID: keys.CommentsTableID, ParentID: keys.SystemDatabaseID, @@ -1157,9 +1160,9 @@ var ( Privileges: newCommentPrivilegeDescriptor(SystemAllowedPrivileges[keys.CommentsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) - ReportsMetaTable = TableDescriptor{ + ReportsMetaTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "reports_meta", ID: keys.ReportsMetaTableID, ParentID: keys.SystemDatabaseID, @@ -1194,13 +1197,13 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ReportsMetaTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) ReplicationConstraintStatsTableTTL = time.Minute * 10 // TODO(andrei): In 20.1 we should add a foreign key reference to the // reports_meta table. Until then, it would cost us having to create an index // on report_id. - ReplicationConstraintStatsTable = TableDescriptor{ + ReplicationConstraintStatsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "replication_constraint_stats", ID: keys.ReplicationConstraintStatsTableID, ParentID: keys.SystemDatabaseID, @@ -1248,12 +1251,12 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ReplicationConstraintStatsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // TODO(andrei): In 20.1 we should add a foreign key reference to the // reports_meta table. Until then, it would cost us having to create an index // on report_id. 
- ReplicationCriticalLocalitiesTable = TableDescriptor{ + ReplicationCriticalLocalitiesTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "replication_critical_localities", ID: keys.ReplicationCriticalLocalitiesTableID, ParentID: keys.SystemDatabaseID, @@ -1297,13 +1300,13 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ReplicationCriticalLocalitiesTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) ReplicationStatsTableTTL = time.Minute * 10 // TODO(andrei): In 20.1 we should add a foreign key reference to the // reports_meta table. Until then, it would cost us having to create an index // on report_id. - ReplicationStatsTable = TableDescriptor{ + ReplicationStatsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "replication_stats", ID: keys.ReplicationStatsTableID, ParentID: keys.SystemDatabaseID, @@ -1349,9 +1352,9 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ReplicationStatsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) - ProtectedTimestampsMetaTable = TableDescriptor{ + ProtectedTimestampsMetaTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "protected_ts_meta", ID: keys.ProtectedTimestampsMetaTableID, ParentID: keys.SystemDatabaseID, @@ -1400,9 +1403,9 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ReplicationStatsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) - ProtectedTimestampsRecordsTable = TableDescriptor{ + ProtectedTimestampsRecordsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "protected_ts_records", ID: keys.ProtectedTimestampsRecordsTableID, ParentID: keys.SystemDatabaseID, @@ -1441,10 +1444,10 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ProtectedTimestampsRecordsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // 
RoleOptionsTable is the descriptor for the role_options table. - RoleOptionsTable = TableDescriptor{ + RoleOptionsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "role_options", ID: keys.RoleOptionsTableID, ParentID: keys.SystemDatabaseID, @@ -1478,9 +1481,9 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.RoleOptionsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) - StatementBundleChunksTable = TableDescriptor{ + StatementBundleChunksTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "statement_bundle_chunks", ID: keys.StatementBundleChunksTableID, ParentID: keys.SystemDatabaseID, @@ -1505,11 +1508,11 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.StatementBundleChunksTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // TODO(andrei): Add a foreign key reference to the statement_diagnostics table when // it no longer requires us to create an index on statement_diagnostics_id. - StatementDiagnosticsRequestsTable = TableDescriptor{ + StatementDiagnosticsRequestsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "statement_diagnostics_requests", ID: keys.StatementDiagnosticsRequestsTableID, ParentID: keys.SystemDatabaseID, @@ -1551,9 +1554,9 @@ var ( SystemAllowedPrivileges[keys.StatementDiagnosticsRequestsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) - StatementDiagnosticsTable = TableDescriptor{ + StatementDiagnosticsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "statement_diagnostics", ID: keys.StatementDiagnosticsTableID, ParentID: keys.SystemDatabaseID, @@ -1584,10 +1587,10 @@ var ( SystemAllowedPrivileges[keys.StatementDiagnosticsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) // ScheduledJobsTable is the descriptor for the scheduled jobs table. 
- ScheduledJobsTable = TableDescriptor{ + ScheduledJobsTable = NewImmutableTableDescriptor(TableDescriptor{ Name: "scheduled_jobs", ID: keys.ScheduledJobsTableID, ParentID: keys.SystemDatabaseID, @@ -1640,7 +1643,7 @@ var ( Privileges: NewCustomSuperuserPrivilegeDescriptor(SystemAllowedPrivileges[keys.ScheduledJobsTableID]), FormatVersion: InterleavedFormatVersion, NextMutationID: 1, - } + }) ) // addSystemDescriptorsToSchema populates the supplied MetadataSchema @@ -1649,58 +1652,60 @@ var ( // can be used to persist these descriptors to the cockroach store. func addSystemDescriptorsToSchema(target *MetadataSchema) { // Add system database. - target.AddDescriptor(keys.RootNamespaceID, &SystemDB) + target.AddDescriptor(keys.RootNamespaceID, SystemDB) // Add system config tables. - target.AddDescriptor(keys.SystemDatabaseID, &DeprecatedNamespaceTable) - target.AddDescriptor(keys.SystemDatabaseID, &NamespaceTable) - target.AddDescriptor(keys.SystemDatabaseID, &DescriptorTable) - target.AddDescriptor(keys.SystemDatabaseID, &UsersTable) + target.AddDescriptor(keys.SystemDatabaseID, DeprecatedNamespaceTable) + target.AddDescriptor(keys.SystemDatabaseID, NamespaceTable) + target.AddDescriptor(keys.SystemDatabaseID, DescriptorTable) + target.AddDescriptor(keys.SystemDatabaseID, UsersTable) if target.codec.ForSystemTenant() { - target.AddDescriptor(keys.SystemDatabaseID, &ZonesTable) + target.AddDescriptor(keys.SystemDatabaseID, ZonesTable) } - target.AddDescriptor(keys.SystemDatabaseID, &SettingsTable) + target.AddDescriptor(keys.SystemDatabaseID, SettingsTable) if !target.codec.ForSystemTenant() { // Only add the descriptor ID sequence if this is a non-system tenant. // System tenants use the global descIDGenerator key. See #48513. - target.AddDescriptor(keys.SystemDatabaseID, &DescIDSequence) + target.AddDescriptor(keys.SystemDatabaseID, DescIDSequence) } if target.codec.ForSystemTenant() { // Only add the tenant table if this is the system tenant. 
- target.AddDescriptor(keys.SystemDatabaseID, &TenantsTable) + target.AddDescriptor(keys.SystemDatabaseID, TenantsTable) } // Add all the other system tables. - target.AddDescriptor(keys.SystemDatabaseID, &LeaseTable) - target.AddDescriptor(keys.SystemDatabaseID, &EventLogTable) - target.AddDescriptor(keys.SystemDatabaseID, &RangeEventTable) - target.AddDescriptor(keys.SystemDatabaseID, &UITable) - target.AddDescriptor(keys.SystemDatabaseID, &JobsTable) - target.AddDescriptor(keys.SystemDatabaseID, &WebSessionsTable) - target.AddDescriptor(keys.SystemDatabaseID, &RoleOptionsTable) + target.AddDescriptor(keys.SystemDatabaseID, LeaseTable) + target.AddDescriptor(keys.SystemDatabaseID, EventLogTable) + target.AddDescriptor(keys.SystemDatabaseID, RangeEventTable) + target.AddDescriptor(keys.SystemDatabaseID, UITable) + target.AddDescriptor(keys.SystemDatabaseID, JobsTable) + target.AddDescriptor(keys.SystemDatabaseID, WebSessionsTable) + target.AddDescriptor(keys.SystemDatabaseID, RoleOptionsTable) // Tables introduced in 2.0, added here for 2.1. - target.AddDescriptor(keys.SystemDatabaseID, &TableStatisticsTable) - target.AddDescriptor(keys.SystemDatabaseID, &LocationsTable) - target.AddDescriptor(keys.SystemDatabaseID, &RoleMembersTable) + target.AddDescriptor(keys.SystemDatabaseID, TableStatisticsTable) + target.AddDescriptor(keys.SystemDatabaseID, LocationsTable) + target.AddDescriptor(keys.SystemDatabaseID, RoleMembersTable) // The CommentsTable has been introduced in 2.2. It was added here since it // was introduced, but it's also created as a migration for older clusters. 
- target.AddDescriptor(keys.SystemDatabaseID, &CommentsTable) - target.AddDescriptor(keys.SystemDatabaseID, &ReportsMetaTable) - target.AddDescriptor(keys.SystemDatabaseID, &ReplicationConstraintStatsTable) - target.AddDescriptor(keys.SystemDatabaseID, &ReplicationStatsTable) - target.AddDescriptor(keys.SystemDatabaseID, &ReplicationCriticalLocalitiesTable) - target.AddDescriptor(keys.SystemDatabaseID, &ProtectedTimestampsMetaTable) - target.AddDescriptor(keys.SystemDatabaseID, &ProtectedTimestampsRecordsTable) + target.AddDescriptor(keys.SystemDatabaseID, CommentsTable) + target.AddDescriptor(keys.SystemDatabaseID, ReportsMetaTable) + target.AddDescriptor(keys.SystemDatabaseID, ReplicationConstraintStatsTable) + target.AddDescriptor(keys.SystemDatabaseID, ReplicationStatsTable) + target.AddDescriptor(keys.SystemDatabaseID, ReplicationCriticalLocalitiesTable) + target.AddDescriptor(keys.SystemDatabaseID, ProtectedTimestampsMetaTable) + target.AddDescriptor(keys.SystemDatabaseID, ProtectedTimestampsRecordsTable) // Tables introduced in 20.1. - target.AddDescriptor(keys.SystemDatabaseID, &StatementBundleChunksTable) - target.AddDescriptor(keys.SystemDatabaseID, &StatementDiagnosticsRequestsTable) - target.AddDescriptor(keys.SystemDatabaseID, &StatementDiagnosticsTable) + + target.AddDescriptor(keys.SystemDatabaseID, StatementBundleChunksTable) + target.AddDescriptor(keys.SystemDatabaseID, StatementDiagnosticsRequestsTable) + target.AddDescriptor(keys.SystemDatabaseID, StatementDiagnosticsTable) // Tables introduced in 20.2. 
- target.AddDescriptor(keys.SystemDatabaseID, &ScheduledJobsTable) + + target.AddDescriptor(keys.SystemDatabaseID, ScheduledJobsTable) } // addSplitIDs adds a split point for each of the PseudoTableIDs to the supplied diff --git a/pkg/sql/sqlbase/system_test.go b/pkg/sql/sqlbase/system_test.go index ba8e457381c2..2ddbf20b8bd1 100644 --- a/pkg/sql/sqlbase/system_test.go +++ b/pkg/sql/sqlbase/system_test.go @@ -18,15 +18,15 @@ import ( ) func TestShouldSplitAtDesc(t *testing.T) { - for inner, should := range map[DescriptorProto]bool{ - &TableDescriptor{}: true, - &TableDescriptor{ViewQuery: "SELECT"}: false, - &DatabaseDescriptor{}: false, - &TypeDescriptor{}: false, - &SchemaDescriptor{}: false, + for inner, should := range map[DescriptorInterface]bool{ + NewImmutableTableDescriptor(TableDescriptor{}): true, + NewImmutableTableDescriptor(TableDescriptor{ViewQuery: "SELECT"}): false, + NewInitialDatabaseDescriptor(42, "db"): false, + NewMutableCreatedTypeDescriptor(TypeDescriptor{}): false, + NewImmutableSchemaDescriptor(SchemaDescriptor{}): false, } { var rawDesc roachpb.Value - require.NoError(t, rawDesc.SetProto(WrapDescriptor(inner))) + require.NoError(t, rawDesc.SetProto(inner.DescriptorProto())) require.Equal(t, should, ShouldSplitAtDesc(&rawDesc)) } } diff --git a/pkg/sql/sqlbase/table.go b/pkg/sql/sqlbase/table.go index d392da1c2f25..4d7187d3ab53 100644 --- a/pkg/sql/sqlbase/table.go +++ b/pkg/sql/sqlbase/table.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" "golang.org/x/text/language" @@ -579,6 +580,9 @@ func FindFKOriginIndexInTxn( // ConditionFailedError on mismatch. 
We don't directly use CPut with protos // because the marshaling is not guaranteed to be stable and also because it's // sensitive to things like missing vs default values of fields. +// +// TODO(ajwerner): Make this take a TableDescriptorInterface and probably add +// an equality method on that interface or something like that. func ConditionalGetTableDescFromTxn( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, expectation *TableDescriptor, ) (*roachpb.Value, error) { @@ -596,7 +600,7 @@ func ConditionalGetTableDescFromTxn( } existing.Table(existingKV.Value.Timestamp) } - wrapped := WrapDescriptor(expectation) + wrapped := wrapDescriptor(expectation) if !existing.Equal(wrapped) { return nil, &roachpb.ConditionFailedError{ActualValue: existingKV.Value} } @@ -644,3 +648,44 @@ func HasAddingTableError(err error) bool { func HasInactiveTableError(err error) bool { return errors.HasType(err, (*inactiveTableError)(nil)) } + +// InitTableDescriptor returns a blank TableDescriptor. +func InitTableDescriptor( + id, parentID, parentSchemaID ID, + name string, + creationTime hlc.Timestamp, + privileges *PrivilegeDescriptor, + temporary bool, +) MutableTableDescriptor { + return MutableTableDescriptor{TableDescriptor: TableDescriptor{ + ID: id, + Name: name, + ParentID: parentID, + UnexposedParentSchemaID: parentSchemaID, + FormatVersion: InterleavedFormatVersion, + Version: 1, + ModificationTime: creationTime, + Privileges: privileges, + CreateAsOfTime: creationTime, + Temporary: temporary, + }} +} + +// NewMutableTableDescriptorAsReplacement creates a new MutableTableDescriptor +// as a replacement of an existing table. This is utilized with truncate. +// +// The passed readTimestamp is serialized into the descriptor's ReplacementOf +// field for debugging purposes. The passed id will be the ID of the newly +// returned replacement. 
+func NewMutableTableDescriptorAsReplacement( + id ID, replacementOf *MutableTableDescriptor, readTimestamp hlc.Timestamp, +) *MutableTableDescriptor { + replacement := &MutableTableDescriptor{TableDescriptor: replacementOf.TableDescriptor} + replacement.ID = id + replacement.Version = 1 + replacement.ReplacementOf = TableDescriptor_Replacement{ + ID: replacementOf.ID, + Time: readTimestamp, + } + return replacement +} diff --git a/pkg/sql/sqlbase/table_desc.go b/pkg/sql/sqlbase/table_desc.go new file mode 100644 index 000000000000..ac3d8b005e28 --- /dev/null +++ b/pkg/sql/sqlbase/table_desc.go @@ -0,0 +1,29 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package sqlbase + +import "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + +var _ TableDescriptorInterface = (*ImmutableTableDescriptor)(nil) +var _ TableDescriptorInterface = (*MutableTableDescriptor)(nil) + +// TableDescriptorInterface is an interface around the table descriptor types. +// +// TODO(ajwerner): This interface likely belongs in a catalog/tabledesc package +// or perhaps in the catalog package directly. It's not clear how expansive this +// interface should be. Perhaps very. +type TableDescriptorInterface interface { + BaseDescriptorInterface + + GetParentID() ID + TableDesc() *TableDescriptor + FindColumnByName(name tree.Name) (*ColumnDescriptor, bool, error) +} diff --git a/pkg/sql/sqlbase/testutils.go b/pkg/sql/sqlbase/testutils.go index 9054d69dee1c..2696c9f2050a 100644 --- a/pkg/sql/sqlbase/testutils.go +++ b/pkg/sql/sqlbase/testutils.go @@ -48,7 +48,19 @@ import ( // This file contains utility functions for tests (in other packages). 
+// TestingGetMutableExistingTableDescriptor retrieves a MutableTableDescriptor +// directly from the KV layer. +func TestingGetMutableExistingTableDescriptor( + kvDB *kv.DB, codec keys.SQLCodec, database string, table string, +) *MutableTableDescriptor { + return NewMutableExistingTableDescriptor(*GetTableDescriptor(kvDB, codec, database, table)) +} + // GetTableDescriptor retrieves a table descriptor directly from the KV layer. +// +// TODO(ajwerner): Move this to catalogkv and/or question the very existence of +// this function. Consider renaming to TestingGetTableDescriptorByName or +// removing it altogether. func GetTableDescriptor( kvDB *kv.DB, codec keys.SQLCodec, database string, table string, ) *TableDescriptor { @@ -77,7 +89,7 @@ func GetTableDescriptor( descKey := MakeDescMetadataKey(codec, ID(gr.ValueInt())) desc := &Descriptor{} ts, err := kvDB.GetProtoTs(ctx, descKey, desc) - if err != nil || (*desc == Descriptor{}) { + if err != nil || desc.Equal(Descriptor{}) { log.Fatalf(ctx, "proto with id %d missing. err: %v", gr.ValueInt(), err) } tableDesc := desc.Table(ts) diff --git a/pkg/sql/sqlbase/type_desc.go b/pkg/sql/sqlbase/type_desc.go new file mode 100644 index 000000000000..cc018cf318a2 --- /dev/null +++ b/pkg/sql/sqlbase/type_desc.go @@ -0,0 +1,270 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package sqlbase + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/errors" +) + +// GetTypeDescFromID retrieves the type descriptor for the type ID passed +// in using an existing proto getter. It returns an error if the descriptor +// doesn't exist or if it exists and is not a type descriptor. +// +// TODO(ajwerner): Move this to catalogkv or something like it. +func GetTypeDescFromID( + ctx context.Context, protoGetter protoGetter, codec keys.SQLCodec, id ID, +) (*ImmutableTypeDescriptor, error) { + descKey := MakeDescMetadataKey(codec, id) + desc := &Descriptor{} + _, err := protoGetter.GetProtoTs(ctx, descKey, desc) + if err != nil { + return nil, err + } + typ := desc.GetType() + if typ == nil { + return nil, ErrDescriptorNotFound + } + // TODO(ajwerner): Fill in ModificationTime. + return NewImmutableTypeDescriptor(*typ), nil +} + +// TypeDescriptorInterface will eventually be called typedesc.Descriptor. +// It is implemented by (Imm|M)utableTypeDescriptor. +type TypeDescriptorInterface interface { + BaseDescriptorInterface + TypeDesc() *TypeDescriptor + HydrateTypeInfoWithName(typ *types.T, name *tree.TypeName, typeLookup TypeLookupFunc) error + MakeTypesT(name *tree.TypeName, typeLookup TypeLookupFunc) (*types.T, error) +} + +var _ TypeDescriptorInterface = (*ImmutableTypeDescriptor)(nil) +var _ TypeDescriptorInterface = (*MutableTypeDescriptor)(nil) + +// MakeSimpleAliasTypeDescriptor creates a type descriptor that is an alias +// for the input type. It is intended to be used as an intermediate for name +// resolution, and should not be serialized and stored on disk. 
+func MakeSimpleAliasTypeDescriptor(typ *types.T) *ImmutableTypeDescriptor { + return NewImmutableTypeDescriptor(TypeDescriptor{ + ParentID: InvalidID, + ParentSchemaID: InvalidID, + Name: typ.Name(), + ID: InvalidID, + Kind: TypeDescriptor_ALIAS, + Alias: typ, + }) +} + +// NameResolutionResult implements the NameResolutionResult interface. +func (desc *TypeDescriptor) NameResolutionResult() {} + +// MutableTypeDescriptor is a custom type for TypeDescriptors undergoing +// any types of modifications. +type MutableTypeDescriptor struct { + + // TODO(ajwerner): Decide whether we're okay embedding the + // ImmutableTypeDescriptor or whether we should be embedding some other base + // struct that implements the various methods. For now we have the trap that + // the code really wants direct field access and moving all access to + // getters on an interface is a bigger task. + ImmutableTypeDescriptor + + // ClusterVersion represents the version of the type descriptor read + // from the store. + ClusterVersion *ImmutableTypeDescriptor +} + +// ImmutableTypeDescriptor is a custom type for wrapping TypeDescriptors +// when used in a read only way. +type ImmutableTypeDescriptor struct { + TypeDescriptor +} + +// NewMutableCreatedTypeDescriptor returns a MutableTypeDescriptor from the +// given type descriptor with the cluster version being the zero type. This +// is for a type that is created in the same transaction. +func NewMutableCreatedTypeDescriptor(desc TypeDescriptor) *MutableTypeDescriptor { + return &MutableTypeDescriptor{ + ImmutableTypeDescriptor: makeImmutableTypeDescriptor(desc), + } +} + +// NewMutableExistingTypeDescriptor returns a MutableTypeDescriptor from the +// given type descriptor with the cluster version also set to the descriptor. +// This is for types that already exist. 
+func NewMutableExistingTypeDescriptor(desc TypeDescriptor) *MutableTypeDescriptor { + return &MutableTypeDescriptor{ + ImmutableTypeDescriptor: makeImmutableTypeDescriptor(*protoutil.Clone(&desc).(*TypeDescriptor)), + ClusterVersion: NewImmutableTypeDescriptor(desc), + } +} + +// NewImmutableTypeDescriptor returns an ImmutableTypeDescriptor from the +// given TypeDescriptor. +func NewImmutableTypeDescriptor(desc TypeDescriptor) *ImmutableTypeDescriptor { + m := makeImmutableTypeDescriptor(desc) + return &m +} + +func makeImmutableTypeDescriptor(desc TypeDescriptor) ImmutableTypeDescriptor { + return ImmutableTypeDescriptor{TypeDescriptor: desc} +} + +// DescriptorProto returns a Descriptor for serialization. +func (desc *ImmutableTypeDescriptor) DescriptorProto() *Descriptor { + return &Descriptor{ + Union: &Descriptor_Type{ + Type: &desc.TypeDescriptor, + }, + } +} + +// DatabaseDesc implements the ObjectDescriptor interface. +func (desc *ImmutableTypeDescriptor) DatabaseDesc() *DatabaseDescriptor { + return nil +} + +// SchemaDesc implements the ObjectDescriptor interface. +func (desc *ImmutableTypeDescriptor) SchemaDesc() *SchemaDescriptor { + return nil +} + +// TableDesc implements the ObjectDescriptor interface. +func (desc *ImmutableTypeDescriptor) TableDesc() *TableDescriptor { + return nil +} + +// TypeDesc implements the ObjectDescriptor interface. +func (desc *ImmutableTypeDescriptor) TypeDesc() *TypeDescriptor { + return &desc.TypeDescriptor +} + +// GetAuditMode implements the DescriptorProto interface. +func (desc *ImmutableTypeDescriptor) GetAuditMode() TableDescriptor_AuditMode { + return TableDescriptor_DISABLED +} + +// GetPrivileges implements the DescriptorProto interface. +// +// Types do not carry privileges. +func (desc *ImmutableTypeDescriptor) GetPrivileges() *PrivilegeDescriptor { + return nil +} + +// TypeName implements the DescriptorProto interface. 
+func (desc *ImmutableTypeDescriptor) TypeName() string { + return "type" +} + +// MakeTypesT creates a types.T from the input type descriptor. +func (desc *ImmutableTypeDescriptor) MakeTypesT( + name *tree.TypeName, typeLookup TypeLookupFunc, +) (*types.T, error) { + switch t := desc.Kind; t { + case TypeDescriptor_ENUM: + typ := types.MakeEnum(uint32(desc.GetID()), uint32(desc.ArrayTypeID)) + if err := desc.HydrateTypeInfoWithName(typ, name, typeLookup); err != nil { + return nil, err + } + return typ, nil + case TypeDescriptor_ALIAS: + // Hydrate the alias and return it. + if err := desc.HydrateTypeInfoWithName(desc.Alias, name, typeLookup); err != nil { + return nil, err + } + return desc.Alias, nil + default: + return nil, errors.AssertionFailedf("unknown type kind %s", t.String()) + } +} + +// TypeLookupFunc is a type alias for a function that looks up a type by ID. +type TypeLookupFunc func(id ID) (*tree.TypeName, TypeDescriptorInterface, error) + +// HydrateTypesInTableDescriptor uses typeLookup to install metadata in the +// types present in a table descriptor. typeLookup retrieves the fully +// qualified name and descriptor for a particular ID. +func HydrateTypesInTableDescriptor(desc *TableDescriptor, typeLookup TypeLookupFunc) error { + for i := range desc.Columns { + col := &desc.Columns[i] + if col.Type.UserDefined() { + // Look up its type descriptor. + name, typDesc, err := typeLookup(ID(col.Type.StableTypeID())) + if err != nil { + return err + } + // TODO (rohany): This should be a noop if the hydrated type + // information present in the descriptor has the same version as + // the resolved type descriptor we found here. + if err := typDesc.HydrateTypeInfoWithName(col.Type, name, typeLookup); err != nil { + return err + } + } + } + return nil +} + +// HydrateTypeInfoWithName fills in user defined type metadata for +// a type and also sets the name in the metadata to the passed in name. 
+// This is used when hydrating a type with a known qualified name. +// +// TODO (rohany): This method should eventually be defined on an +// ImmutableTypeDescriptor so that pointers to the cached info +// can be shared among callers. +func (desc *ImmutableTypeDescriptor) HydrateTypeInfoWithName( + typ *types.T, name *tree.TypeName, typeLookup TypeLookupFunc, +) error { + typ.TypeMeta.Name = types.MakeUserDefinedTypeName(name.Catalog(), name.Schema(), name.Object()) + switch desc.Kind { + case TypeDescriptor_ENUM: + if typ.Family() != types.EnumFamily { + return errors.New("cannot hydrate a non-enum type with an enum type descriptor") + } + logical := make([]string, len(desc.EnumMembers)) + physical := make([][]byte, len(desc.EnumMembers)) + for i := range desc.EnumMembers { + member := &desc.EnumMembers[i] + logical[i] = member.LogicalRepresentation + physical[i] = member.PhysicalRepresentation + } + typ.TypeMeta.EnumData = &types.EnumMetadata{ + LogicalRepresentations: logical, + PhysicalRepresentations: physical, + } + return nil + case TypeDescriptor_ALIAS: + if typ.UserDefined() { + switch typ.Family() { + case types.ArrayFamily: + // Hydrate the element type. + elemType := typ.ArrayContents() + elemTypName, elemTypDesc, err := typeLookup(ID(elemType.StableTypeID())) + if err != nil { + return err + } + if err := elemTypDesc.HydrateTypeInfoWithName(elemType, elemTypName, typeLookup); err != nil { + return err + } + return nil + default: + return errors.AssertionFailedf("only array types aliases can be user defined") + } + } + return nil + default: + return errors.AssertionFailedf("unknown type descriptor kind %s", desc.Kind) + } +} diff --git a/pkg/sql/stats/stats_cache.go b/pkg/sql/stats/stats_cache.go index 1fc54aac941b..1759eccabc0a 100644 --- a/pkg/sql/stats/stats_cache.go +++ b/pkg/sql/stats/stats_cache.go @@ -327,8 +327,8 @@ func parseStats( // collecting the stats. 
Changes to types are backwards compatible across // versions, so using a newer version of the type metadata here is safe. err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - typeLookup := func(id sqlbase.ID) (*tree.TypeName, *sqlbase.TypeDescriptor, error) { - return resolver.ResolveTypeDescByID(ctx, txn, codec, id) + typeLookup := func(id sqlbase.ID) (*tree.TypeName, sqlbase.TypeDescriptorInterface, error) { + return resolver.ResolveTypeDescByID(ctx, txn, codec, id, tree.ObjectLookupFlags{}) } name, typeDesc, err := typeLookup(sqlbase.ID(typ.StableTypeID())) if err != nil { diff --git a/pkg/sql/table.go b/pkg/sql/table.go index aeae3b015498..7339ca387bbd 100644 --- a/pkg/sql/table.go +++ b/pkg/sql/table.go @@ -286,6 +286,6 @@ func (p *planner) writeTableDescToBatch( b, p.ExecCfg().Codec, tableDesc.GetID(), - tableDesc.TableDesc(), + tableDesc, ) } diff --git a/pkg/sql/table_test.go b/pkg/sql/table_test.go index bda24c1266a7..bb4c3bfb32a8 100644 --- a/pkg/sql/table_test.go +++ b/pkg/sql/table_test.go @@ -331,7 +331,7 @@ CREATE TABLE test.tt (x test.t); t.Fatal(err) } desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "tt") - typLookup := func(id sqlbase.ID) (*tree.TypeName, *sqlbase.TypeDescriptor, error) { + typLookup := func(id sqlbase.ID) (*tree.TypeName, sqlbase.TypeDescriptorInterface, error) { typDesc, err := sqlbase.GetTypeDescFromID(ctx, kvDB, keys.SystemSQLCodec, id) if err != nil { return nil, nil, err diff --git a/pkg/sql/telemetry_test.go b/pkg/sql/telemetry_test.go index fd5ccb9d6fd9..de0171eab248 100644 --- a/pkg/sql/telemetry_test.go +++ b/pkg/sql/telemetry_test.go @@ -45,19 +45,19 @@ import ( // Executes SQL statements against the database. Outputs no results on // success. In case of error, outputs the error message. // -// - feature-whitelist +// - feature-allowlist // // The input for this command is not SQL, but a list of regular expressions. 
-// Tests that follow (until the next feature-whitelist command) will only -// output counters that match a regexp in this white list. +// Tests that follow (until the next feature-allowlist command) will only +// output counters that match a regexp in this allow list. // // - feature-usage, feature-counters // // Executes SQL statements and then outputs the feature counters from the -// white list that have been reported to the diagnostic server. The first +// allowlist that have been reported to the diagnostic server. The first // variant outputs only the names of the counters that changed; the second // variant outputs the counts as well. It is necessary to use -// feature-whitelist before these commands to avoid test flakes (e.g. because +// feature-allowlist before these commands to avoid test flakes (e.g. because // of counters that are changed by looking up descriptors) // // - schema @@ -103,7 +103,7 @@ func TestTelemetry(t *testing.T) { // issued multiple times. runner.Exec(t, "SET CLUSTER SETTING sql.query_cache.enabled = false") - var whitelist featureWhitelist + var allowlist featureAllowlist datadriven.RunTest(t, path, func(t *testing.T, td *datadriven.TestData) string { switch td.Cmd { case "exec": @@ -125,9 +125,9 @@ func TestTelemetry(t *testing.T) { } return buf.String() - case "feature-whitelist": + case "feature-allowlist": var err error - whitelist, err = makeWhitelist(strings.Split(td.Input, "\n")) + allowlist, err = makeAllowlist(strings.Split(td.Input, "\n")) if err != nil { td.Fatalf(t, "error parsing feature regex: %s", err) } @@ -150,8 +150,8 @@ func TestTelemetry(t *testing.T) { // Ignore zero values (shouldn't happen in practice) continue } - if !whitelist.Match(k) { - // Feature key not in whitelist. + if !allowlist.Match(k) { + // Feature key not in allowlist. 
continue } keys = append(keys, k) @@ -193,10 +193,10 @@ func TestTelemetry(t *testing.T) { }) } -type featureWhitelist []*regexp.Regexp +type featureAllowlist []*regexp.Regexp -func makeWhitelist(strings []string) (featureWhitelist, error) { - w := make(featureWhitelist, len(strings)) +func makeAllowlist(strings []string) (featureAllowlist, error) { + w := make(featureAllowlist, len(strings)) for i := range strings { var err error w[i], err = regexp.Compile("^" + strings[i] + "$") @@ -207,9 +207,9 @@ func makeWhitelist(strings []string) (featureWhitelist, error) { return w, nil } -func (w featureWhitelist) Match(feature string) bool { +func (w featureAllowlist) Match(feature string) bool { if w == nil { - // Unset whitelist matches all counters. + // Unset allowlist matches all counters. return true } for _, r := range w { diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go index 3eab640a1d85..1c3e818bd5d7 100644 --- a/pkg/sql/temporary_schema.go +++ b/pkg/sql/temporary_schema.go @@ -307,7 +307,7 @@ func cleanupSchemaObjects( for _, col := range dTableDesc.Columns { if dependentColIDs.Contains(int(col.ID)) { tbName := tree.MakeTableNameWithSchema( - tree.Name(db.Name), + tree.Name(db.GetName()), tree.Name(schema), tree.Name(dTableDesc.Name), ) diff --git a/pkg/sql/testdata/telemetry/error b/pkg/sql/testdata/telemetry/error index acc46d357955..1c7a035a9a5c 100644 --- a/pkg/sql/testdata/telemetry/error +++ b/pkg/sql/testdata/telemetry/error @@ -1,6 +1,6 @@ # This file contains telemetry tests for counters triggered by errors. -feature-whitelist +feature-allowlist othererror.* errorcodes.* unimplemented.* diff --git a/pkg/sql/testdata/telemetry/planning b/pkg/sql/testdata/telemetry/planning index 201c7e7d700e..a57e41a1b30e 100644 --- a/pkg/sql/testdata/telemetry/planning +++ b/pkg/sql/testdata/telemetry/planning @@ -6,7 +6,7 @@ CREATE TABLE x (a INT PRIMARY KEY) # Tests for EXPLAIN counters. 
-feature-whitelist +feature-allowlist sql.plan.explain sql.plan.explain-analyze sql.plan.explain-opt @@ -45,7 +45,7 @@ sql.plan.explain-opt-verbose # Tests for hints. -feature-whitelist +feature-allowlist sql.plan.hints.* ---- @@ -84,7 +84,7 @@ sql.plan.hints.index.delete # Tests for tracking important setting changes. -feature-whitelist +feature-allowlist sql.plan.reorder-joins.* sql.plan.automatic-stats.* ---- @@ -125,7 +125,7 @@ RESET CLUSTER SETTING sql.stats.automatic_collection.enabled sql.plan.automatic-stats.enabled # Test telemetry for manual statistics creation. -feature-whitelist +feature-allowlist sql.plan.stats.created ---- @@ -135,7 +135,7 @@ CREATE STATISTICS stats FROM x sql.plan.stats.created # Test various planning counters. -feature-whitelist +feature-allowlist sql.plan.cte.* sql.plan.lateral-join sql.plan.subquery.* @@ -174,7 +174,7 @@ sql.plan.subquery.correlated # Test some sql.plan.ops counters, using some esoteric operators unlikely to be # executed in background activity). -feature-whitelist +feature-allowlist sql.plan.ops.cast.string::inet sql.plan.ops.bin.jsonb - string sql.plan.ops.array.* @@ -202,7 +202,7 @@ INSERT INTO x SELECT unnest(ARRAY[9, 10, 11, 12]) sql.plan.ops.array.cons # Test a few sql.plan.opt.node counters. 
-feature-whitelist +feature-allowlist sql.plan.opt.node.project-set sql.plan.opt.node.join.* ---- diff --git a/pkg/sql/testdata/telemetry/schema b/pkg/sql/testdata/telemetry/schema index 89f8e6ababc4..70f6bea8217a 100644 --- a/pkg/sql/testdata/telemetry/schema +++ b/pkg/sql/testdata/telemetry/schema @@ -36,7 +36,7 @@ table:_ ├── _: _ └── _: _ -feature-whitelist +feature-allowlist sql.schema.* ---- diff --git a/pkg/sql/tests/system_table_test.go b/pkg/sql/tests/system_table_test.go index 860df610513f..50f8e2220d93 100644 --- a/pkg/sql/tests/system_table_test.go +++ b/pkg/sql/tests/system_table_test.go @@ -65,7 +65,7 @@ func TestInitialKeys(t *testing.T) { if err != nil { t.Fatal(err) } - ms.AddDescriptor(keys.SystemDatabaseID, &desc) + ms.AddDescriptor(keys.SystemDatabaseID, desc) kv, _ /* splits */ = ms.GetInitialValues() expected = nonDescKeys + keysPerDesc*ms.SystemDescriptorCount() if actual := len(kv); actual != expected { @@ -154,7 +154,7 @@ func TestSystemTableLiterals(t *testing.T) { type testcase struct { id sqlbase.ID schema string - pkg sqlbase.TableDescriptor + pkg *sqlbase.ImmutableTableDescriptor } for _, test := range []testcase{ @@ -196,7 +196,7 @@ func TestSystemTableLiterals(t *testing.T) { } require.NoError(t, gen.ValidateTable()) - if !proto.Equal(&test.pkg, &gen) { + if !proto.Equal(test.pkg.TableDesc(), gen.TableDesc()) { diff := strings.Join(pretty.Diff(&test.pkg, &gen), "\n") t.Errorf("%s table descriptor generated from CREATE TABLE statement does not match "+ "hardcoded table descriptor:\n%s", test.pkg.Name, diff) diff --git a/pkg/sql/testutils.go b/pkg/sql/testutils.go index f8a9fb742faa..71af24d4524f 100644 --- a/pkg/sql/testutils.go +++ b/pkg/sql/testutils.go @@ -31,11 +31,11 @@ func CreateTestTableDescriptor( parentID, id sqlbase.ID, schema string, privileges *sqlbase.PrivilegeDescriptor, -) (sqlbase.TableDescriptor, error) { +) (*sqlbase.MutableTableDescriptor, error) { st := cluster.MakeTestingClusterSettings() stmt, err := 
parser.ParseOne(schema) if err != nil { - return sqlbase.TableDescriptor{}, err + return nil, err } semaCtx := tree.MakeSemaContext() evalCtx := tree.MakeTestingEvalContext(st) @@ -56,7 +56,7 @@ func CreateTestTableDescriptor( &sessiondata.SessionData{}, /* sessionData */ false, /* temporary */ ) - return desc.TableDescriptor, err + return &desc, err case *tree.CreateSequence: desc, err := MakeSequenceTableDesc( n.Name.Table(), @@ -67,9 +67,9 @@ func CreateTestTableDescriptor( false, /* temporary */ nil, /* params */ ) - return desc.TableDescriptor, err + return &desc, err default: - return sqlbase.TableDescriptor{}, errors.Errorf("unexpected AST %T", stmt.AST) + return nil, errors.Errorf("unexpected AST %T", stmt.AST) } } diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go index 1f8c7b014175..2325289a22c2 100644 --- a/pkg/sql/truncate.go +++ b/pkg/sql/truncate.go @@ -168,16 +168,14 @@ func (p *planner) truncateTable( if err != nil { return err } - // tableDesc.DropJobID = dropJobID - newTableDesc := sqlbase.NewMutableCreatedTableDescriptor(tableDesc.TableDescriptor) - newTableDesc.ReplacementOf = sqlbase.TableDescriptor_Replacement{ - ID: id, - // NB: Time is just used for debugging purposes. See the comment on the - // field for more details. - Time: p.txn.ReadTimestamp(), + + newID, err := catalogkv.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec) + if err != nil { + return err } - newTableDesc.SetID(0) - newTableDesc.Version = 1 + // tableDesc.DropJobID = dropJobID + newTableDesc := sqlbase.NewMutableTableDescriptorAsReplacement( + newID, tableDesc, p.txn.ReadTimestamp()) // Remove old name -> id map. // This is a violation of consistency because once the TRUNCATE commits @@ -211,11 +209,6 @@ func (p *planner) truncateTable( return err } - newID, err := catalogkv.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec) - if err != nil { - return err - } - // update all the references to this table. 
tables, err := p.findAllReferences(ctx, *tableDesc) if err != nil { diff --git a/pkg/sql/types/types.go b/pkg/sql/types/types.go index 5ac49da7a593..5b8ba6d5d29a 100644 --- a/pkg/sql/types/types.go +++ b/pkg/sql/types/types.go @@ -241,8 +241,8 @@ func (u UserDefinedTypeName) Basename() string { // FQName returns the fully qualified name. func (u UserDefinedTypeName) FQName() string { var sb strings.Builder - sb.WriteString(u.Catalog) - sb.WriteString(".") + // Note that cross-database type references are disabled, so we only + // format the qualified name with the schema. sb.WriteString(u.Schema) sb.WriteString(".") sb.WriteString(u.Name) diff --git a/pkg/sql/unsplit.go b/pkg/sql/unsplit.go index 42fd22534506..1b7b8ce160b4 100644 --- a/pkg/sql/unsplit.go +++ b/pkg/sql/unsplit.go @@ -107,7 +107,7 @@ func (n *unsplitAllNode) startExec(params runParams) error { ranges, err := params.p.ExtendedEvalContext().InternalExecutor.(*InternalExecutor).QueryEx( params.ctx, "split points query", params.p.txn, sqlbase.InternalExecutorSessionDataOverride{}, statement, - dbDesc.Name, + dbDesc.GetName(), n.tableDesc.Name, indexName, ) diff --git a/pkg/sql/virtual_schema.go b/pkg/sql/virtual_schema.go index 3f54c5679ddf..cff5253da5e3 100644 --- a/pkg/sql/virtual_schema.go +++ b/pkg/sql/virtual_schema.go @@ -14,6 +14,7 @@ import ( "context" "math" "sort" + "time" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -64,7 +65,7 @@ type virtualSchemaDef interface { type virtualIndex struct { // populate populates the table given the constraint. matched is true if any // rows were generated. 
- populate func(ctx context.Context, constraint tree.Datum, p *planner, db *DatabaseDescriptor, + populate func(ctx context.Context, constraint tree.Datum, p *planner, db *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error, ) (matched bool, err error) @@ -89,7 +90,7 @@ type virtualSchemaTable struct { // populate, if non-nil, is a function that is used when creating a // valuesNode. This function eagerly loads every row of the virtual table // during initialization of the valuesNode. - populate func(ctx context.Context, p *planner, db *DatabaseDescriptor, addRow func(...tree.Datum) error) error + populate func(ctx context.Context, p *planner, db *sqlbase.ImmutableDatabaseDescriptor, addRow func(...tree.Datum) error) error // indexes, if non empty, is a slice of populate methods that also take a // constraint, only generating rows that match the constraint. The order of @@ -100,7 +101,7 @@ type virtualSchemaTable struct { // generator, if non-nil, is a function that is used when creating a // virtualTableNode. This function returns a virtualTableGenerator function // which generates the next row of the virtual table when called. 
- generator func(ctx context.Context, p *planner, db *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) + generator func(ctx context.Context, p *planner, db *sqlbase.ImmutableDatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) } // virtualSchemaView represents a view within a virtualSchema @@ -157,7 +158,7 @@ func (t virtualSchemaTable) initVirtualTableDesc( 0, /* parentID */ parentSchemaID, id, - hlc.Timestamp{}, /* creationTime */ + startTime, /* creationTime */ publicSelectPrivileges, nil, /* affected */ nil, /* semaCtx */ @@ -235,7 +236,7 @@ func (v virtualSchemaView) initVirtualTableDesc( parentSchemaID, id, columns, - hlc.Timestamp{}, /* creationTime */ + startTime, /* creationTime */ publicSelectPrivileges, nil, /* semaCtx */ nil, /* evalCtx */ @@ -260,6 +261,10 @@ var virtualSchemas = map[sqlbase.ID]virtualSchema{ sqlbase.PgExtensionSchemaID: pgExtension, } +var startTime = hlc.Timestamp{ + WallTime: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).UnixNano(), +} + // // SQL-layer interface to work with virtual schemas. // @@ -287,7 +292,7 @@ var _ catalog.VirtualSchemas = (*VirtualSchemaHolder)(nil) type virtualSchemaEntry struct { // TODO(ajwerner): Use a sqlbase.SchemaDescriptor here as part of the // user-defined schema work. 
- desc *sqlbase.DatabaseDescriptor + desc *sqlbase.ImmutableDatabaseDescriptor defs map[string]virtualDefEntry orderedDefNames []string allTableNames map[string]struct{} @@ -320,8 +325,8 @@ func (v virtualSchemaEntry) GetObjectByName( return &def, nil } if _, ok := v.allTableNames[name]; ok { - return nil, unimplemented.Newf(v.desc.Name+"."+name, - "virtual schema table not implemented: %s.%s", v.desc.Name, name) + return nil, unimplemented.Newf(v.desc.GetName()+"."+name, + "virtual schema table not implemented: %s.%s", v.desc.GetName(), name) } return nil, nil case tree.TypeObject: @@ -346,6 +351,7 @@ func (v virtualSchemaEntry) GetObjectByName( if !ok { return nil, nil } + return virtualTypeEntry{ desc: sqlbase.MakeSimpleAliasTypeDescriptor(typ), mutable: flags.RequireMutable, @@ -375,15 +381,14 @@ func (e mutableVirtualDefEntry) Desc() catalog.Descriptor { } type virtualTypeEntry struct { - desc *sqlbase.TypeDescriptor + desc *sqlbase.ImmutableTypeDescriptor mutable bool } func (e virtualTypeEntry) Desc() catalog.Descriptor { - if e.mutable { - return sqlbase.NewMutableExistingTypeDescriptor(*e.desc) - } - return sqlbase.NewImmutableTypeDescriptor(*e.desc) + // TODO(ajwerner): Should this be allowed? I think no. Let's just store an + // ImmutableTypeDesc off of this thing. 
+ return e.desc } type virtualTableConstructor func(context.Context, *planner, string) (planNode, error) @@ -442,14 +447,14 @@ func (e virtualDefEntry) getPlanInfo( } constructor := func(ctx context.Context, p *planner, dbName string) (planNode, error) { - var dbDesc *DatabaseDescriptor + var dbDesc *sqlbase.ImmutableDatabaseDescriptor if dbName != "" { - var err error - dbDesc, err = p.LogicalSchemaAccessor().GetDatabaseDesc(ctx, p.txn, p.ExecCfg().Codec, + dbDescI, err := p.LogicalSchemaAccessor().GetDatabaseDesc(ctx, p.txn, p.ExecCfg().Codec, dbName, tree.DatabaseLookupFlags{Required: true, AvoidCached: p.avoidCachedDescriptors}) if err != nil { return nil, err } + dbDesc = dbDescI.(*sqlbase.ImmutableDatabaseDescriptor) } else { if !e.validWithNoDatabaseContext { return nil, errInvalidDbPrefix @@ -512,7 +517,7 @@ func (e virtualDefEntry) getPlanInfo( func (e virtualDefEntry) makeConstrainedRowsGenerator( ctx context.Context, p *planner, - dbDesc *DatabaseDescriptor, + dbDesc *sqlbase.ImmutableDatabaseDescriptor, index *sqlbase.IndexDescriptor, indexKeyDatums []tree.Datum, columnIdxMap map[sqlbase.ColumnID]int, @@ -654,12 +659,8 @@ func NewVirtualSchemaHolder( // user has access to. var publicSelectPrivileges = sqlbase.NewPrivilegeDescriptor(sqlbase.PublicRole, privilege.List{privilege.SELECT}) -func initVirtualDatabaseDesc(id sqlbase.ID, name string) *sqlbase.DatabaseDescriptor { - return &sqlbase.DatabaseDescriptor{ - Name: name, - ID: id, - Privileges: publicSelectPrivileges, - } +func initVirtualDatabaseDesc(id sqlbase.ID, name string) *sqlbase.ImmutableDatabaseDescriptor { + return sqlbase.NewInitialDatabaseDescriptorWithPrivileges(id, name, publicSelectPrivileges) } // getEntries is part of the VirtualTabler interface. 
diff --git a/pkg/sql/virtual_table.go b/pkg/sql/virtual_table.go index d4a9fa752f7d..2aee92c31086 100644 --- a/pkg/sql/virtual_table.go +++ b/pkg/sql/virtual_table.go @@ -191,7 +191,7 @@ type vTableLookupJoinNode struct { input planNode dbName string - db *sqlbase.DatabaseDescriptor + db *sqlbase.ImmutableDatabaseDescriptor table *sqlbase.TableDescriptor index *sqlbase.IndexDescriptor // eqCol is the single equality column ordinal into the lookup table. Virtual @@ -241,13 +241,17 @@ func (v *vTableLookupJoinNode) startExec(params runParams) error { sqlbase.ColTypeInfoFromResCols(v.columns), 0) v.run.indexKeyDatums = make(tree.Datums, len(v.columns)) var err error - v.db, err = params.p.LogicalSchemaAccessor().GetDatabaseDesc( + db, err := params.p.LogicalSchemaAccessor().GetDatabaseDesc( params.ctx, params.p.txn, params.p.ExecCfg().Codec, v.dbName, tree.DatabaseLookupFlags{Required: true, AvoidCached: params.p.avoidCachedDescriptors}, ) + if err != nil { + return err + } + v.db = db.(*sqlbase.ImmutableDatabaseDescriptor) return err } diff --git a/pkg/sql/zone_config.go b/pkg/sql/zone_config.go index a7a4a1afa26b..767d60ecd149 100644 --- a/pkg/sql/zone_config.go +++ b/pkg/sql/zone_config.go @@ -251,12 +251,12 @@ func zoneSpecifierNotFoundError(zs tree.ZoneSpecifier) error { // Returns res = nil if the zone specifier is not for a table or index. 
func (p *planner) resolveTableForZone( ctx context.Context, zs *tree.ZoneSpecifier, -) (res *TableDescriptor, err error) { +) (res sqlbase.DescriptorInterface, err error) { if zs.TargetsIndex() { var mutRes *MutableTableDescriptor _, mutRes, err = expandMutableIndexName(ctx, p, &zs.TableOrIndex, true /* requireTable */) if mutRes != nil { - res = mutRes.TableDesc() + res = mutRes } } else if zs.TargetsTable() { var immutRes *ImmutableTableDescriptor @@ -268,7 +268,7 @@ func (p *planner) resolveTableForZone( if err != nil { return nil, err } else if immutRes != nil { - res = immutRes.TableDesc() + res = immutRes } } return res, err @@ -302,7 +302,7 @@ func resolveZone(ctx context.Context, txn *kv.Txn, zs *tree.ZoneSpecifier) (sqlb } func resolveSubzone( - zs *tree.ZoneSpecifier, table *sqlbase.TableDescriptor, + zs *tree.ZoneSpecifier, table sqlbase.DescriptorInterface, ) (*sqlbase.IndexDescriptor, string, error) { if !zs.TargetsTable() || zs.TableOrIndex.Index == "" && zs.Partition == "" { return nil, "", nil @@ -311,11 +311,11 @@ func resolveSubzone( indexName := string(zs.TableOrIndex.Index) var index *sqlbase.IndexDescriptor if indexName == "" { - index = &table.PrimaryIndex + index = &table.TableDesc().PrimaryIndex indexName = index.Name } else { var err error - index, _, err = table.FindIndexByName(indexName) + index, _, err = table.TableDesc().FindIndexByName(indexName) if err != nil { return nil, "", err } diff --git a/pkg/sqlmigrations/migrations.go b/pkg/sqlmigrations/migrations.go index e29e202f21ef..59ebfe1e6c6a 100644 --- a/pkg/sqlmigrations/migrations.go +++ b/pkg/sqlmigrations/migrations.go @@ -1077,7 +1077,7 @@ func migrateSchemaChangeJobs(ctx context.Context, r runner, registry *jobs.Regis // // There are probably more efficient ways to do this part of the migration, // but the current approach seemed like the most straightforward. 
- var allDescs []sqlbase.DescriptorProto + var allDescs []sqlbase.DescriptorInterface schemaChangeJobsForDesc := make(map[sqlbase.ID][]int64) gcJobsForDesc := make(map[sqlbase.ID][]int64) if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -1176,42 +1176,41 @@ func migrateSchemaChangeJobs(ctx context.Context, r runner, registry *jobs.Regis log.Infof(ctx, "evaluating tables for creating jobs") for _, desc := range allDescs { - switch desc := desc.(type) { - case *sqlbase.TableDescriptor: - if scJobs := schemaChangeJobsForDesc[desc.ID]; len(scJobs) > 0 { - log.VEventf(ctx, 3, "table %d has running schema change jobs %v, skipping", desc.ID, scJobs) + if tableDesc, ok := desc.(*sqlbase.ImmutableTableDescriptor); ok { + if scJobs := schemaChangeJobsForDesc[tableDesc.ID]; len(scJobs) > 0 { + log.VEventf(ctx, 3, "table %d has running schema change jobs %v, skipping", tableDesc.ID, scJobs) continue - } else if gcJobs := gcJobsForDesc[desc.ID]; len(gcJobs) > 0 { - log.VEventf(ctx, 3, "table %d has running GC jobs %v, skipping", desc.ID, gcJobs) + } else if gcJobs := gcJobsForDesc[tableDesc.ID]; len(gcJobs) > 0 { + log.VEventf(ctx, 3, "table %d has running GC jobs %v, skipping", tableDesc.ID, gcJobs) continue } - if !desc.Adding() && !desc.Dropped() && !desc.HasDrainingNames() { + if !tableDesc.Adding() && !tableDesc.Dropped() && !tableDesc.HasDrainingNames() { log.VEventf(ctx, 3, "table %d is not being added or dropped and does not have draining names, skipping", - desc.ID, + tableDesc.ID, ) continue } if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - key := schemaChangeJobMigrationKeyForTable(r.codec, desc.ID) + key := schemaChangeJobMigrationKeyForTable(r.codec, tableDesc.ID) startTime := timeutil.Now().String() if kv, err := txn.Get(ctx, key); err != nil { return err } else if kv.Exists() { - log.VEventf(ctx, 3, "table %d already processed in migration", desc.ID) + log.VEventf(ctx, 3, "table %d already processed in migration", 
tableDesc.ID) return nil } - if desc.Adding() || desc.HasDrainingNames() { - if err := createSchemaChangeJobForTable(txn, desc); err != nil { + if tableDesc.Adding() || tableDesc.HasDrainingNames() { + if err := createSchemaChangeJobForTable(txn, tableDesc.TableDesc()); err != nil { return err } - } else if desc.Dropped() { + } else if tableDesc.Dropped() { // Note that a table can be both in the DROP state and have draining // names. In that case it was enough to just create a schema change // job, as in the case above, because that job will itself create a // GC job. - if err := createGCJobForTable(txn, desc); err != nil { + if err := createGCJobForTable(txn, tableDesc.TableDesc()); err != nil { return err } } @@ -1222,9 +1221,8 @@ func migrateSchemaChangeJobs(ctx context.Context, r runner, registry *jobs.Regis }); err != nil { return err } - case *sqlbase.DatabaseDescriptor: - // Do nothing. } + // Do nothing. } return nil @@ -1482,14 +1480,14 @@ func migrationKey(codec keys.SQLCodec, migration migrationDescriptor) roachpb.Ke return append(codec.MigrationKeyPrefix(), roachpb.RKey(migration.name)...) } -func createSystemTable(ctx context.Context, r runner, desc sqlbase.TableDescriptor) error { +func createSystemTable(ctx context.Context, r runner, desc sqlbase.TableDescriptorInterface) error { // We install the table at the KV layer so that we can choose a known ID in // the reserved ID space. (The SQL layer doesn't allow this.) 
err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() tKey := sqlbase.MakePublicTableNameKey(ctx, r.settings, desc.GetParentID(), desc.GetName()) b.CPut(tKey.Key(r.codec), desc.GetID(), nil) - b.CPut(sqlbase.MakeDescMetadataKey(r.codec, desc.GetID()), sqlbase.WrapDescriptor(&desc), nil) + b.CPut(sqlbase.MakeDescMetadataKey(r.codec, desc.GetID()), desc.DescriptorProto(), nil) if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -1574,7 +1572,7 @@ func createNewSystemNamespaceDescriptor(ctx context.Context, r runner) error { sqlbase.NamespaceTable.GetParentID(), sqlbase.NamespaceTableName) b.Put(nameKey.Key(r.codec), sqlbase.NamespaceTable.GetID()) b.Put(sqlbase.MakeDescMetadataKey( - r.codec, sqlbase.NamespaceTable.GetID()), sqlbase.WrapDescriptor(&sqlbase.NamespaceTable)) + r.codec, sqlbase.NamespaceTable.GetID()), sqlbase.NamespaceTable.DescriptorProto()) return txn.Run(ctx, b) }) } diff --git a/pkg/sqlmigrations/migrations_test.go b/pkg/sqlmigrations/migrations_test.go index 7bc624a9e715..73d7fc25a8cd 100644 --- a/pkg/sqlmigrations/migrations_test.go +++ b/pkg/sqlmigrations/migrations_test.go @@ -538,7 +538,7 @@ func TestCreateSystemTable(t *testing.T) { defer leaktest.AfterTest(t)() ctx := context.Background() - table := sqlbase.NamespaceTable + table := sqlbase.NewMutableExistingTableDescriptor(sqlbase.NamespaceTable.TableDescriptor) table.ID = keys.MaxReservedDescID prevPrivileges, ok := sqlbase.SystemAllowedPrivileges[table.ID] @@ -555,7 +555,7 @@ func TestCreateSystemTable(t *testing.T) { table.Name = "dummy" nameKey := sqlbase.NewPublicTableKey(table.ParentID, table.Name).Key(keys.SystemSQLCodec) descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, table.ID) - descVal := sqlbase.WrapDescriptor(&table) + descVal := table.DescriptorProto() mt := makeMigrationTest(ctx, t) defer mt.close(ctx) @@ -794,7 +794,7 @@ func TestMigrateNamespaceTableDescriptors(t *testing.T) { table := desc.Table(ts) 
table.CreateAsOfTime = sqlbase.NamespaceTable.CreateAsOfTime table.ModificationTime = sqlbase.NamespaceTable.ModificationTime - require.True(t, table.Equal(sqlbase.NamespaceTable)) + require.True(t, table.Equal(sqlbase.NamespaceTable.TableDesc())) } { ts, err := txn.GetProtoTs(ctx, deprecatedKey, desc) @@ -802,7 +802,7 @@ func TestMigrateNamespaceTableDescriptors(t *testing.T) { table := desc.Table(ts) table.CreateAsOfTime = sqlbase.DeprecatedNamespaceTable.CreateAsOfTime table.ModificationTime = sqlbase.DeprecatedNamespaceTable.ModificationTime - require.True(t, table.Equal(sqlbase.DeprecatedNamespaceTable)) + require.True(t, table.Equal(sqlbase.DeprecatedNamespaceTable.TableDesc())) } return nil })) @@ -849,7 +849,7 @@ CREATE TABLE system.jobs ( require.Equal(t, oldPrimaryFamilyColumns, oldJobsTable.Families[0].ColumnNames) jobsTable := sqlbase.JobsTable - sqlbase.JobsTable = oldJobsTable + sqlbase.JobsTable = sqlbase.NewImmutableTableDescriptor(*oldJobsTable.TableDesc()) defer func() { sqlbase.JobsTable = jobsTable }() diff --git a/pkg/storage/bench_pebble_test.go b/pkg/storage/bench_pebble_test.go index 286e82d2bef5..051899a2801c 100644 --- a/pkg/storage/bench_pebble_test.go +++ b/pkg/storage/bench_pebble_test.go @@ -95,7 +95,6 @@ func BenchmarkExportToSst(b *testing.B) { numKeys := []int{64, 512, 1024, 8192, 65536} numRevisions := []int{1, 10, 100} exportAllRevisions := []bool{false, true} - contention := []bool{false, true} engineMakers := []struct { name string create engineMaker @@ -111,12 +110,8 @@ func BenchmarkExportToSst(b *testing.B) { for _, numRevision := range numRevisions { b.Run(fmt.Sprintf("numRevisions=%d", numRevision), func(b *testing.B) { for _, exportAllRevisionsVal := range exportAllRevisions { - b.Run(fmt.Sprintf("exportAllRevisions=%t", exportAllRevisions), func(b *testing.B) { - for _, contentionVal := range contention { - b.Run(fmt.Sprintf("contention=%t", contentionVal), func(b *testing.B) { - runExportToSst(context.Background(), b, 
engineImpl.create, numKey, numRevision, exportAllRevisionsVal, contentionVal) - }) - } + b.Run(fmt.Sprintf("exportAllRevisions=%t", exportAllRevisionsVal), func(b *testing.B) { + runExportToSst(b, engineImpl.create, numKey, numRevision, exportAllRevisionsVal) }) } }) diff --git a/pkg/storage/bench_test.go b/pkg/storage/bench_test.go index e05cc4bd9c11..bb1c6f578fa9 100644 --- a/pkg/storage/bench_test.go +++ b/pkg/storage/bench_test.go @@ -985,13 +985,7 @@ func runBatchApplyBatchRepr( } func runExportToSst( - ctx context.Context, - b *testing.B, - emk engineMaker, - numKeys int, - numRevisions int, - exportAllRevisions bool, - contention bool, + b *testing.B, emk engineMaker, numKeys int, numRevisions int, exportAllRevisions bool, ) { dir, cleanup := testutils.TempDir(b) defer cleanup() diff --git a/pkg/storage/cloud/azure_storage.go b/pkg/storage/cloud/azure_storage.go index 264947e613a8..12db2f8ab96e 100644 --- a/pkg/storage/cloud/azure_storage.go +++ b/pkg/storage/cloud/azure_storage.go @@ -100,6 +100,13 @@ func (s *azureStorage) ReadFile(ctx context.Context, basename string) (io.ReadCl blob := s.getBlob(basename) get, err := blob.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false) if err != nil { + if azerr := (azblob.StorageError)(nil); errors.As(err, &azerr) { + switch azerr.ServiceCode() { + // TODO(adityamaru): Investigate whether both these conditions are required. 
+ case azblob.ServiceCodeBlobNotFound, azblob.ServiceCodeResourceNotFound: + return nil, errors.Wrapf(ErrFileDoesNotExist, "azure blob does not exist: %s", err.Error()) + } + } return nil, errors.Wrap(err, "failed to create azure reader") } reader := get.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3}) diff --git a/pkg/storage/cloud/external_storage.go b/pkg/storage/cloud/external_storage.go index bd164c9e7fe2..200bb34517c4 100644 --- a/pkg/storage/cloud/external_storage.go +++ b/pkg/storage/cloud/external_storage.go @@ -88,6 +88,11 @@ var redactedQueryParams = map[string]struct{}{ // ErrListingUnsupported is a marker for indicating listing is unsupported. var ErrListingUnsupported = errors.New("listing is not supported") +// ErrFileDoesNotExist is a sentinel error for indicating that a specified +// bucket/object/key/file (depending on storage terminology) does not exist. +// This error is raised by the ReadFile method. +var ErrFileDoesNotExist = errors.New("external_storage: file doesn't exist") + // ExternalStorageFactory describes a factory function for ExternalStorage. type ExternalStorageFactory func(ctx context.Context, dest roachpb.ExternalStorage) (ExternalStorage, error) @@ -113,6 +118,8 @@ type ExternalStorage interface { Conf() roachpb.ExternalStorage // ReadFile should return a Reader for requested name. + // ErrFileDoesNotExist is raised if `basename` cannot be located in storage. + // This can be leveraged for an existence check. ReadFile(ctx context.Context, basename string) (io.ReadCloser, error) // WriteFile should write the content to requested name. 
diff --git a/pkg/storage/cloud/external_storage_test.go b/pkg/storage/cloud/external_storage_test.go index 789c6fba52f3..c779b96bd4ed 100644 --- a/pkg/storage/cloud/external_storage_test.go +++ b/pkg/storage/cloud/external_storage_test.go @@ -30,6 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/workload" "github.com/cockroachdb/cockroach/pkg/workload/bank" + "github.com/cockroachdb/errors" "github.com/spf13/pflag" "github.com/stretchr/testify/require" "golang.org/x/oauth2/google" @@ -125,9 +126,8 @@ func testExportStoreWithExternalIOConfig( if !bytes.Equal(res, payload) { t.Fatalf("got %v expected %v", res, payload) } - if err := s.Delete(ctx, name); err != nil { - t.Fatal(err) - } + + require.NoError(t, s.Delete(ctx, name)) } }) @@ -160,9 +160,7 @@ func testExportStoreWithExternalIOConfig( if !bytes.Equal(content, testingContent) { t.Fatalf("wrong content") } - if err := s.Delete(ctx, testingFilename); err != nil { - t.Fatal(err) - } + require.NoError(t, s.Delete(ctx, testingFilename)) }) if skipSingleFile { return @@ -188,9 +186,7 @@ func testExportStoreWithExternalIOConfig( if !bytes.Equal(content, []byte("aaa")) { t.Fatalf("wrong content") } - if err := s.Delete(ctx, testingFilename); err != nil { - t.Fatal(err) - } + require.NoError(t, s.Delete(ctx, testingFilename)) }) t.Run("write-single-file-by-uri", func(t *testing.T) { const testingFilename = "B" @@ -214,9 +210,40 @@ func testExportStoreWithExternalIOConfig( if !bytes.Equal(content, []byte("bbb")) { t.Fatalf("wrong content") } - if err := s.Delete(ctx, testingFilename); err != nil { + + require.NoError(t, s.Delete(ctx, testingFilename)) + }) + // This test ensures that the ReadFile method of the ExternalStorage interface + // raises a sentinel error indicating that a requested bucket/key/file/object + // (based on the storage system) could not be found. 
+ t.Run("file-does-not-exist", func(t *testing.T) { + const testingFilename = "A" + if err := s.WriteFile(ctx, testingFilename, bytes.NewReader([]byte("aaa"))); err != nil { + t.Fatal(err) + } + singleFile := storeFromURI(ctx, t, storeURI, clientFactory) + defer singleFile.Close() + + // Read a valid file. + res, err := singleFile.ReadFile(ctx, testingFilename) + if err != nil { t.Fatal(err) } + defer res.Close() + content, err := ioutil.ReadAll(res) + if err != nil { + t.Fatal(err) + } + // Verify the result contains what we wrote. + if !bytes.Equal(content, []byte("aaa")) { + t.Fatalf("wrong content") + } + + // Attempt to read a file which does not exist. + _, err = singleFile.ReadFile(ctx, "file_does_not_exist") + require.Error(t, err) + require.True(t, errors.Is(err, ErrFileDoesNotExist), "Expected a file does not exist error but returned %s") + require.NoError(t, s.Delete(ctx, testingFilename)) }) } diff --git a/pkg/storage/cloud/gcs_storage.go b/pkg/storage/cloud/gcs_storage.go index 4060123edc6d..0bde4855331a 100644 --- a/pkg/storage/cloud/gcs_storage.go +++ b/pkg/storage/cloud/gcs_storage.go @@ -228,6 +228,13 @@ func (g *gcsStorage) ReadFile(ctx context.Context, basename string) (io.ReadClos object: path.Join(g.prefix, basename), } if err := reader.openStream(); err != nil { + // The Google SDK has a specialized ErrBucketDoesNotExist error, but + // the code path from this method first triggers an ErrObjectNotExist in + // both scenarios - when a Bucket does not exist or an Object does not + // exist. 
+ if errors.Is(err, gcs.ErrObjectNotExist) { + return nil, errors.Wrapf(ErrFileDoesNotExist, "gcs object does not exist: %s", err.Error()) + } return nil, err } return reader, nil diff --git a/pkg/storage/cloud/gcs_storage_test.go b/pkg/storage/cloud/gcs_storage_test.go index 05e3ad442066..171e71579e33 100644 --- a/pkg/storage/cloud/gcs_storage_test.go +++ b/pkg/storage/cloud/gcs_storage_test.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/cockroachdb/cockroach/pkg/util/sysutil" + "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -78,7 +79,7 @@ func (c *antagonisticConn) Read(b []byte) (int, error) { func TestAntagonisticRead(t *testing.T) { if os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" { // This test requires valid GS credential file. - return + t.Skip("GOOGLE_APPLICATION_CREDENTIALS env var must be set") } rnd, _ := randutil.NewPseudoRand() @@ -112,3 +113,41 @@ func TestAntagonisticRead(t *testing.T) { _, err = ioutil.ReadAll(stream) require.NoError(t, err) } + +// TestFileDoesNotExist ensures that the ReadFile method of google cloud storage +// returns a sentinel error when the `Bucket` or `Object` being read do not +// exist. +func TestFileDoesNotExist(t *testing.T) { + if os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" { + // This test requires valid GS credential file. + t.Skip("GOOGLE_APPLICATION_CREDENTIALS env var must be set") + } + + { + // Invalid gsFile. + gsFile := "gs://cockroach-fixtures/tpch-csv/sf-1/invalid_region.tbl?AUTH=implicit" + conf, err := ExternalStorageConfFromURI(gsFile) + require.NoError(t, err) + + s, err := MakeExternalStorage( + context.Background(), conf, base.ExternalIODirConfig{}, testSettings, nil) + require.NoError(t, err) + _, err = s.ReadFile(context.Background(), "") + require.Error(t, err, "") + require.True(t, errors.Is(err, ErrFileDoesNotExist)) + } + + { + // Invalid gsBucket. 
+ gsFile := "gs://cockroach-fixtures-invalid/tpch-csv/sf-1/region.tbl?AUTH=implicit" + conf, err := ExternalStorageConfFromURI(gsFile) + require.NoError(t, err) + + s, err := MakeExternalStorage( + context.Background(), conf, base.ExternalIODirConfig{}, testSettings, nil) + require.NoError(t, err) + _, err = s.ReadFile(context.Background(), "") + require.Error(t, err, "") + require.True(t, errors.Is(err, ErrFileDoesNotExist)) + } +} diff --git a/pkg/storage/cloud/http_storage.go b/pkg/storage/cloud/http_storage.go index 87911f4c5fb3..62f52ad0a0b0 100644 --- a/pkg/storage/cloud/http_storage.go +++ b/pkg/storage/cloud/http_storage.go @@ -346,11 +346,15 @@ func (h *httpStorage) req( switch resp.StatusCode { case 200, 201, 204, 206: - // Pass. + // Pass. default: body, _ := ioutil.ReadAll(resp.Body) _ = resp.Body.Close() - return nil, errors.Errorf("error response from server: %s %q", resp.Status, body) + err := errors.Errorf("error response from server: %s %q", resp.Status, body) + if err != nil && resp.StatusCode == 404 { + err = errors.Wrapf(ErrFileDoesNotExist, "http storage file does not exist: %s", err.Error()) + } + return nil, err } return resp, nil } diff --git a/pkg/storage/cloud/http_storage_test.go b/pkg/storage/cloud/http_storage_test.go index 0a8c64ee31d7..3a54810fd1b3 100644 --- a/pkg/storage/cloud/http_storage_test.go +++ b/pkg/storage/cloud/http_storage_test.go @@ -110,7 +110,7 @@ func TestPutHttp(t *testing.T) { srv, files, cleanup := makeServer() defer cleanup() testExportStore(t, srv.String(), false) - if expected, actual := 13, files(); expected != actual { + if expected, actual := 14, files(); expected != actual { t.Fatalf("expected %d files to be written to single http store, got %d", expected, actual) } }) diff --git a/pkg/storage/cloud/nodelocal_storage.go b/pkg/storage/cloud/nodelocal_storage.go index 99a2f07ca8b6..9d4873f7845d 100644 --- a/pkg/storage/cloud/nodelocal_storage.go +++ b/pkg/storage/cloud/nodelocal_storage.go @@ -14,6 +14,7 @@ 
import ( "context" "fmt" "io" + "os" "path" "strings" @@ -21,6 +22,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) type localFileStorage struct { @@ -78,7 +81,19 @@ func (l *localFileStorage) WriteFile( } func (l *localFileStorage) ReadFile(ctx context.Context, basename string) (io.ReadCloser, error) { - return l.blobClient.ReadFile(ctx, joinRelativePath(l.base, basename)) + var err error + var reader io.ReadCloser + if reader, err = l.blobClient.ReadFile(ctx, joinRelativePath(l.base, basename)); err != nil { + // The format of the error returned by the above ReadFile call differs based + // on whether we are reading from a local or remote nodelocal store. + // The local store returns a golang native ErrNotFound, whereas the remote + // store returns a gRPC native NotFound error. + if os.IsNotExist(err) || status.Code(err) == codes.NotFound { + return nil, errors.Wrapf(ErrFileDoesNotExist, "nodelocal storage file does not exist: %s", err.Error()) + } + return nil, err + } + return reader, nil } func (l *localFileStorage) ListFiles(ctx context.Context, patternSuffix string) ([]string, error) { diff --git a/pkg/storage/cloud/s3_storage.go b/pkg/storage/cloud/s3_storage.go index 4700a7e6d550..1e7d54777ed4 100644 --- a/pkg/storage/cloud/s3_storage.go +++ b/pkg/storage/cloud/s3_storage.go @@ -18,6 +18,7 @@ import ( "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" @@ -171,6 +172,13 @@ func (s *s3Storage) ReadFile(ctx context.Context, basename string) (io.ReadClose Key: aws.String(path.Join(s.prefix, basename)), }) if err != nil { + if aerr := (awserr.Error)(nil); errors.As(err, &aerr) { + switch aerr.Code() { + // Relevant 404 errors reported by 
AWS. + case s3.ErrCodeNoSuchBucket, s3.ErrCodeNoSuchKey: + return nil, errors.Wrapf(ErrFileDoesNotExist, "s3 object does not exist: %s", err.Error()) + } + } return nil, errors.Wrap(err, "failed to get s3 object") } return out.Body, nil diff --git a/pkg/storage/cloud/s3_storage_test.go b/pkg/storage/cloud/s3_storage_test.go index 0eb9bd9a4ec3..e7641f38f023 100644 --- a/pkg/storage/cloud/s3_storage_test.go +++ b/pkg/storage/cloud/s3_storage_test.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/blobs" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -152,3 +153,56 @@ func TestS3DisallowImplicitCredentials(t *testing.T) { require.Error(t, err) require.True(t, strings.Contains(err.Error(), "implicit")) } + +// S3 has two "does not exist" errors - ErrCodeNoSuchBucket and ErrCodeNoSuchKey. +// ErrCodeNoSuchKey is tested via the general test in external_storage_test.go. +// This test attempts to ReadFile from a bucket which does not exist. +func TestS3BucketDoesNotExist(t *testing.T) { + defer leaktest.AfterTest(t)() + + q := make(url.Values) + expect := map[string]string{ + "AWS_S3_ENDPOINT": S3EndpointParam, + "AWS_S3_ENDPOINT_KEY": S3AccessKeyParam, + "AWS_S3_ENDPOINT_REGION": S3RegionParam, + "AWS_S3_ENDPOINT_SECRET": S3SecretParam, + } + for env, param := range expect { + v := os.Getenv(env) + if v == "" { + t.Skipf("%s env var must be set", env) + } + q.Add(param, v) + } + + bucket := "invalid-bucket" + u := url.URL{ + Scheme: "s3", + Host: bucket, + Path: "backup-test", + RawQuery: q.Encode(), + } + + ctx := context.Background() + + conf, err := ExternalStorageConfFromURI(u.String()) + if err != nil { + t.Fatal(err) + } + + // Setup a sink for the given args. 
+ clientFactory := blobs.TestBlobServiceClient(testSettings.ExternalIODir) + s, err := MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings, clientFactory) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + if readConf := s.Conf(); readConf != conf { + t.Fatalf("conf does not roundtrip: started with %+v, got back %+v", conf, readConf) + } + + _, err = s.ReadFile(ctx, "") + require.Error(t, err, "") + require.True(t, errors.Is(err, ErrFileDoesNotExist)) +} diff --git a/pkg/testutils/buildutil/build.go b/pkg/testutils/buildutil/build.go index 4270b0b22a51..299bd8e7aa1e 100644 --- a/pkg/testutils/buildutil/build.go +++ b/pkg/testutils/buildutil/build.go @@ -38,7 +38,7 @@ func short(in string) string { // indirectly) on forbidden packages. The forbidden packages are specified as // either exact matches or prefix matches. // A match is not reported if the package that includes the forbidden package -// is listed in the whitelist. +// is listed in the allowlist. // If GOPATH isn't set, it is an indication that the source is not available and // the test is skipped. func VerifyNoImports( @@ -46,7 +46,7 @@ func VerifyNoImports( pkgPath string, cgo bool, forbiddenPkgs, forbiddenPrefixes []string, - whitelist ...string, + allowlist ...string, ) { // Skip test if source is not available. @@ -68,14 +68,14 @@ func VerifyNoImports( for _, imp := range pkg.Imports { for _, forbidden := range forbiddenPkgs { if forbidden == imp { - whitelisted := false - for _, w := range whitelist { + allowlisted := false + for _, w := range allowlist { if path == w { - whitelisted = true + allowlisted = true break } } - if !whitelisted { + if !allowlisted { return errors.Errorf("%s imports %s, which is forbidden", short(path), short(imp)) } } @@ -129,10 +129,10 @@ func VerifyNoImports( } } -// VerifyTransitiveWhitelist checks that the entire set of transitive -// dependencies of the given package is in a whitelist. 
Vendored and stdlib +// VerifyTransitiveAllowlist checks that the entire set of transitive +// dependencies of the given package is in a allowlist. Vendored and stdlib // packages are always allowed. -func VerifyTransitiveWhitelist(t testing.TB, pkg string, allowedPkgs []string) { +func VerifyTransitiveAllowlist(t testing.TB, pkg string, allowedPkgs []string) { // Skip test if source is not available. if build.Default.GOPATH == "" { t.Skip("GOPATH isn't set") diff --git a/pkg/testutils/lint/lint_test.go b/pkg/testutils/lint/lint_test.go index 49d0519e3c25..33da85262146 100644 --- a/pkg/testutils/lint/lint_test.go +++ b/pkg/testutils/lint/lint_test.go @@ -816,7 +816,7 @@ func TestLint(t *testing.T) { "git", "grep", "-nE", - `\.Clone\([^)]`, + `proto\.Clone\([^)]`, "--", "*.go", ":!util/protoutil/clone_test.go", @@ -1079,6 +1079,7 @@ func TestLint(t *testing.T) { stream.GrepNot(`^storage\/rocksdb_error_dict\.go$`), stream.GrepNot(`^workload/tpcds/tpcds.go$`), stream.GrepNot(`^geo/geoprojbase/projections.go$`), + stream.GrepNot(`^sql/logictest/testdata/logic_test/pg_extension$`), stream.Map(func(s string) string { return filepath.Join(pkgDir, s) }), @@ -1290,7 +1291,7 @@ func TestLint(t *testing.T) { stream.GrepNot(`cockroach/pkg/testutils/lint: log$`), stream.GrepNot(`cockroach/pkg/util/sysutil: syscall$`), stream.GrepNot(`cockroach/pkg/util/log: github\.com/pkg/errors$`), - stream.GrepNot(`cockroach/pkg/(base|security|util/(log|randutil|stop)): log$`), + stream.GrepNot(`cockroach/pkg/(base|release|security|util/(log|randutil|stop)): log$`), stream.GrepNot(`cockroach/pkg/(server/serverpb|ts/tspb): github\.com/golang/protobuf/proto$`), stream.GrepNot(`cockroach/pkg/util/uuid: github\.com/satori/go\.uuid$`), @@ -1482,6 +1483,11 @@ func TestLint(t *testing.T) { ":!sql/colexecbase/colexecerror/error.go", ":!sql/colexec/execpb/stats.pb.go", ":!sql/colflow/vectorized_panic_propagation_test.go", + // This exception is because execgen itself uses panics during code + // 
generation - not at execution time. The (glob,exclude) directive + // (see git help gitglossary) makes * behave like a normal, single dir + // glob, and exclude is the synonym of !. + ":(glob,exclude)sql/colexec/execgen/*.go", ) if err != nil { t.Fatal(err) @@ -1704,6 +1710,10 @@ func TestLint(t *testing.T) { "WarningfDepth", "Wrapf", "WrapWithDepthf", + "redact.Fprint", + "redact.Fprintf", + "redact.Sprint", + "redact.Sprintf", }, ",") filters := []stream.Filter{ @@ -1763,6 +1773,12 @@ func TestLint(t *testing.T) { // because addStructured takes its positional argument as []interface{}, // instead of ...interface{}. stream.GrepNot(`pkg/util/log/structured\.go:\d+:\d+: addStructured\(\): format argument is not a constant expression`), + // The markers test file is passing test case formats + // to the printf function. + stream.GrepNot(`pkg/util/redact/markers_test\.go:\d+:\d+: TestRedactStream\(\): format argument is not a constant expression`), + // roachtest is not collecting redactable logs so we don't care + // about printf hygiene there as much. + stream.GrepNot(`pkg/cmd/roachtest/log\.go:.*format argument is not a constant expression`), } roachlint, err := exec.LookPath("roachvet") diff --git a/pkg/testutils/lint/passes/fmtsafe/fmtsafe.go b/pkg/testutils/lint/passes/fmtsafe/fmtsafe.go index 57f10e9f7b3c..f55046ac48ee 100644 --- a/pkg/testutils/lint/passes/fmtsafe/fmtsafe.go +++ b/pkg/testutils/lint/passes/fmtsafe/fmtsafe.go @@ -223,7 +223,7 @@ func checkCallExpr(pass *analysis.Pass, enclosingFnName string, call *ast.CallEx lit := pass.TypesInfo.Types[call.Args[idx]].Value if lit != nil { - // A literal constant! All is well. + // A literal or constant! All is well. return } @@ -251,7 +251,7 @@ func checkCallExpr(pass *analysis.Pass, enclosingFnName string, call *ast.CallEx // Tip is exported for use in tests. var Tip = ` -Tip: use YourFuncf("descriptive prefix %s", ...) 
or list new formatting wrappers in pkg/testutils/lint/passes/fmtsafe/functions.go.` +Tip: use YourFuncf("descriptive prefix %%s", ...) or list new formatting wrappers in pkg/testutils/lint/passes/fmtsafe/functions.go.` func hasNoLintComment(pass *analysis.Pass, call *ast.CallExpr, idx int) bool { fPos, f := findContainingFile(pass, call) diff --git a/pkg/testutils/lint/passes/fmtsafe/functions.go b/pkg/testutils/lint/passes/fmtsafe/functions.go index 04cc59c12469..40a4909835cd 100644 --- a/pkg/testutils/lint/passes/fmtsafe/functions.go +++ b/pkg/testutils/lint/passes/fmtsafe/functions.go @@ -60,20 +60,29 @@ var requireConstFmt = map[string]bool{ "(*log.Logger).Panicf": true, "(*log.Logger).Printf": true, - "github.com/cockroachdb/cockroach/pkg/util/log.Shoutf": true, - "github.com/cockroachdb/cockroach/pkg/util/log.Infof": true, - "github.com/cockroachdb/cockroach/pkg/util/log.Warningf": true, - "github.com/cockroachdb/cockroach/pkg/util/log.Errorf": true, - "github.com/cockroachdb/cockroach/pkg/util/log.Eventf": true, - "github.com/cockroachdb/cockroach/pkg/util/log.VEventf": true, - "github.com/cockroachdb/cockroach/pkg/util/log.VErrEventf": true, - "github.com/cockroachdb/cockroach/pkg/util/log.InfofDepth": true, - "github.com/cockroachdb/cockroach/pkg/util/log.WarningfDepth": true, - "github.com/cockroachdb/cockroach/pkg/util/log.ErrorfDepth": true, - "github.com/cockroachdb/cockroach/pkg/util/log.FatalfDepth": true, - "github.com/cockroachdb/cockroach/pkg/util/log.VEventfDepth": true, - "github.com/cockroachdb/cockroach/pkg/util/log.VErrEventfDepth": true, - "github.com/cockroachdb/cockroach/pkg/util/log.ReportOrPanic": true, + "github.com/cockroachdb/cockroach/pkg/util/log.Shoutf": true, + "github.com/cockroachdb/cockroach/pkg/util/log.Infof": true, + "github.com/cockroachdb/cockroach/pkg/util/log.Warningf": true, + "github.com/cockroachdb/cockroach/pkg/util/log.Errorf": true, + "github.com/cockroachdb/cockroach/pkg/util/log.Eventf": true, + 
"github.com/cockroachdb/cockroach/pkg/util/log.vEventf": true, + "github.com/cockroachdb/cockroach/pkg/util/log.VEventf": true, + "github.com/cockroachdb/cockroach/pkg/util/log.VErrEventf": true, + "github.com/cockroachdb/cockroach/pkg/util/log.InfofDepth": true, + "github.com/cockroachdb/cockroach/pkg/util/log.WarningfDepth": true, + "github.com/cockroachdb/cockroach/pkg/util/log.ErrorfDepth": true, + "github.com/cockroachdb/cockroach/pkg/util/log.FatalfDepth": true, + "github.com/cockroachdb/cockroach/pkg/util/log.VEventfDepth": true, + "github.com/cockroachdb/cockroach/pkg/util/log.VErrEventfDepth": true, + "github.com/cockroachdb/cockroach/pkg/util/log.ReportOrPanic": true, + "github.com/cockroachdb/cockroach/pkg/util/log.MakeEntry": true, + "github.com/cockroachdb/cockroach/pkg/util/log.FormatWithContextTags": true, + "github.com/cockroachdb/cockroach/pkg/util/log.renderArgs": true, + + "(*github.com/cockroachdb/cockroach/pkg/util/log.loggerT).makeStartLine": true, + "(*github.com/cockroachdb/cockroach/pkg/util/log.SecondaryLogger).output": true, + "(*github.com/cockroachdb/cockroach/pkg/util/log.SecondaryLogger).Logf": true, + "(*github.com/cockroachdb/cockroach/pkg/util/log.SecondaryLogger).LogfDepth": true, "(github.com/cockroachdb/cockroach/pkg/rpc.breakerLogger).Debugf": true, "(github.com/cockroachdb/cockroach/pkg/rpc.breakerLogger).Infof": true, @@ -146,6 +155,12 @@ var requireConstFmt = map[string]bool{ "github.com/cockroachdb/errors.NewAssertionErrorWithWrappedErrf": true, "github.com/cockroachdb/errors.WithSafeDetails": true, + "github.com/cockroachdb/cockroach/pkg/util/redact.Sprintf": true, + "github.com/cockroachdb/cockroach/pkg/util/redact.Fprintf": true, + "(github.com/cockroachdb/cockroach/pkg/util/redact.SafePrinter).Printf": true, + "(github.com/cockroachdb/cockroach/pkg/util/redact.SafeWriter).Printf": true, + "(*github.com/cockroachdb/cockroach/pkg/util/redact.printer).Printf": true, + 
"github.com/cockroachdb/cockroach/pkg/roachpb.NewErrorf": true, "github.com/cockroachdb/cockroach/pkg/ccl/importccl.makeRowErr": true, @@ -159,6 +174,8 @@ var requireConstFmt = map[string]bool{ "github.com/cockroachdb/cockroach/pkg/sql/opt/optbuilder.unimplementedWithIssueDetailf": true, + "(*github.com/cockroachdb/cockroach/pkg/sql/pgwire.authPipe).Logf": true, + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror.Newf": true, "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror.NewWithDepthf": true, "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror.DangerousStatementf": true, diff --git a/pkg/testutils/lint/testdata/errcheck_excludes.txt b/pkg/testutils/lint/testdata/errcheck_excludes.txt index 085af18a23b3..970c84af3b66 100644 --- a/pkg/testutils/lint/testdata/errcheck_excludes.txt +++ b/pkg/testutils/lint/testdata/errcheck_excludes.txt @@ -1,6 +1,8 @@ fmt.Fprint fmt.Fprintf fmt.Fprintln +github.com/cockroachdb/cockroach/pkg/util/redact.Fprint +github.com/cockroachdb/cockroach/pkg/util/redact.Fprintf (*bufio.Writer).Flush (*database/sql.DB).Close (*database/sql.Rows).Close diff --git a/pkg/ui/.storybook/decorators/index.ts b/pkg/ui/.storybook/decorators/index.ts index b6636d1c4f94..1ce42bbb90dd 100644 --- a/pkg/ui/.storybook/decorators/index.ts +++ b/pkg/ui/.storybook/decorators/index.ts @@ -9,3 +9,4 @@ // licenses/APL.txt. export * from "./withRouterProvider"; +export * from "./withBackground"; diff --git a/pkg/ui/.storybook/decorators/withBackground.tsx b/pkg/ui/.storybook/decorators/withBackground.tsx new file mode 100644 index 000000000000..d3099e2b5d41 --- /dev/null +++ b/pkg/ui/.storybook/decorators/withBackground.tsx @@ -0,0 +1,18 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import React from "react"; +import {RenderFunction} from "storybook__react"; + +export const withBackgroundFactory = (backgroundColor = "#F5F7FA") => (storyFn: RenderFunction) => ( +
+ {storyFn()} +
+); diff --git a/pkg/ui/src/views/app/components/Search/index.tsx b/pkg/ui/src/views/app/components/Search/index.tsx index 8a8fb790664f..824d529361d8 100644 --- a/pkg/ui/src/views/app/components/Search/index.tsx +++ b/pkg/ui/src/views/app/components/Search/index.tsx @@ -13,7 +13,8 @@ import { InputProps } from "antd/lib/input"; import CancelIcon from "assets/cancel.svg"; import SearchIcon from "assets/search.svg"; import React from "react"; -import "./search.styl"; +import classNames from "classnames/bind"; +import styles from "./search.module.styl"; interface ISearchProps { onSubmit: (value: string) => void; @@ -29,6 +30,8 @@ interface ISearchState { type TSearchProps = ISearchProps & InputProps; +const cx = classNames.bind(styles); + export class Search extends React.Component { state = { value: this.props.defaultValue || "", @@ -62,16 +65,24 @@ export class Search extends React.Component { const { value, submitted } = this.state; if (value.length > 0) { if (submitted) { - return ; + return ( + + ); } - return ; + return ; } return null; } render() { const { value, submitted } = this.state; - const className = submitted ? "_submitted" : ""; + const className = submitted ? cx("_submitted") : ""; /* current antdesign (3.25.3) has Input.d.ts incompatible with latest @types/react @@ -82,13 +93,13 @@ export class Search extends React.Component { const MyInput = Input as any; return ( -
+ } + prefix={} suffix={this.renderSuffix()} value={value} {...this.props} diff --git a/pkg/ui/src/views/app/components/Search/search.styl b/pkg/ui/src/views/app/components/Search/search.module.styl similarity index 89% rename from pkg/ui/src/views/app/components/Search/search.styl rename to pkg/ui/src/views/app/components/Search/search.module.styl index 584835a5f115..045af88d05ff 100644 --- a/pkg/ui/src/views/app/components/Search/search.styl +++ b/pkg/ui/src/views/app/components/Search/search.module.styl @@ -14,12 +14,13 @@ ._search-form width 280px height 40px - .ant-input-affix-wrapper + :global(.ant-input-affix-wrapper) + height 40px &:hover - .ant-input:not(.ant-input-disabled) + :global(.ant-input:not(.ant-input-disabled)) border-color $adminui-blue-1-base border-right-width 2px !important - .ant-btn + :global(.ant-btn) margin 0 padding 0 width auto @@ -44,7 +45,7 @@ line-height 0px !important &:hover color $adminui-grey-2 - .ant-input + :global(.ant-input) font-size 14px font-family $font-family--base color $adminui-grey-1 @@ -60,6 +61,6 @@ padding-left 35px padding-right 60px ._submitted - .ant-input + :global(.ant-input) &:not(:first-child) padding-right 40px diff --git a/pkg/ui/src/views/app/components/Search/search.stories.tsx b/pkg/ui/src/views/app/components/Search/search.stories.tsx new file mode 100644 index 000000000000..496f90dfeba3 --- /dev/null +++ b/pkg/ui/src/views/app/components/Search/search.stories.tsx @@ -0,0 +1,22 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +import React from "react"; +import { storiesOf } from "@storybook/react"; + +import { Search } from "./index"; + +storiesOf("Search", module) + .add("empty", () => ( + {}} value="" /> + )) + .add("with search text", () => ( + {}} value="select * from" /> + )); diff --git a/pkg/ui/src/views/cluster/components/range/index.tsx b/pkg/ui/src/views/cluster/components/range/index.tsx index 3efbe9e73d8e..58befcc9b4f5 100644 --- a/pkg/ui/src/views/cluster/components/range/index.tsx +++ b/pkg/ui/src/views/cluster/components/range/index.tsx @@ -164,7 +164,7 @@ class RangeSelect extends React.Component { onClick={this.handleOptionButtonOnClick(option)} ghost > - {this.props.selected.title !== "Custom" && option.value === "Custom" ? "--" : option.timeLabel} + {this.props.selected.title !== "Custom" && option.value === "Custom" ? "--" : option.timeLabel} {option.value === "Custom" ? "Custom date range" : option.value} ) diff --git a/pkg/ui/src/views/cluster/components/range/range.styl b/pkg/ui/src/views/cluster/components/range/range.styl index f21d721f9884..95fef6685da9 100644 --- a/pkg/ui/src/views/cluster/components/range/range.styl +++ b/pkg/ui/src/views/cluster/components/range/range.styl @@ -230,3 +230,17 @@ border 1px solid $colors--neutral-5 &:hover background $background-color + +.range__range-title + display flex + justify-content center + align-items center + background $colors--neutral-3 + width 34px + text-align center + border-radius 3px + color $colors--neutral-7 + font-size 12px + line-height 24px + letter-spacing 0.1px + font-family $font-family--bold diff --git a/pkg/ui/src/views/cluster/containers/nodeLogs/index.tsx b/pkg/ui/src/views/cluster/containers/nodeLogs/index.tsx index 7d7e113cf15a..d6c038944140 100644 --- a/pkg/ui/src/views/cluster/containers/nodeLogs/index.tsx +++ b/pkg/ui/src/views/cluster/containers/nodeLogs/index.tsx @@ -61,7 +61,7 @@ export class Logs extends React.Component { title: "Message", cell: (index: number) => (
-              { logEntries[index].message }
+              { (logEntries[index].tags ? "[" + logEntries[index].tags + "] " : "") + logEntries[index].message }
             
), }, diff --git a/pkg/ui/src/views/reports/containers/network/filter/index.tsx b/pkg/ui/src/views/reports/containers/network/filter/index.tsx index 744c16cb3526..ff46acbc9d68 100644 --- a/pkg/ui/src/views/reports/containers/network/filter/index.tsx +++ b/pkg/ui/src/views/reports/containers/network/filter/index.tsx @@ -20,6 +20,7 @@ interface IFilterProps { deselectFilterByKey: (key: string) => void; sort: NetworkSort[]; filter: NetworkFilter; + dropDownClassName?: string; } interface IFilterState { @@ -106,6 +107,7 @@ export class Filter extends React.Component { render() { const { opened, width } = this.state; + const { dropDownClassName } = this.props; const containerLeft = this.rangeContainer.current ? this.rangeContainer.current.getBoundingClientRect().left : 0; const left = width >= (containerLeft + 240) ? 0 : width - (containerLeft + 240); return ( @@ -114,7 +116,7 @@ export class Filter extends React.Component { title="Filter" options={[]} selected="" - className={classNames({ "dropdown__focused": opened })} + className={classNames({ "dropdown__focused": opened }, dropDownClassName)} content={
this.setState({ opened: !opened })}/> diff --git a/pkg/ui/src/views/reports/containers/network/sort/index.tsx b/pkg/ui/src/views/reports/containers/network/sort/index.tsx index 9bbd678e63fd..635bc0fd1d89 100644 --- a/pkg/ui/src/views/reports/containers/network/sort/index.tsx +++ b/pkg/ui/src/views/reports/containers/network/sort/index.tsx @@ -68,8 +68,15 @@ class Sort extends React.Component { options={this.getSortValues(sort)} selected={this.pageView()} onChange={this.navigateTo} + className="Sort-latency__dropdown--spacing" + /> + - Collapse Nodes
diff --git a/pkg/ui/src/views/reports/containers/network/sort/sort.styl b/pkg/ui/src/views/reports/containers/network/sort/sort.styl index 229a826906f6..f8d9c306024f 100644 --- a/pkg/ui/src/views/reports/containers/network/sort/sort.styl +++ b/pkg/ui/src/views/reports/containers/network/sort/sort.styl @@ -12,6 +12,6 @@ display flex align-items center padding 0 24px - .dropdown - margin-right 24px - \ No newline at end of file + +.Sort-latency__dropdown--spacing + margin-right 24px diff --git a/pkg/ui/src/views/shared/components/dropdown/dropdown.styl b/pkg/ui/src/views/shared/components/dropdown/dropdown.module.styl similarity index 93% rename from pkg/ui/src/views/shared/components/dropdown/dropdown.styl rename to pkg/ui/src/views/shared/components/dropdown/dropdown.module.styl index 19bd8ef79e86..dc28e544d7ff 100644 --- a/pkg/ui/src/views/shared/components/dropdown/dropdown.styl +++ b/pkg/ui/src/views/shared/components/dropdown/dropdown.module.styl @@ -28,14 +28,14 @@ $dropdown-hover-color = darken($background-color, 2.5%) display flex align-items center - .Select + :global(.Select) position initial - .Select-menu-outer + :global(.Select-menu-outer) top calc(100% + 8px) padding 8px 0 - .Select-option + :global(.Select-option) font-size 14px line-height 22px font-family $font-family--base @@ -45,7 +45,7 @@ $dropdown-hover-color = darken($background-color, 2.5%) color $colors--primary-blue-3 !important background-color transparent - .dropdown__title, .Select-value-label + .dropdown__title, :global(.Select-value-label) color $adminui-grey-1 !important font-family SourceSansPro-SemiBold font-size 14px @@ -53,7 +53,7 @@ $dropdown-hover-color = darken($background-color, 2.5%) letter-spacing 0.1px &:hover - .Select-arrow-zone + :global(.Select-arrow-zone) path fill $colors--neutral-5 @@ -74,7 +74,7 @@ $dropdown-hover-color = darken($background-color, 2.5%) border 1px solid $colors--primary-blue-3 box-shadow 0px 0px 3px 2px $colors--primary-blue-1 - .Select-arrow-zone + 
:global(.Select-arrow-zone) color $adminui-blue-1-base .caret-down display flex @@ -163,7 +163,7 @@ $dropdown-hover-color = darken($background-color, 2.5%) &:hover background-color $dropdown-hover-color .dropdown.full-size - .Select-menu-outer, .Select-menu + :global(.Select-menu-outer), :global(.Select-menu) max-height 450px // NOTE: react-select styles can be found in styl/shame.styl diff --git a/pkg/ui/src/views/shared/components/dropdown/index.tsx b/pkg/ui/src/views/shared/components/dropdown/index.tsx index 65658fc54d45..70d98d25efd3 100644 --- a/pkg/ui/src/views/shared/components/dropdown/index.tsx +++ b/pkg/ui/src/views/shared/components/dropdown/index.tsx @@ -8,12 +8,12 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -import classNames from "classnames"; +import classNames from "classnames/bind"; import Select from "react-select"; import React from "react"; import _ from "lodash"; -import "./dropdown.styl"; +import styles from "./dropdown.module.styl"; import {leftArrow, rightArrow} from "src/views/shared/components/icons"; import { trustIcon } from "src/util/trust"; @@ -45,7 +45,17 @@ interface DropdownOwnProps { type?: "primary" | "secondary"; } -export const arrowRenderer = ({ isOpen }: { isOpen: boolean }) => ; +const cx = classNames.bind(styles); + +export const arrowRenderer = ({ isOpen }: { isOpen: boolean }) => + + + ; /** * Dropdown component that uses the URL query string for state. @@ -85,20 +95,27 @@ export default class Dropdown extends React.Component { render() { const { selected, options, onChange, onArrowClick, disabledArrows, content, isTimeRange, type = "secondary" } = this.props; - const className = classNames( + const className = cx( "dropdown", `dropdown--type-${type}`, - isTimeRange ? 
"_range" : "", - { "dropdown--side-arrows": !_.isNil(onArrowClick), "dropdown__focused": this.state.is_focused }, + { + "_range": isTimeRange, + "dropdown--side-arrows": !_.isNil(onArrowClick), + "dropdown__focused": this.state.is_focused, + }, this.props.className, ); - const leftClassName = classNames( + const leftClassName = cx( "dropdown__side-arrow", - { "dropdown__side-arrow--disabled": _.includes(disabledArrows, ArrowDirection.LEFT) }, + { + "dropdown__side-arrow--disabled": _.includes(disabledArrows, ArrowDirection.LEFT), + }, ); - const rightClassName = classNames( + const rightClassName = cx( "dropdown__side-arrow", - { "dropdown__side-arrow--disabled": _.includes(disabledArrows, ArrowDirection.RIGHT) }, + { + "dropdown__side-arrow--disabled": _.includes(disabledArrows, ArrowDirection.RIGHT), + }, ); return
@@ -109,12 +126,15 @@ export default class Dropdown extends React.Component { onClick={() => this.props.onArrowClick(ArrowDirection.LEFT)}> {this.props.title}{this.props.title && !isTimeRange ? ":" : ""} {content ? content :