From 487496f41984d76e0e10f6a4becf43d35611c57e Mon Sep 17 00:00:00 2001
From: Steven Danna
Date: Thu, 14 Dec 2023 21:06:33 +0000
Subject: [PATCH 1/4] backupccl: wrap a few errors

The underlying parser calls used by the rewrite functions can return
confusing errors that make it appear as if the user's backup statement
was invalid SQL. Wrap these errors so that the message names the
failing rewrite step instead.

Epic: none

Release note: None
---
 pkg/ccl/backupccl/restore_planning.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go
index fa3d2163735a..7a3760779a74 100644
--- a/pkg/ccl/backupccl/restore_planning.go
+++ b/pkg/ccl/backupccl/restore_planning.go
@@ -2118,19 +2118,19 @@ func doRestorePlan(
 		overrideDBName = newDBName
 	}
 	if err := rewrite.TableDescs(tables, descriptorRewrites, overrideDBName); err != nil {
-		return err
+		return errors.Wrapf(err, "table descriptor rewrite failed")
 	}
 	if err := rewrite.DatabaseDescs(databases, descriptorRewrites, map[descpb.ID]struct{}{}); err != nil {
-		return err
+		return errors.Wrapf(err, "database descriptor rewrite failed")
 	}
 	if err := rewrite.SchemaDescs(schemas, descriptorRewrites); err != nil {
-		return err
+		return errors.Wrapf(err, "schema descriptor rewrite failed")
 	}
 	if err := rewrite.TypeDescs(types, descriptorRewrites); err != nil {
-		return err
+		return errors.Wrapf(err, "type descriptor rewrite failed")
 	}
 	if err := rewrite.FunctionDescs(functions, descriptorRewrites, overrideDBName); err != nil {
-		return err
+		return errors.Wrapf(err, "function descriptor rewrite failed")
 	}
 
 	encodedTables := make([]*descpb.TableDescriptor, len(tables))

From af5bff7658a8ac556035fb0485e05b0f8847c243 Mon Sep 17 00:00:00 2001
From: Steven Danna
Date: Thu, 18 Jan 2024 15:06:08 +0000
Subject: [PATCH 2/4] roachtest: remove unnecessary cluster setting change

As of #117116 this is now the default, and the plan is to keep it the
default in 24.1.

Release note: None
---
 pkg/cmd/roachtest/tests/online_restore.go | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/pkg/cmd/roachtest/tests/online_restore.go b/pkg/cmd/roachtest/tests/online_restore.go
index 10bf480ca3aa..5490a66c051c 100644
--- a/pkg/cmd/roachtest/tests/online_restore.go
+++ b/pkg/cmd/roachtest/tests/online_restore.go
@@ -149,9 +149,6 @@ func registerOnlineRestore(r registry.Registry) {
 			if _, err := db.Exec("SET CLUSTER SETTING kv.queue.process.guaranteed_time_budget='1h'"); err != nil {
 				return err
 			}
-			if _, err := db.Exec("SET CLUSTER SETTING kv.snapshot_receiver.excise.enabled=true"); err != nil {
-				return err
-			}
 			// TODO(dt): AC appears periodically reduce the workload to 0 QPS
 			// during the download phase (sudden jumps from 0 to 2k qps to 0).
 			// Disable for now until we figure out how to smooth this out.
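
A note on the wrapping pattern in the first patch: errors.Wrapf from
github.com/cockroachdb/errors (which restore_planning.go already uses)
prefixes the message while keeping the original error as the cause, so
errors.Is/errors.As checks against the underlying parser error still
work. A minimal standalone sketch of that behavior; fakeRewrite is a
hypothetical stand-in for a rewrite.* call, not code from the patch:

    package main

    import (
        "fmt"

        "github.com/cockroachdb/errors"
    )

    // fakeRewrite simulates a rewrite call whose underlying parser error
    // would otherwise read like a syntax error in the user's own statement.
    func fakeRewrite() error {
        return errors.Newf("syntax error at or near %q", "@")
    }

    func main() {
        if err := fakeRewrite(); err != nil {
            wrapped := errors.Wrapf(err, "table descriptor rewrite failed")
            // Prints: table descriptor rewrite failed: syntax error at or near "@"
            fmt.Println(wrapped)
            // The cause is preserved, so callers can still match on it.
            fmt.Println(errors.Is(wrapped, err)) // true
        }
    }
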
From 3c62854f87feab00e078a5d3ab1e30573390c3ad Mon Sep 17 00:00:00 2001
From: Steven Danna
Date: Fri, 19 Jan 2024 15:07:21 +0000
Subject: [PATCH 3/4] bulk: add tracing spans to addSSTable in SSTBatcher

Epic: None

Release note: None
---
 pkg/kv/bulk/sst_batcher.go | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/pkg/kv/bulk/sst_batcher.go b/pkg/kv/bulk/sst_batcher.go
index 6ad67bbd7082..a15169d46d4d 100644
--- a/pkg/kv/bulk/sst_batcher.go
+++ b/pkg/kv/bulk/sst_batcher.go
@@ -767,6 +767,9 @@ func (b *SSTBatcher) addSSTable(
 	updatesLastRange bool,
 	ingestionPerformanceStats *bulkpb.IngestionPerformanceStats,
 ) error {
+	ctx, sp := tracing.ChildSpan(ctx, "*SSTBatcher.addSSTable")
+	defer sp.Finish()
+
 	sendStart := timeutil.Now()
 	if ingestionPerformanceStats == nil {
 		return errors.AssertionFailedf("ingestionPerformanceStats should not be nil")
@@ -844,7 +847,11 @@ func (b *SSTBatcher) addSSTable(
 			}
 			ba.Add(req)
 			beforeSend := timeutil.Now()
-			br, pErr := b.db.NonTransactionalSender().Send(ctx, ba)
+
+			sendCtx, sendSp := tracing.ChildSpan(ctx, "*SSTBatcher.addSSTable/Send")
+			br, pErr := b.db.NonTransactionalSender().Send(sendCtx, ba)
+			sendSp.Finish()
+
 			sendTime := timeutil.Since(beforeSend)
 			ingestionPerformanceStats.SendWait += sendTime

From ea0d3148c5ac0067e0a3ab22a81d829ae59548a5 Mon Sep 17 00:00:00 2001
From: DarrylWong
Date: Wed, 17 Jan 2024 12:52:01 -0500
Subject: [PATCH 4/4] roachtest: update tpce tests with new connection options

The tpc-e driver has been updated to accept options for a fixture
bucket, port, username, and password. This change adds those options to
the tpce tests accordingly.

Previously, tpce hardcoded the SQL port as 26257. Now that tpce accepts
a port option, we can instead discover the port and pass it along.

This change also includes a new SQLPorts function that returns the SQL
ports of the given nodes, as well as fixes to miscellaneous function
signatures so that they correctly say nodes instead of node.

Release note: none

Epic: none

Fixes: #117567
---
 pkg/cmd/roachtest/cluster.go                  | 86 ++++++++++++-------
 .../roachtest/cluster/cluster_interface.go    |  3 +-
 pkg/cmd/roachtest/tests/gossip.go             |  2 +-
 pkg/cmd/roachtest/tests/restore.go            | 16 ++--
 pkg/cmd/roachtest/tests/tpce.go               | 50 +++++++++--
 pkg/roachprod/install/cockroach.go            | 18 ++--
 pkg/roachprod/install/expander.go             |  2 +-
 pkg/roachprod/multitenant.go                  |  2 +-
 pkg/roachprod/roachprod.go                    | 40 ++++++++-
 9 files changed, 157 insertions(+), 62 deletions(-)

diff --git a/pkg/cmd/roachtest/cluster.go b/pkg/cmd/roachtest/cluster.go
index c05539970d3a..30efbd96a275 100644
--- a/pkg/cmd/roachtest/cluster.go
+++ b/pkg/cmd/roachtest/cluster.go
@@ -1421,12 +1421,12 @@ func newHealthStatusResult(node int, status int, body []byte, err error) *Health
 
 // HealthStatus returns the result of the /health?ready=1 endpoint for each node.
 func (c *clusterImpl) HealthStatus(
-	ctx context.Context, l *logger.Logger, node option.NodeListOption,
+	ctx context.Context, l *logger.Logger, nodes option.NodeListOption,
 ) ([]*HealthStatusResult, error) {
-	if len(node) < 1 {
+	if len(nodes) < 1 {
 		return nil, nil // unit tests
 	}
-	adminAddrs, err := c.ExternalAdminUIAddr(ctx, l, node)
+	adminAddrs, err := c.ExternalAdminUIAddr(ctx, l, nodes)
 	if err != nil {
 		return nil, errors.WithDetail(err, "Unable to get admin UI address(es)")
 	}
@@ -2466,7 +2466,7 @@ func (c *clusterImpl) loggerForCmd(
 	return l, logFile, nil
 }
 
-// pgURLErr returns the Postgres endpoint for the specified node. It accepts a
+// pgURLErr returns the Postgres endpoint for the specified nodes. It accepts a
 // flag specifying whether the URL should include the node's internal or
 // external IP address. In general, inter-cluster communication and should use
 // internal IPs and communication from a test driver to nodes in a cluster
@@ -2474,12 +2474,12 @@ func (c *clusterImpl) pgURLErr(
 	ctx context.Context,
 	l *logger.Logger,
-	node option.NodeListOption,
+	nodes option.NodeListOption,
 	external bool,
 	tenant string,
 	sqlInstance int,
 ) ([]string, error) {
-	urls, err := roachprod.PgURL(ctx, l, c.MakeNodes(node), c.localCertsDir, roachprod.PGURLOptions{
+	urls, err := roachprod.PgURL(ctx, l, c.MakeNodes(nodes), c.localCertsDir, roachprod.PGURLOptions{
 		External:           external,
 		Secure:             c.localCertsDir != "",
 		VirtualClusterName: tenant,
@@ -2496,9 +2496,13 @@ func (c *clusterImpl) pgURLErr(
 
 // InternalPGUrl returns the internal Postgres endpoint for the specified nodes.
 func (c *clusterImpl) InternalPGUrl(
-	ctx context.Context, l *logger.Logger, node option.NodeListOption, tenant string, sqlInstance int,
+	ctx context.Context,
+	l *logger.Logger,
+	nodes option.NodeListOption,
+	tenant string,
+	sqlInstance int,
 ) ([]string, error) {
-	return c.pgURLErr(ctx, l, node, false, tenant, sqlInstance)
+	return c.pgURLErr(ctx, l, nodes, false, tenant, sqlInstance)
 }
 
 // Silence unused warning.
@@ -2506,9 +2510,13 @@ var _ = (&clusterImpl{}).InternalPGUrl
 
 // ExternalPGUrl returns the external Postgres endpoint for the specified nodes.
 func (c *clusterImpl) ExternalPGUrl(
-	ctx context.Context, l *logger.Logger, node option.NodeListOption, tenant string, sqlInstance int,
+	ctx context.Context,
+	l *logger.Logger,
+	nodes option.NodeListOption,
+	tenant string,
+	sqlInstance int,
 ) ([]string, error) {
-	return c.pgURLErr(ctx, l, node, true, tenant, sqlInstance)
+	return c.pgURLErr(ctx, l, nodes, true, tenant, sqlInstance)
 }
 
 func addrToAdminUIAddr(addr string) (string, error) {
@@ -2552,32 +2560,46 @@ func addrToHostPort(addr string) (string, int, error) {
 }
 
 // InternalAdminUIAddr returns the internal Admin UI address in the form host:port
-// for the specified node.
+// for the specified nodes.
 func (c *clusterImpl) InternalAdminUIAddr(
-	ctx context.Context, l *logger.Logger, node option.NodeListOption,
+	ctx context.Context, l *logger.Logger, nodes option.NodeListOption,
 ) ([]string, error) {
-	return c.adminUIAddr(ctx, l, node, false)
+	return c.adminUIAddr(ctx, l, nodes, false)
 }
 
 // ExternalAdminUIAddr returns the external Admin UI address in the form host:port
-// for the specified node.
+// for the specified nodes.
func (c *clusterImpl) ExternalAdminUIAddr( - ctx context.Context, l *logger.Logger, node option.NodeListOption, + ctx context.Context, l *logger.Logger, nodes option.NodeListOption, ) ([]string, error) { - return c.adminUIAddr(ctx, l, node, true) + return c.adminUIAddr(ctx, l, nodes, true) +} + +func (c *clusterImpl) SQLPorts( + ctx context.Context, + l *logger.Logger, + nodes option.NodeListOption, + tenant string, + sqlInstance int, +) ([]int, error) { + return roachprod.SQLPorts(ctx, l, c.MakeNodes(nodes), c.IsSecure(), tenant, sqlInstance) } func (c *clusterImpl) AdminUIPorts( - ctx context.Context, l *logger.Logger, nodes option.NodeListOption, + ctx context.Context, + l *logger.Logger, + nodes option.NodeListOption, + tenant string, + sqlInstance int, ) ([]int, error) { - return roachprod.AdminPorts(ctx, l, c.MakeNodes(nodes), c.IsSecure()) + return roachprod.AdminPorts(ctx, l, c.MakeNodes(nodes), c.IsSecure(), tenant, sqlInstance) } func (c *clusterImpl) adminUIAddr( - ctx context.Context, l *logger.Logger, node option.NodeListOption, external bool, + ctx context.Context, l *logger.Logger, nodes option.NodeListOption, external bool, ) ([]string, error) { var addrs []string - adminURLs, err := roachprod.AdminURL(ctx, l, c.MakeNodes(node), "", 0, "", + adminURLs, err := roachprod.AdminURL(ctx, l, c.MakeNodes(nodes), "", 0, "", external, false, false) if err != nil { return nil, err @@ -2598,32 +2620,32 @@ func (c *clusterImpl) adminUIAddr( // InternalIP returns the internal IP addresses for the specified nodes. func (c *clusterImpl) InternalIP( - ctx context.Context, l *logger.Logger, node option.NodeListOption, + ctx context.Context, l *logger.Logger, nodes option.NodeListOption, ) ([]string, error) { - return roachprod.IP(l, c.MakeNodes(node), false) + return roachprod.IP(l, c.MakeNodes(nodes), false) } // InternalAddr returns the internal address in the form host:port for the // specified nodes. func (c *clusterImpl) InternalAddr( - ctx context.Context, l *logger.Logger, node option.NodeListOption, + ctx context.Context, l *logger.Logger, nodes option.NodeListOption, ) ([]string, error) { - return c.addr(ctx, l, node, false) + return c.addr(ctx, l, nodes, false) } // ExternalAddr returns the external address in the form host:port for the -// specified node. +// specified nodes. func (c *clusterImpl) ExternalAddr( - ctx context.Context, l *logger.Logger, node option.NodeListOption, + ctx context.Context, l *logger.Logger, nodes option.NodeListOption, ) ([]string, error) { - return c.addr(ctx, l, node, true) + return c.addr(ctx, l, nodes, true) } func (c *clusterImpl) addr( - ctx context.Context, l *logger.Logger, node option.NodeListOption, external bool, + ctx context.Context, l *logger.Logger, nodes option.NodeListOption, external bool, ) ([]string, error) { var addrs []string - urls, err := c.pgURLErr(ctx, l, node, external, "" /* tenant */, 0 /* sqlInstance */) + urls, err := c.pgURLErr(ctx, l, nodes, external, "" /* tenant */, 0 /* sqlInstance */) if err != nil { return nil, err } @@ -2637,12 +2659,12 @@ func (c *clusterImpl) addr( return addrs, nil } -// ExternalIP returns the external IP addresses for the specified node. +// ExternalIP returns the external IP addresses for the specified nodes. 
func (c *clusterImpl) ExternalIP( - ctx context.Context, l *logger.Logger, node option.NodeListOption, + ctx context.Context, l *logger.Logger, nodes option.NodeListOption, ) ([]string, error) { var ips []string - addrs, err := c.ExternalAddr(ctx, l, node) + addrs, err := c.ExternalAddr(ctx, l, nodes) if err != nil { return nil, err } diff --git a/pkg/cmd/roachtest/cluster/cluster_interface.go b/pkg/cmd/roachtest/cluster/cluster_interface.go index 6ef6d40c704f..f8cc34aa85f4 100644 --- a/pkg/cmd/roachtest/cluster/cluster_interface.go +++ b/pkg/cmd/roachtest/cluster/cluster_interface.go @@ -75,6 +75,7 @@ type Cluster interface { InternalIP(ctx context.Context, l *logger.Logger, node option.NodeListOption) ([]string, error) ExternalAddr(ctx context.Context, l *logger.Logger, node option.NodeListOption) ([]string, error) ExternalIP(ctx context.Context, l *logger.Logger, node option.NodeListOption) ([]string, error) + SQLPorts(ctx context.Context, l *logger.Logger, node option.NodeListOption, tenant string, sqlInstance int) ([]int, error) // SQL connection strings. @@ -90,7 +91,7 @@ type Cluster interface { InternalAdminUIAddr(ctx context.Context, l *logger.Logger, node option.NodeListOption) ([]string, error) ExternalAdminUIAddr(ctx context.Context, l *logger.Logger, node option.NodeListOption) ([]string, error) - AdminUIPorts(ctx context.Context, l *logger.Logger, node option.NodeListOption) ([]int, error) + AdminUIPorts(ctx context.Context, l *logger.Logger, node option.NodeListOption, tenant string, sqlInstance int) ([]int, error) // Running commands on nodes. diff --git a/pkg/cmd/roachtest/tests/gossip.go b/pkg/cmd/roachtest/tests/gossip.go index 87d153574c17..998886df3b44 100644 --- a/pkg/cmd/roachtest/tests/gossip.go +++ b/pkg/cmd/roachtest/tests/gossip.go @@ -425,7 +425,7 @@ SELECT count(replicas) t.L().Printf("killing all nodes\n") c.Stop(ctx, t.L(), option.DefaultStopOpts()) - adminPorts, err := c.AdminUIPorts(ctx, t.L(), c.Node(1)) + adminPorts, err := c.AdminUIPorts(ctx, t.L(), c.Node(1), "" /* tenant */, 0 /* sqlInstance */) if err != nil { t.Fatal(err) } diff --git a/pkg/cmd/roachtest/tests/restore.go b/pkg/cmd/roachtest/tests/restore.go index 1c58076aca84..04ae9e6b4b1a 100644 --- a/pkg/cmd/roachtest/tests/restore.go +++ b/pkg/cmd/roachtest/tests/restore.go @@ -749,8 +749,10 @@ func (tpce tpceRestore) init( ) { spec := tpce.getSpec(ctx, t, c, sp) spec.init(ctx, t, c, tpceCmdOptions{ - customers: tpce.customers, - racks: sp.nodes}) + customers: tpce.customers, + racks: sp.nodes, + connectionOpts: defaultTPCEConnectionOpts(), + }) } func (tpce tpceRestore) run( @@ -759,10 +761,12 @@ func (tpce tpceRestore) run( spec := tpce.getSpec(ctx, t, c, sp) _, err := spec.run(ctx, t, c, tpceCmdOptions{ // Set the duration to be a week to ensure the workload never exits early. 
- duration: time.Hour * 7 * 24, - customers: tpce.customers, - racks: sp.nodes, - threads: sp.cpus * sp.nodes}) + duration: time.Hour * 7 * 24, + customers: tpce.customers, + racks: sp.nodes, + threads: sp.cpus * sp.nodes, + connectionOpts: defaultTPCEConnectionOpts(), + }) return err } diff --git a/pkg/cmd/roachtest/tests/tpce.go b/pkg/cmd/roachtest/tests/tpce.go index 2ba31b3f26cb..85e70bec2bcb 100644 --- a/pkg/cmd/roachtest/tests/tpce.go +++ b/pkg/cmd/roachtest/tests/tpce.go @@ -33,6 +33,7 @@ type tpceSpec struct { loadNode int roachNodes option.NodeListOption roachNodeIPFlags []string + portFlag string } type tpceCmdOptions struct { @@ -42,6 +43,27 @@ type tpceCmdOptions struct { duration time.Duration threads int skipCleanup bool + connectionOpts tpceConnectionOpts +} + +type tpceConnectionOpts struct { + fixtureBucket string + user string + password string +} + +const ( + defaultFixtureBucket = "gs://cockroach-fixtures-us-east1/tpce-csv" + defaultUser = "root" + defaultPassword = "" +) + +func defaultTPCEConnectionOpts() tpceConnectionOpts { + return tpceConnectionOpts{ + fixtureBucket: defaultFixtureBucket, + user: defaultUser, + password: defaultPassword, + } } func (to tpceCmdOptions) AddCommandOptions(cmd *roachtestutil.Command) { @@ -51,6 +73,9 @@ func (to tpceCmdOptions) AddCommandOptions(cmd *roachtestutil.Command) { cmd.MaybeFlag(to.duration != 0, "duration", to.duration) cmd.MaybeFlag(to.threads != 0, "threads", to.threads) cmd.MaybeFlag(to.skipCleanup, "skip-cleanup", "") + cmd.MaybeFlag(to.connectionOpts.fixtureBucket != "", "bucket", to.connectionOpts.fixtureBucket) + cmd.MaybeFlag(to.connectionOpts.user != "", "pg-user", to.connectionOpts.user) + cmd.MaybeFlag(to.connectionOpts.password != "", "pg-password", to.connectionOpts.password) } func initTPCESpec( @@ -72,10 +97,16 @@ func initTPCESpec( for i, ip := range roachNodeIPs { roachNodeIPFlags[i] = fmt.Sprintf("--hosts=%s", ip) } + ports, err := c.SQLPorts(ctx, l, roachNodes, "" /* tenant */, 0 /* sqlInstance */) + if err != nil { + return nil, err + } + port := fmt.Sprintf("--pg-port=%d", ports[0]) return &tpceSpec{ loadNode: loadNode, roachNodes: roachNodes, roachNodeIPFlags: roachNodeIPFlags, + portFlag: port, }, nil } @@ -89,14 +120,14 @@ func (ts *tpceSpec) newCmd(o tpceCmdOptions) *roachtestutil.Command { // import of the data and schema creation. func (ts *tpceSpec) init(ctx context.Context, t test.Test, c cluster.Cluster, o tpceCmdOptions) { cmd := ts.newCmd(o).Option("init") - c.Run(ctx, option.WithNodes(c.Node(ts.loadNode)), fmt.Sprintf("%s %s", cmd, ts.roachNodeIPFlags[0])) + c.Run(ctx, option.WithNodes(c.Node(ts.loadNode)), fmt.Sprintf("%s %s %s", cmd, ts.portFlag, ts.roachNodeIPFlags[0])) } // run runs the tpce workload on cluster that has been initialized with the tpce schema. 
func (ts *tpceSpec) run( ctx context.Context, t test.Test, c cluster.Cluster, o tpceCmdOptions, ) (install.RunResultDetails, error) { - cmd := fmt.Sprintf("%s %s", ts.newCmd(o), strings.Join(ts.roachNodeIPFlags, " ")) + cmd := fmt.Sprintf("%s %s %s", ts.newCmd(o), ts.portFlag, strings.Join(ts.roachNodeIPFlags, " ")) return c.RunWithDetailsSingleNode(ctx, t.L(), option.WithNodes(c.Node(ts.loadNode)), cmd) } @@ -147,7 +178,6 @@ func runTPCE(ctx context.Context, t test.Test, c cluster.Cluster, opts tpceOptio t.Status("installing cockroach") startOpts := option.DefaultStartOpts() startOpts.RoachprodOpts.StoreCount = opts.ssds - roachtestutil.SetDefaultSQLPort(c, &startOpts.RoachprodOpts) settings := install.MakeClusterSettings(install.NumRacksOption(racks)) c.Start(ctx, t.L(), startOpts, settings, crdbNodes) } @@ -186,8 +216,9 @@ func runTPCE(ctx context.Context, t test.Test, c cluster.Cluster, opts tpceOptio } t.Status(fmt.Sprintf("initializing %d tpc-e customers%s", opts.customers, estimatedSetupTimeStr)) tpceSpec.init(ctx, t, c, tpceCmdOptions{ - customers: opts.customers, - racks: racks, + customers: opts.customers, + racks: racks, + connectionOpts: defaultTPCEConnectionOpts(), }) return nil }) @@ -207,10 +238,11 @@ func runTPCE(ctx context.Context, t test.Test, c cluster.Cluster, opts tpceOptio workloadDuration = 2 * time.Hour } runOptions := tpceCmdOptions{ - customers: opts.customers, - racks: racks, - duration: workloadDuration, - threads: opts.nodes * opts.cpus, + customers: opts.customers, + racks: racks, + duration: workloadDuration, + threads: opts.nodes * opts.cpus, + connectionOpts: defaultTPCEConnectionOpts(), } if opts.activeCustomers != 0 { runOptions.activeCustomers = opts.activeCustomers diff --git a/pkg/roachprod/install/cockroach.go b/pkg/roachprod/install/cockroach.go index 6196f868a3c5..0a143022b205 100644 --- a/pkg/roachprod/install/cockroach.go +++ b/pkg/roachprod/install/cockroach.go @@ -520,8 +520,10 @@ func (c *SyncedCluster) NodeURL( } // NodePort returns the system tenant's SQL port for the given node. -func (c *SyncedCluster) NodePort(ctx context.Context, node Node) (int, error) { - desc, err := c.DiscoverService(ctx, node, SystemInterfaceName, ServiceTypeSQL, 0) +func (c *SyncedCluster) NodePort( + ctx context.Context, node Node, virtualClusterName string, sqlInstance int, +) (int, error) { + desc, err := c.DiscoverService(ctx, node, virtualClusterName, ServiceTypeSQL, sqlInstance) if err != nil { return 0, err } @@ -529,8 +531,10 @@ func (c *SyncedCluster) NodePort(ctx context.Context, node Node) (int, error) { } // NodeUIPort returns the system tenant's AdminUI port for the given node. 
-func (c *SyncedCluster) NodeUIPort(ctx context.Context, node Node) (int, error) { - desc, err := c.DiscoverService(ctx, node, SystemInterfaceName, ServiceTypeUI, 0) +func (c *SyncedCluster) NodeUIPort( + ctx context.Context, node Node, virtualClusterName string, sqlInstance int, +) (int, error) { + desc, err := c.DiscoverService(ctx, node, virtualClusterName, ServiceTypeUI, sqlInstance) if err != nil { return 0, err } @@ -1163,7 +1167,7 @@ func (c *SyncedCluster) generateClusterSettingCmd( pathPrefix = fmt.Sprintf("%s_", virtualCluster) } path := fmt.Sprintf("%s/%ssettings-initialized", c.NodeDir(node, 1 /* storeIndex */), pathPrefix) - port, err := c.NodePort(ctx, node) + port, err := c.NodePort(ctx, node, "" /* virtualClusterName */, 0 /* sqlInstance */) if err != nil { return "", err } @@ -1185,7 +1189,7 @@ func (c *SyncedCluster) generateInitCmd(ctx context.Context, node Node) (string, } path := fmt.Sprintf("%s/%s", c.NodeDir(node, 1 /* storeIndex */), "cluster-bootstrapped") - port, err := c.NodePort(ctx, node) + port, err := c.NodePort(ctx, node, "" /* virtualClusterName */, 0 /* sqlInstance */) if err != nil { return "", err } @@ -1392,7 +1396,7 @@ func (c *SyncedCluster) createFixedBackupSchedule( node := c.Nodes[0] binary := cockroachNodeBinary(c, node) - port, err := c.NodePort(ctx, node) + port, err := c.NodePort(ctx, node, "" /* virtualClusterName */, 0 /* sqlInstance */) if err != nil { return err } diff --git a/pkg/roachprod/install/expander.go b/pkg/roachprod/install/expander.go index 6b8eb4909540..b6e123808daf 100644 --- a/pkg/roachprod/install/expander.go +++ b/pkg/roachprod/install/expander.go @@ -229,7 +229,7 @@ func (e *expander) maybeExpandUIPort( e.uiPorts = make(map[Node]string, len(c.VMs)) for _, node := range allNodes(len(c.VMs)) { // TODO(herko): Add support for separate-process services. - e.uiPorts[node] = fmt.Sprint(c.NodeUIPort(ctx, node)) + e.uiPorts[node] = fmt.Sprint(c.NodeUIPort(ctx, node, "" /* virtualClusterName */, 0 /* sqlInstance */)) } } diff --git a/pkg/roachprod/multitenant.go b/pkg/roachprod/multitenant.go index cadc263bc2d4..e7f7d893adb6 100644 --- a/pkg/roachprod/multitenant.go +++ b/pkg/roachprod/multitenant.go @@ -40,7 +40,7 @@ func StartServiceForVirtualCluster( var kvAddrs []string for _, node := range sc.Nodes { - port, err := sc.NodePort(ctx, node) + port, err := sc.NodePort(ctx, node, "" /* virtualClusterName */, 0 /* sqlInstance */) if err != nil { return err } diff --git a/pkg/roachprod/roachprod.go b/pkg/roachprod/roachprod.go index f1317d67d5d7..e219ec1fc4c1 100644 --- a/pkg/roachprod/roachprod.go +++ b/pkg/roachprod/roachprod.go @@ -1074,9 +1074,41 @@ func AdminURL( return urlGenerator(ctx, c, l, c.TargetNodes(), uConfig) } +// SQLPorts finds the SQL ports for a cluster. +func SQLPorts( + ctx context.Context, + l *logger.Logger, + clusterName string, + secure bool, + virtualClusterName string, + sqlInstance int, +) ([]int, error) { + if err := LoadClusters(); err != nil { + return nil, err + } + c, err := newCluster(l, clusterName, install.SecureOption(secure)) + if err != nil { + return nil, err + } + var ports []int + for _, node := range c.Nodes { + port, err := c.NodePort(ctx, node, virtualClusterName, sqlInstance) + if err != nil { + return nil, errors.Wrapf(err, "Error discovering SQL Port for node %d", node) + } + ports = append(ports, port) + } + return ports, nil +} + // AdminPorts finds the AdminUI ports for a cluster. 
func AdminPorts( - ctx context.Context, l *logger.Logger, clusterName string, secure bool, + ctx context.Context, + l *logger.Logger, + clusterName string, + secure bool, + virtualClusterName string, + sqlInstance int, ) ([]int, error) { if err := LoadClusters(); err != nil { return nil, err @@ -1087,7 +1119,7 @@ func AdminPorts( } var ports []int for _, node := range c.Nodes { - port, err := c.NodeUIPort(ctx, node) + port, err := c.NodeUIPort(ctx, node, virtualClusterName, sqlInstance) if err != nil { return nil, errors.Wrapf(err, "Error discovering UI Port for node %d", node) } @@ -1140,7 +1172,7 @@ func Pprof(ctx context.Context, l *logger.Logger, clusterName string, opts Pprof func(ctx context.Context, node install.Node) (*install.RunResultDetails, error) { res := &install.RunResultDetails{Node: node} host := c.Host(node) - port, err := c.NodeUIPort(ctx, node) + port, err := c.NodeUIPort(ctx, node, "" /* virtualClusterName */, 0 /* sqlInstance */) if err != nil { return nil, err } @@ -2197,7 +2229,7 @@ func sendCaptureCommand( httpClient := httputil.NewClientWithTimeout(0 /* timeout: None */) _, _, err := c.ParallelE(ctx, l, install.WithNodes(nodes).WithDisplay(fmt.Sprintf("Performing workload capture %s", action)), func(ctx context.Context, node install.Node) (*install.RunResultDetails, error) { - port, err := c.NodeUIPort(ctx, node) + port, err := c.NodeUIPort(ctx, node, "" /* virtualClusterName */, 0 /* sqlInstance */) if err != nil { return nil, err }
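
The upshot of the SQLPorts plumbing in the last patch is that tests now
discover the SQL port through service discovery instead of assuming
26257. A condensed sketch of the call pattern, mirroring initTPCESpec
above (ctx, t, c, and the workload command string cmd are assumed to
come from the surrounding roachtest harness):

    // Ask the cluster for node 1's SQL port; tenant "" and sqlInstance 0
    // select the system interface, as in initTPCESpec.
    ports, err := c.SQLPorts(ctx, t.L(), c.Node(1), "" /* tenant */, 0 /* sqlInstance */)
    if err != nil {
        t.Fatal(err)
    }
    // Pass the discovered port to the workload instead of hardcoding 26257.
    portFlag := fmt.Sprintf("--pg-port=%d", ports[0])
    c.Run(ctx, option.WithNodes(c.Node(1)), fmt.Sprintf("%s %s", cmd, portFlag))
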