cliccl: update load show with summary subcommand
Previously, `load show` provided different options for displaying subsets of a backup's
metadata, but parsing those options could be error-prone, and a better mechanism for
selecting subsets (e.g. with `--include`) would be preferable.

This commit updates `load show` with a `summary` subcommand that displays
all metadata of an individual backup for debugging.

Release justification: non-production code changes.
Release note (cli change): Update `load show` with a `summary` subcommand that
displays the meta information of an individual backup. Users can now
inspect backup metadata, files, spans, and descriptors with the CLI command
`cockroach load show summary <backup_url>` without a running cluster.
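
For example, a minimal invocation might look like the following (the backup path and
`--external-io-dir` value are illustrative; any nodelocal backup reachable from the
local filesystem should work):

    cockroach load show summary nodelocal://self/my-backup --external-io-dir=/mnt/data/extern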
Elliebababa committed Mar 12, 2021
1 parent 4f2c444 commit aa7238a
Showing 3 changed files with 139 additions and 172 deletions.
212 changes: 92 additions & 120 deletions pkg/ccl/cliccl/load.go
@@ -30,34 +30,37 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/storage/cloud"
"github.com/cockroachdb/cockroach/pkg/storage/cloudimpl"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
)

const (
descriptors = "descriptors"
files = "files"
spans = "spans"
metadata = "metadata"
)

var externalIODir string

func init() {
loadShowCmd := &cobra.Command{
Use: "show [descriptors|files|spans|metadata] <backup_path>",

loadShowSummaryCmd := &cobra.Command{
Use: "summary <backup_path>",
Short: "show backups summary",
Long: "Shows summary of meta information about a SQL backup.",
Args: cobra.ExactArgs(1),
RunE: cli.MaybeDecorateGRPCError(runLoadShowSummary),
}

loadShowCmds := &cobra.Command{
Use: "show [command]",
Short: "show backups",
Long: "Shows subset(s) of meta information about a SQL backup.",
Long: "Shows information about a SQL backup.",
Args: cobra.MinimumNArgs(1),
RunE: cli.MaybeDecorateGRPCError(runLoadShow),
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Usage()
},
}

loadCmds := &cobra.Command{
Use: "load [command]",
Short: "loading commands",
Short: "load backup commands",
Long: `Commands for bulk loading external files.`,
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Usage()
@@ -73,72 +76,29 @@ func init() {
cliflags.ExternalIODir.Usage())

cli.AddCmd(loadCmds)
loadCmds.AddCommand(loadShowCmd)
loadShowCmd.Flags().AddFlagSet(loadFlags)
loadCmds.AddCommand(loadShowCmds)
loadShowCmds.AddCommand(loadShowSummaryCmd)
loadShowSummaryCmd.Flags().AddFlagSet(loadFlags)
}

func newBlobFactory(ctx context.Context, dialing roachpb.NodeID) (blobs.BlobClient, error) {
if dialing != 0 {
return nil, errors.Errorf(`only support nodelocal (0/self) under offline inspection`)
return nil, errors.Errorf("accessing node %d during nodelocal access is unsupported for CLI inspection; only local access is supported with nodelocal://self", dialing)
}
if externalIODir == "" {
externalIODir = filepath.Join(server.DefaultStorePath, "extern")
}
return blobs.NewLocalClient(externalIODir)
}

func parseShowArgs(args []string) (options map[string]bool, path string, err error) {
options = make(map[string]bool)
for _, arg := range args {
switch strings.ToLower(arg) {
case descriptors:
options[descriptors] = true
case files:
options[files] = true
case spans:
options[spans] = true
case metadata:
options[metadata] = true
default:
if path != "" {
return nil, "", errors.New("more than one path is specifiied")
}
path = arg
}
}

if len(options) == 0 {
options[descriptors] = true
options[files] = true
options[spans] = true
options[metadata] = true
}

if len(args) == len(options) {
return nil, "", errors.New("backup_path argument is required")
}
return options, path, nil
}

func runLoadShow(cmd *cobra.Command, args []string) error {

var options map[string]bool
var path string
var err error
if options, path, err = parseShowArgs(args); err != nil {
return err
}

var showHeaders bool
if len(options) > 1 {
showHeaders = true
}
func runLoadShowSummary(cmd *cobra.Command, args []string) error {

ctx := context.Background()
path := args[0]
if !strings.Contains(path, "://") {
path = cloudimpl.MakeLocalStorageURI(path)
}

ctx := context.Background()
externalStorageFromURI := func(ctx context.Context, uri string,
user security.SQLUsername) (cloud.ExternalStorage, error) {
return cloudimpl.ExternalStorageFromURI(ctx, uri, base.ExternalIODirConfig{},
@@ -154,25 +114,10 @@ func runLoadShow(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}

if _, ok := options[metadata]; ok {
showMeta(desc)
}

if _, ok := options[spans]; ok {
showSpans(desc, showHeaders)
}

if _, ok := options[files]; ok {
showFiles(desc, showHeaders)
}

if _, ok := options[descriptors]; ok {
if err := showDescriptor(desc); err != nil {
return err
}
}

showMeta(desc)
showSpans(desc)
showFiles(desc)
showDescriptors(desc)
return nil
}

@@ -190,75 +135,102 @@ func showMeta(desc backupccl.BackupManifest) {
fmt.Printf("BuildInfo: %s\n", desc.BuildInfo.Short())
}

func showSpans(desc backupccl.BackupManifest, showHeaders bool) {
tabfmt := ""
if showHeaders {
fmt.Printf("Spans:\n")
tabfmt = "\t"
func showSpans(desc backupccl.BackupManifest) {
fmt.Printf("Spans:\n")
if len(desc.Spans) == 0 {
fmt.Printf(" (No spans included in the specified backup path.)\n")
}
for _, s := range desc.Spans {
fmt.Printf("%s%s\n", tabfmt, s)
fmt.Printf(" %s\n", s)
}
}

func showFiles(desc backupccl.BackupManifest, showHeaders bool) {
tabfmt := ""
if showHeaders {
fmt.Printf("Files:\n")
tabfmt = "\t"
func showFiles(desc backupccl.BackupManifest) {
fmt.Printf("Files:\n")
if len(desc.Files) == 0 {
fmt.Printf(" (No sst files included in the specified backup path.)\n")
}
for _, f := range desc.Files {
fmt.Printf("%s%s:\n", tabfmt, f.Path)
fmt.Printf("%s Span: %s\n", tabfmt, f.Span)
fmt.Printf("%s Sha512: %0128x\n", tabfmt, f.Sha512)
fmt.Printf("%s DataSize: %d (%s)\n", tabfmt, f.EntryCounts.DataSize, humanizeutil.IBytes(f.EntryCounts.DataSize))
fmt.Printf("%s Rows: %d\n", tabfmt, f.EntryCounts.Rows)
fmt.Printf("%s IndexEntries: %d\n", tabfmt, f.EntryCounts.IndexEntries)
fmt.Printf(" %s:\n", f.Path)
fmt.Printf(" Span: %s\n", f.Span)
fmt.Printf(" DataSize: %d (%s)\n", f.EntryCounts.DataSize, humanizeutil.IBytes(f.EntryCounts.DataSize))
fmt.Printf(" Rows: %d\n", f.EntryCounts.Rows)
fmt.Printf(" IndexEntries: %d\n", f.EntryCounts.IndexEntries)
}
}

func showDescriptor(desc backupccl.BackupManifest) error {
func showDescriptors(desc backupccl.BackupManifest) {
// Note that these descriptors could be from any past version of the cluster,
// in case more fields need to be added to the output.
dbIDs := make([]descpb.ID, 0)
dbIDs := make([]descpb.ID, 0, len(desc.Descriptors))
dbIDToName := make(map[descpb.ID]string)
schemaIDs := make([]descpb.ID, 0)
schemaIDs := make([]descpb.ID, 0, len(desc.Descriptors))
schemaIDs = append(schemaIDs, keys.PublicSchemaID)
schemaIDToName := make(map[descpb.ID]string)
schemaIDToName[keys.PublicSchemaID] = sessiondata.PublicSchemaName
schemaIDToFullyQualifiedName := make(map[descpb.ID]string)
schemaIDToFullyQualifiedName[keys.PublicSchemaID] = sessiondata.PublicSchemaName
typeIDs := make([]descpb.ID, 0, len(desc.Descriptors))
typeIDToFullyQualifiedName := make(map[descpb.ID]string)
tableIDs := make([]descpb.ID, 0, len(desc.Descriptors))
tableIDToFullyQualifiedName := make(map[descpb.ID]string)
for i := range desc.Descriptors {
d := &desc.Descriptors[i]
id := descpb.GetDescriptorID(d)
if d.GetDatabase() != nil {
tableDesc, databaseDesc, typeDesc, schemaDesc := descpb.FromDescriptor(d)
if databaseDesc != nil {
dbIDToName[id] = descpb.GetDescriptorName(d)
dbIDs = append(dbIDs, id)
} else if d.GetSchema() != nil {
schemaIDToName[id] = descpb.GetDescriptorName(d)
} else if schemaDesc != nil {
dbName := dbIDToName[schemaDesc.GetParentID()]
schemaName := descpb.GetDescriptorName(d)
schemaIDToFullyQualifiedName[id] = dbName + "." + schemaName
schemaIDs = append(schemaIDs, id)
} else if typeDesc != nil {
parentSchema := schemaIDToFullyQualifiedName[typeDesc.GetParentSchemaID()]
if parentSchema == sessiondata.PublicSchemaName {
parentSchema = dbIDToName[typeDesc.GetParentID()] + "." + parentSchema
}
typeName := descpb.GetDescriptorName(d)
typeIDToFullyQualifiedName[id] = parentSchema + "." + typeName
typeIDs = append(typeIDs, id)
} else if tableDesc != nil {
tbDesc := tabledesc.NewBuilder(tableDesc).BuildImmutable()
parentSchema := schemaIDToFullyQualifiedName[tbDesc.GetParentSchemaID()]
if parentSchema == sessiondata.PublicSchemaName {
parentSchema = dbIDToName[tableDesc.GetParentID()] + "." + parentSchema
}
tableName := descpb.GetDescriptorName(d)
tableIDToFullyQualifiedName[id] = parentSchema + "." + tableName
tableIDs = append(tableIDs, id)
}
}

fmt.Printf("Databases:\n")
for _, id := range dbIDs {
fmt.Printf(" %s\n",
dbIDToName[id])
fmt.Printf(" %d: %s\n",
id, dbIDToName[id])
}

fmt.Printf("Schemas:\n")
for _, id := range schemaIDs {
fmt.Printf(" %s\n",
schemaIDToName[id])
fmt.Printf(" %d: %s\n",
id, schemaIDToFullyQualifiedName[id])
}

fmt.Printf("Types:\n")
if len(typeIDs) == 0 {
fmt.Printf(" (No user-defined types included in the specified backup path.)\n")
}
for _, id := range typeIDs {
fmt.Printf(" %d: %s\n",
id, typeIDToFullyQualifiedName[id])
}

fmt.Printf("Tables:\n")
for i := range desc.Descriptors {
d := &desc.Descriptors[i]
if descpb.TableFromDescriptor(d, hlc.Timestamp{}) != nil {
tbDesc := tabledesc.NewImmutable(*descpb.TableFromDescriptor(d, hlc.Timestamp{}))
dbName := dbIDToName[tbDesc.GetParentID()]
schemaName := schemaIDToName[tbDesc.GetParentSchemaID()]
fmt.Printf(" %s.%s.%s\n", dbName, schemaName, descpb.GetDescriptorName(d))
}
if len(tableIDs) == 0 {
fmt.Printf(" (No tables included in the specified backup path.)\n")
}
for _, id := range tableIDs {
fmt.Printf(" %d: %s\n",
id, tableIDToFullyQualifiedName[id])
}
return nil
}
