etcdutl: Fix snapshot restore memory alloc issue #17277

Merged: 2 commits, Feb 14, 2024
etcdutl/etcdutl/snapshot_command.go (6 additions, 1 deletion)
@@ -22,6 +22,7 @@ import (

"go.etcd.io/etcd/etcdutl/v3/snapshot"
"go.etcd.io/etcd/pkg/v3/cobrautl"
+ "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/datadir"
)

@@ -38,6 +39,7 @@ var (
restorePeerURLs string
restoreName string
skipHashCheck bool
+ initialMmapSize = backend.InitialMmapSize
markCompacted bool
revisionBump uint64
)
@@ -77,6 +79,7 @@ func NewSnapshotRestoreCommand() *cobra.Command {
cmd.Flags().StringVar(&restorePeerURLs, "initial-advertise-peer-urls", defaultInitialAdvertisePeerURLs, "List of this member's peer URLs to advertise to the rest of the cluster")
cmd.Flags().StringVar(&restoreName, "name", defaultName, "Human-readable name for this member")
cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "Ignore snapshot integrity hash value (required if copied from data directory)")
+ cmd.Flags().Uint64Var(&initialMmapSize, "initial-memory-map-size", initialMmapSize, "Initial memory map size of the database in bytes. It uses the default value if not defined or defined to 0")
cmd.Flags().Uint64Var(&revisionBump, "bump-revision", 0, "How much to increase the latest revision after restore")
cmd.Flags().BoolVar(&markCompacted, "mark-compacted", false, "Mark the latest revision after restore as the point of scheduled compaction (required if --bump-revision > 0, disallowed otherwise)")

@@ -104,7 +107,7 @@ func SnapshotStatusCommandFunc(cmd *cobra.Command, args []string) {

func snapshotRestoreCommandFunc(_ *cobra.Command, args []string) {
SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir,
- restorePeerURLs, restoreName, skipHashCheck, revisionBump, markCompacted, args)
+ restorePeerURLs, restoreName, skipHashCheck, initialMmapSize, revisionBump, markCompacted, args)
}

func SnapshotRestoreCommandFunc(restoreCluster string,
@@ -114,6 +117,7 @@ func SnapshotRestoreCommandFunc(restoreCluster string,
restorePeerURLs string,
restoreName string,
skipHashCheck bool,
+ initialMmapSize uint64,
revisionBump uint64,
markCompacted bool,
args []string) {
@@ -149,6 +153,7 @@ func SnapshotRestoreCommandFunc(restoreCluster string,
InitialCluster: restoreCluster,
InitialClusterToken: restoreClusterToken,
SkipHashCheck: skipHashCheck,
+ InitialMmapSize: initialMmapSize,
RevisionBump: revisionBump,
MarkCompacted: markCompacted,
}); err != nil {
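The flag wiring above follows cobra's usual bind-to-variable pattern: the package-level initialMmapSize is seeded with backend.InitialMmapSize and only changes when --initial-memory-map-size is passed. Below is a standalone sketch of that pattern; it is an illustration under assumed values, not code from this PR, and the stand-in default and the override argument are hypothetical.

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

// Stand-in for backend.InitialMmapSize; the real default is 10 GiB.
var initialMmapSize uint64 = 10 * 1024 * 1024 * 1024

func main() {
    cmd := &cobra.Command{
        Use: "restore",
        Run: func(cmd *cobra.Command, args []string) {
            // Prints the overridden value if the flag was set, else the default.
            fmt.Println("initial mmap size:", initialMmapSize)
        },
    }
    // Same binding style as the diff: the default comes from the variable itself.
    cmd.Flags().Uint64Var(&initialMmapSize, "initial-memory-map-size", initialMmapSize,
        "Initial memory map size of the database in bytes")

    cmd.SetArgs([]string{"--initial-memory-map-size", "2147483648"}) // hypothetical override: 2 GiB
    _ = cmd.Execute()
}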
etcdutl/snapshot/v3_snapshot.go (11 additions, 4 deletions)
@@ -83,7 +83,8 @@ type v3Manager struct {
snapDir string
cl *membership.RaftCluster

- skipHashCheck bool
+ skipHashCheck bool
+ initialMmapSize uint64
}

// hasChecksum returns "true" if the file size "n"
@@ -204,6 +205,9 @@ type RestoreConfig struct {
// (required if copied from data directory).
SkipHashCheck bool

+ // InitialMmapSize is the database initial memory map size.
+ InitialMmapSize uint64
+
// RevisionBump is the amount to increase the latest revision after restore,
// to allow administrators to trick clients into thinking that revision never decreased.
// If 0, revision bumping is skipped.
@@ -263,13 +267,15 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
s.walDir = walDir
s.snapDir = filepath.Join(dataDir, "member", "snap")
s.skipHashCheck = cfg.SkipHashCheck
+ s.initialMmapSize = cfg.InitialMmapSize

s.lg.Info(
"restoring snapshot",
zap.String("path", s.srcDbPath),
zap.String("wal-dir", s.walDir),
zap.String("data-dir", dataDir),
zap.String("snap-dir", s.snapDir),
+ zap.Uint64("initial-memory-map-size", s.initialMmapSize),
)

if err = s.saveDB(); err != nil {
@@ -297,6 +303,7 @@ func (s *v3Manager) Restore(cfg RestoreConfig) error {
zap.String("wal-dir", s.walDir),
zap.String("data-dir", dataDir),
zap.String("snap-dir", s.snapDir),
+ zap.Uint64("initial-memory-map-size", s.initialMmapSize),
)

return verify.VerifyIfEnabled(verify.Config{
@@ -317,7 +324,7 @@ func (s *v3Manager) saveDB() error {
return err
}

- be := backend.NewDefaultBackend(s.lg, s.outDbPath())
+ be := backend.NewDefaultBackend(s.lg, s.outDbPath(), backend.WithMmapSize(s.initialMmapSize))
defer be.Close()

err = schema.NewMembershipBackend(s.lg, be).TrimMembershipFromBackend()
@@ -472,7 +479,7 @@ func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
}

// add members again to persist them to the backend we create.
- be := backend.NewDefaultBackend(s.lg, s.outDbPath())
+ be := backend.NewDefaultBackend(s.lg, s.outDbPath(), backend.WithMmapSize(s.initialMmapSize))
defer be.Close()
s.cl.SetBackend(schema.NewMembershipBackend(s.lg, be))
for _, m := range s.cl.Members() {
@@ -551,7 +558,7 @@ func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
}

func (s *v3Manager) updateCIndex(commit uint64, term uint64) error {
- be := backend.NewDefaultBackend(s.lg, s.outDbPath())
+ be := backend.NewDefaultBackend(s.lg, s.outDbPath(), backend.WithMmapSize(s.initialMmapSize))
defer be.Close()

cindex.UpdateConsistentIndexForce(be.BatchTx(), commit, term)
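The restore path above threads cfg.InitialMmapSize into every backend it opens during restore. For callers that use the snapshot package directly instead of the etcdutl CLI, a minimal sketch of a programmatic restore follows; apart from the InitialMmapSize field added in this PR, the other RestoreConfig fields, the constructor, and all paths, URLs, and sizes are assumptions recalled from the existing snapshot API, so treat them as illustrative only.

package main

import (
    "go.etcd.io/etcd/etcdutl/v3/snapshot"
    "go.uber.org/zap"
)

func main() {
    lg := zap.NewExample()
    m := snapshot.NewV3(lg) // assumed constructor for the snapshot Manager

    err := m.Restore(snapshot.RestoreConfig{
        SnapshotPath:        "backup.db",   // hypothetical snapshot file
        Name:                "infra1",      // hypothetical member name
        OutputDataDir:       "infra1.etcd", // hypothetical data dir
        PeerURLs:            []string{"http://127.0.0.1:2380"},
        InitialCluster:      "infra1=http://127.0.0.1:2380",
        InitialClusterToken: "etcd-cluster",
        // New in this PR: cap the initial mmap region instead of the 10 GiB default.
        InitialMmapSize: 2 * 1024 * 1024 * 1024, // 2 GiB
    })
    if err != nil {
        lg.Fatal("restore failed", zap.Error(err))
    }
}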
server/storage/backend/backend.go (16 additions, 4 deletions)
@@ -36,10 +36,10 @@ var (

defragLimit = 10000

- // initialMmapSize is the initial size of the mmapped region. Setting this larger than
+ // InitialMmapSize is the initial size of the mmapped region. Setting this larger than
// the potential max db size can prevent writer from blocking reader.
// This only works for linux.
- initialMmapSize = uint64(10 * 1024 * 1024 * 1024)
+ InitialMmapSize = uint64(10 * 1024 * 1024 * 1024)

// minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning.
minSnapshotWarningTimeout = 30 * time.Second
@@ -151,11 +151,13 @@ type BackendConfig struct {
Hooks Hooks
}

+ type BackendConfigOption func(*BackendConfig)
+
func DefaultBackendConfig(lg *zap.Logger) BackendConfig {
return BackendConfig{
BatchInterval: defaultBatchInterval,
BatchLimit: defaultBatchLimit,
- MmapSize: initialMmapSize,
+ MmapSize: InitialMmapSize,
Logger: lg,
}
}
@@ -164,9 +166,19 @@ func New(bcfg BackendConfig) Backend {
return newBackend(bcfg)
}

- func NewDefaultBackend(lg *zap.Logger, path string) Backend {
+ func WithMmapSize(size uint64) BackendConfigOption {
+ return func(bcfg *BackendConfig) {
+ bcfg.MmapSize = size
+ }
+ }
+
+ func NewDefaultBackend(lg *zap.Logger, path string, opts ...BackendConfigOption) Backend {
bcfg := DefaultBackendConfig(lg)
bcfg.Path = path
+ for _, opt := range opts {
+ opt(&bcfg)
+ }
+
return newBackend(bcfg)
}

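On the backend side, the variadic BackendConfigOption keeps NewDefaultBackend source compatible: existing two-argument call sites are untouched, while the restore path can override MmapSize, the value the backend uses to size bolt's initial memory map when it opens the database file. A short usage sketch under assumed values follows (the path and size are hypothetical, not from this PR).

package main

import (
    "go.etcd.io/etcd/server/v3/storage/backend"
    "go.uber.org/zap"
)

func main() {
    lg := zap.NewExample()

    // Old style still compiles: backend.NewDefaultBackend(lg, path)
    // New style: shrink (or grow) the initial mmap region per call site.
    be := backend.NewDefaultBackend(lg, "infra1.etcd/member/snap/db", // hypothetical path
        backend.WithMmapSize(512*1024*1024)) // 512 MiB instead of the 10 GiB InitialMmapSize default
    defer be.Close()
}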