containers store: handle volatile and transient containers
This splits up the containers.json file in the containers store into
two, adding the new file `volatile-containers.json`. This new file is
saved using the NoSync option, which is faster but isn't robust in
the case of an unclean shutdown.

In the standard case, only containers marked as "volatile" (i.e. those
started with `--rm`) are stored in the volatile json file. This means
such containers are faster, but may get lost in case of an unclean
shutdown. This is fine for these containers though, as they are not
meant to persist.

In the transient store case, all containers are stored in the volatile
json file, and it (plus the matching lock file) is stored in the runroot
(i.e. tmpfs) instead of the regular directory. This means all
containers are fast to write, and none are persisted across reboots.

Signed-off-by: Alexander Larsson <alexl@redhat.com>
alexlarsson committed Nov 9, 2022
1 parent bb54554 commit ce67cc3
Showing 3 changed files with 151 additions and 44 deletions.
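
The core of the change is easiest to see in isolation. Below is a minimal, self-contained sketch of the split-write policy described in the commit message; all names in it (`sketch`, `writeSplit`, `writeJSON`) are illustrative, and the real implementation is `containerStore.save()` in containers.go below, which uses `ioutils.AtomicWriteFileWithOpts` with `NoSync` set for the volatile file.

```go
// Illustrative sketch only; the real logic lives in containerStore.save().
package sketch

import (
	"encoding/json"
	"os"
	"path/filepath"
)

type Container struct {
	ID       string `json:"id"`
	Volatile bool   `json:"-"` // true for --rm containers (or everything in a transient store)
}

// writeSplit persists stable containers with a durable (fsync'ed) write and
// volatile containers with a faster write that skips fsync.
func writeSplit(dir, runDir string, transient bool, containers []*Container) error {
	var stable, volatile []*Container
	for _, c := range containers {
		if c.Volatile || transient { // a transient store treats every container as volatile
			volatile = append(volatile, c)
		} else {
			stable = append(stable, c)
		}
	}

	volatileDir := dir
	if transient {
		volatileDir = runDir // tmpfs: nothing survives a reboot
	}

	if err := writeJSON(filepath.Join(dir, "containers.json"), stable, true); err != nil {
		return err
	}
	return writeJSON(filepath.Join(volatileDir, "volatile-containers.json"), volatile, false)
}

// writeJSON stands in for the atomic-write helper; sync controls whether the
// data is fsync'ed before returning.
func writeJSON(path string, v interface{}, sync bool) error {
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err := f.Write(data); err != nil {
		return err
	}
	if sync {
		return f.Sync() // durable even across an unclean shutdown
	}
	return nil
}
```

The trade-off is deliberate: skipping fsync makes `--rm` containers cheap to record, and losing the volatile file after a crash only drops containers that were never meant to survive one.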
184 changes: 144 additions & 40 deletions containers.go
@@ -15,6 +15,20 @@ import (
digest "github.com/opencontainers/go-digest"
)

type ContainerLocation int

// The backing store is split in two json files, one (the volatile)
// that is written without fsync(), meaning it isn't as robust to an
// unclean shutdown.
const (
StableContainerLocation = iota
VolatileContainerLocation

MaxContainerLocations
)

var containerLocations = []ContainerLocation{StableContainerLocation, VolatileContainerLocation}

// A Container is a reference to a read-write layer with metadata.
type Container struct {
// ID is either one which was specified at create-time, or a random
@@ -64,6 +78,9 @@ type Container struct {
GIDMap []idtools.IDMap `json:"gidmap,omitempty"`

Flags map[string]interface{} `json:"flags,omitempty"`

// VolatileStore is true if the container is from the volatile json file
VolatileStore bool `json:"-"`
}

// rwContainerStore provides bookkeeping for information about Containers.
@@ -118,14 +135,17 @@ type rwContainerStore interface {
}

type containerStore struct {
lockfile Locker
dir string
containers []*Container
idindex *truncindex.TruncIndex
byid map[string]*Container
bylayer map[string]*Container
byname map[string]*Container
loadMut sync.Mutex
lockfile Locker
dir string
runDir string
transient bool
containers []*Container
idindex *truncindex.TruncIndex
byid map[string]*Container
bylayer map[string]*Container
byname map[string]*Container
loadMut sync.Mutex
locationModified [MaxContainerLocations]bool
}

func copyContainer(c *Container) *Container {
@@ -142,6 +162,7 @@ func copyContainer(c *Container) *Container {
UIDMap: copyIDMap(c.UIDMap),
GIDMap: copyIDMap(c.GIDMap),
Flags: copyStringInterfaceMap(c.Flags),
VolatileStore: c.VolatileStore,
}
}

@@ -176,6 +197,24 @@ func (c *Container) MountOpts() []string {
}
}

func containerLocation(c *Container) ContainerLocation {
if c.VolatileStore {
return VolatileContainerLocation
}
return StableContainerLocation

}

func (r *containerStore) markModifiedFor(c *Container) {
r.locationModified[containerLocation(c)] = true
}

func (r *containerStore) markAllModified() {
for _, location := range containerLocations {
r.locationModified[location] = true
}
}

// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
// If this succeeds, the caller MUST call stopWriting().
//
@@ -258,7 +297,13 @@ func (r *containerStore) Containers() ([]Container, error) {
return containers, nil
}

func (r *containerStore) containerspath() string {
func (r *containerStore) containerspath(location ContainerLocation) string {
if location == VolatileContainerLocation {
if r.transient {
return filepath.Join(r.runDir, "volatile-containers.json")
}
return filepath.Join(r.dir, "volatile-containers.json")
}
return filepath.Join(r.dir, "containers.json")
}

@@ -276,29 +321,49 @@ func (r *containerStore) datapath(id, key string) string {
// if it is held for writing.
func (r *containerStore) load(lockedForWriting bool) error {
needSave := false
rpath := r.containerspath()
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}

containers := []*Container{}
if len(data) != 0 {
if err := json.Unmarshal(data, &containers); err != nil {
return fmt.Errorf("loading %q: %w", rpath, err)

ids := make(map[string]*Container)

for _, location := range containerLocations {
rpath := r.containerspath(location)
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}

locationContainers := []*Container{}

if len(data) != 0 {
if err := json.Unmarshal(data, &locationContainers); err != nil {
return fmt.Errorf("loading %q: %w", rpath, err)
}
}

for _, container := range locationContainers {
// There should be no duplicated ids between json files, but let's check to be sure
if ids[container.ID] != nil {
continue // skip invalid duplicated container
}
// Remember where the container came from
if location == VolatileContainerLocation {
container.VolatileStore = true
}
containers = append(containers, container)
ids[container.ID] = container
}
}

idlist := make([]string, 0, len(containers))
layers := make(map[string]*Container)
ids := make(map[string]*Container)
names := make(map[string]*Container)
for n, container := range containers {
idlist = append(idlist, container.ID)
ids[container.ID] = containers[n]
layers[container.LayerID] = containers[n]
for _, name := range container.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
r.markModifiedFor(container)
needSave = true
}
names[name] = containers[n]
@@ -322,33 +387,71 @@ func (r *containerStore) load(lockedForWriting bool) error {

// Save saves the contents of the store to disk. It should be called with
// the lock held, locked for writing.
func (r *containerStore) Save() error {
func (r *containerStore) save() error {
r.lockfile.AssertLockedForWriting()
rpath := r.containerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
}
jdata, err := json.Marshal(&r.containers)
if err != nil {
return err
}
if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
return err
for _, location := range containerLocations {
rpath := r.containerspath(location)
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
}
if !r.locationModified[location] {
continue
}
r.locationModified[location] = false
subsetContainers := make([]*Container, 0, len(r.containers))
for _, container := range r.containers {
if containerLocation(container) == location {
subsetContainers = append(subsetContainers, container)
}
}

jdata, err := json.Marshal(&subsetContainers)
if err != nil {
return err
}
var opts *ioutils.AtomicFileWriterOptions
if location == VolatileContainerLocation {
opts = &ioutils.AtomicFileWriterOptions{
NoSync: true,
}
}
if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil {
return err
}
}
return r.lockfile.Touch()
}

func newContainerStore(dir string) (rwContainerStore, error) {
func (r *containerStore) saveFor(modifiedContainer *Container) error {
r.markModifiedFor(modifiedContainer)
return r.save()
}

func (r *containerStore) Save() error {
r.markAllModified()
return r.save()
}

func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) {
if err := os.MkdirAll(dir, 0700); err != nil {
return nil, err
}
lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock"))
lockDir := dir
if transient {
if err := os.MkdirAll(runDir, 0700); err != nil {
return nil, err
}
lockDir = runDir
}
lockfile, err := GetLockfile(filepath.Join(lockDir, "containers.lock"))
if err != nil {
return nil, err
}
cstore := containerStore{
lockfile: lockfile,
dir: dir,
runDir: runDir,
transient: transient,
containers: []*Container{},
byid: make(map[string]*Container),
bylayer: make(map[string]*Container),
@@ -385,7 +488,7 @@ func (r *containerStore) ClearFlag(id string, flag string) error {
return ErrContainerUnknown
}
delete(container.Flags, flag)
return r.Save()
return r.saveFor(container)
}

func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
@@ -397,7 +500,7 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro
container.Flags = make(map[string]interface{})
}
container.Flags[flag] = value
return r.Save()
return r.saveFor(container)
}

func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) {
@@ -443,6 +546,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
Flags: copyStringInterfaceMap(options.Flags),
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
VolatileStore: options.Volatile || r.transient, // If transient, store *all* containers in volatile store
}
r.containers = append(r.containers, container)
r.byid[id] = container
@@ -453,7 +557,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
for _, name := range names {
r.byname[name] = container
}
err = r.Save()
err = r.saveFor(container)
container = copyContainer(container)
return container, err
}
@@ -468,7 +572,7 @@ func (r *containerStore) Metadata(id string) (string, error) {
func (r *containerStore) SetMetadata(id, metadata string) error {
if container, ok := r.lookup(id); ok {
container.Metadata = metadata
return r.Save()
return r.saveFor(container)
}
return ErrContainerUnknown
}
@@ -497,7 +601,7 @@ func (r *containerStore) updateNames(id string, names []string, op updateNameOpe
r.byname[name] = container
}
container.Names = names
return r.Save()
return r.saveFor(container)
}

func (r *containerStore) Delete(id string) error {
@@ -529,7 +633,7 @@ func (r *containerStore) Delete(id string) error {
r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
}
}
if err := r.Save(); err != nil {
if err := r.saveFor(container); err != nil {
return err
}
if err := os.RemoveAll(r.datadir(id)); err != nil {
Expand Down Expand Up @@ -676,7 +780,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
save = true
}
if save {
err = r.Save()
err = r.saveFor(c)
}
}
return err
1 change: 1 addition & 0 deletions layers.go
@@ -550,6 +550,7 @@ func (r *layerStore) load(lockedForWriting bool) error {
}
}
if shouldSave {
r.markAllModified()
if err := r.saveLayers(); err != nil {
return err
}
10 changes: 6 additions & 4 deletions store.go
@@ -801,14 +801,16 @@ func (s *store) load() error {
if err := os.MkdirAll(gcpath, 0700); err != nil {
return err
}
rcs, err := newContainerStore(gcpath)
if err != nil {
return err
}
rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
if err := os.MkdirAll(rcpath, 0700); err != nil {
return err
}

rcs, err := newContainerStore(gcpath, rcpath, s.transientStore)
if err != nil {
return err
}

s.containerStore = rcs

for _, store := range driver.AdditionalImageStores() {
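
For orientation, the resulting on-disk layout in the two modes is roughly as follows. The directory name is illustrative (shown for the overlay driver, following the `driverPrefix+"containers"` naming in store.go); the split itself comes from `containerspath()` and `newContainerStore()` above.

```
# Default (persistent) store: both JSON files live under the graph root
<graphroot>/overlay-containers/containers.json            durable, fsync'ed writes
<graphroot>/overlay-containers/volatile-containers.json    NoSync writes; holds --rm containers
<graphroot>/overlay-containers/containers.lock

# Transient store: the volatile file and the lock file move to the run root (tmpfs)
<runroot>/overlay-containers/volatile-containers.json      NoSync writes; holds all containers
<runroot>/overlay-containers/containers.lock
```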
