From 72472bd300e3a11473eccb373e0ded6f9d9c32a7 Mon Sep 17 00:00:00 2001 From: Pavel Tumik <18602811+sagor999@users.noreply.github.com> Date: Thu, 28 Apr 2022 00:29:47 +0000 Subject: [PATCH 1/2] [ws-daemon] update log message to be unique --- components/ws-daemon/pkg/content/service.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/components/ws-daemon/pkg/content/service.go b/components/ws-daemon/pkg/content/service.go index c571d7c13574f3..e26d44d70306d6 100644 --- a/components/ws-daemon/pkg/content/service.go +++ b/components/ws-daemon/pkg/content/service.go @@ -288,7 +288,7 @@ func (s *WorkspaceService) DisposeWorkspace(ctx context.Context, req *api.Dispos sess := s.store.Get(req.Id) if sess == nil { - return nil, status.Error(codes.NotFound, "workspace does not exist") + return nil, status.Error(codes.NotFound, "cannot find workspace during DisposeWorkspace") } // We were asked to do a backup of a session that was never ready. There seems to have been some state drift here - tell the caller. @@ -643,7 +643,7 @@ func (s *WorkspaceService) WaitForInit(ctx context.Context, req *api.WaitForInit session := s.store.Get(req.Id) if session == nil { - return nil, status.Error(codes.NotFound, "workspace does not exist") + return nil, status.Error(codes.NotFound, "cannot find workspace during WaitForInit") } // the next call will block until the workspace is initialized @@ -664,7 +664,7 @@ func (s *WorkspaceService) TakeSnapshot(ctx context.Context, req *api.TakeSnapsh sess := s.store.Get(req.Id) if sess == nil { - return nil, status.Error(codes.NotFound, "workspace does not exist") + return nil, status.Error(codes.NotFound, "cannot find workspace during TakeSnapshot") } if !sess.IsReady() { return nil, status.Error(codes.FailedPrecondition, "workspace is not ready") @@ -725,7 +725,7 @@ func (s *WorkspaceService) BackupWorkspace(ctx context.Context, req *api.BackupW // i.e. 
location = /mnt/disks/ssd0/workspaces- + req.Id // It would also need to setup the remote storage // ... but in the worse case we *could* backup locally and then upload manually - return nil, status.Error(codes.NotFound, "workspace does not exist") + return nil, status.Error(codes.NotFound, "cannot find workspace during BackupWorkspace") } if sess.RemoteStorageDisabled { return nil, status.Errorf(codes.FailedPrecondition, "workspace has no remote storage") From f684c0c0e38fd71e57d9e9a1eda29ec8dad5f29a Mon Sep 17 00:00:00 2001 From: Pavel Tumik <18602811+sagor999@users.noreply.github.com> Date: Thu, 28 Apr 2022 00:30:26 +0000 Subject: [PATCH 2/2] [ws-manager] fix finalizeWorkspaceContent can be called twice --- components/ws-manager/pkg/manager/monitor.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/components/ws-manager/pkg/manager/monitor.go b/components/ws-manager/pkg/manager/monitor.go index 9e5d80ba65f60e..e3eddcfe9d958e 100644 --- a/components/ws-manager/pkg/manager/monitor.go +++ b/components/ws-manager/pkg/manager/monitor.go @@ -376,8 +376,9 @@ func actOnPodEvent(ctx context.Context, m actingManager, status *api.WorkspaceSt } _, gone := wso.Pod.Annotations[wsk8s.ContainerIsGoneAnnotation] + _, alreadyFinalized := wso.Pod.Annotations[disposalStatusAnnotation] - if terminated || gone { + if (terminated || gone) && !alreadyFinalized { // We start finalizing the workspace content only after the container is gone. This way we ensure there's // no process modifying the workspace content as we create the backup. go m.finalizeWorkspaceContent(ctx, wso)