Commit

workaround for azurefile pvc permission issues
saurabh-prakash committed Mar 2, 2022
1 parent cde485d commit 1fa0d6a
Showing 6 changed files with 56 additions and 10 deletions.
38 changes: 32 additions & 6 deletions pkg/cachemanager/cachemanager.go
@@ -17,12 +17,12 @@ import (
)

const (
-    yarnLock = "yarn.lock"
-    packageLock = "package-lock.json"
-    npmShrinkwrap = "npm-shrinkwrap.json"
-    nodeModules = "node_modules"
-    packageJSON = "package.json"
-    defaultCompressedFileName = "cache.tzst"
+    yarnLock = "yarn.lock"
+    packageLock = "package-lock.json"
+    npmShrinkwrap = "npm-shrinkwrap.json"
+    nodeModules = "node_modules"
+    defaultCompressedFileName = "cache.tzst"
+    workspaceCompressedFilename = "workspace.tzst"
)

// cache represents the files/dirs that will be cached
@@ -152,6 +152,32 @@ func (c *cache) Upload(ctx context.Context, cacheKey string, itemsToCompress ...
    return nil
}

func (c *cache) CacheWorkspace(ctx context.Context) error {
    tmpDir := os.TempDir()
    if err := c.zstd.Compress(ctx, workspaceCompressedFilename, true, tmpDir, global.HomeDir); err != nil {
        return err
    }
    src := filepath.Join(tmpDir, workspaceCompressedFilename)
    dst := filepath.Join(global.WorkspaceCacheDir, workspaceCompressedFilename)
    if err := fileutils.CopyFile(src, dst, false); err != nil {
        return err
    }
    return nil
}

func (c *cache) ExtractWorkspace(ctx context.Context) error {
    tmpDir := os.TempDir()
    src := filepath.Join(global.WorkspaceCacheDir, workspaceCompressedFilename)
    dst := filepath.Join(tmpDir, workspaceCompressedFilename)
    if err := fileutils.CopyFile(src, dst, false); err != nil {
        return err
    }
    if err := c.zstd.Decompress(ctx, filepath.Join(tmpDir, workspaceCompressedFilename), true, global.HomeDir); err != nil {
        return err
    }
    return nil
}

func (c *cache) getDefaultDirs() (string, error) {
    f, err := os.Open(global.RepoDir)
    if err != nil {
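
Both new methods stage the archive through the OS temp directory and only then copy the finished file onto the mounted volume via fileutils.CopyFile, presumably so the compression step never writes directly to the azurefile mount. CopyFile itself is not part of this diff; the following is only a minimal sketch of what such a helper could look like, with the signature and changeMode behavior assumed from the call sites above:

package fileutils

import (
    "io"
    "os"
)

// CopyFile copies src to dst. When changeMode is true the destination
// also receives the source file's permission bits. Illustrative sketch
// only; the real helper in pkg/fileutils may differ.
func CopyFile(src, dst string, changeMode bool) error {
    in, err := os.Open(src)
    if err != nil {
        return err
    }
    defer in.Close()

    out, err := os.Create(dst)
    if err != nil {
        return err
    }
    defer out.Close()

    if _, err := io.Copy(out, in); err != nil {
        return err
    }
    if changeMode {
        info, err := in.Stat()
        if err != nil {
            return err
        }
        if err := out.Chmod(info.Mode()); err != nil {
            return err
        }
    }
    return out.Sync()
}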
4 changes: 4 additions & 0 deletions pkg/core/interfaces.go
@@ -99,6 +99,10 @@ type CacheStore interface
    Download(ctx context.Context, cacheKey string) error
    // Upload creates, compresses and uploads cache at cacheKey
    Upload(ctx context.Context, cacheKey string, itemsToCompress ...string) error
    // CacheWorkspace caches the workspace onto a mounted volume
    CacheWorkspace(ctx context.Context) error
    // ExtractWorkspace extracts the workspace cache from mounted volume
    ExtractWorkspace(ctx context.Context) error
}

// SecretParser defines operation for parsing the vault secrets in given path
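
Consumers that only exercise the workspace half of the store can depend on a narrower interface, which also keeps test doubles for the two new methods small. A minimal sketch along those lines (the interface and fake below are invented for illustration, not part of the repository):

package cachemanager_test

import "context"

// workspaceCache is the narrow slice of core.CacheStore that the
// workspace replication flow needs. Defining it at the point of use
// keeps test doubles small. (Names invented for illustration.)
type workspaceCache interface {
    CacheWorkspace(ctx context.Context) error
    ExtractWorkspace(ctx context.Context) error
}

// fakeWorkspaceCache records which methods were called and can return
// a canned error.
type fakeWorkspaceCache struct {
    cached, extracted bool
    err               error
}

var _ workspaceCache = (*fakeWorkspaceCache)(nil)

func (f *fakeWorkspaceCache) CacheWorkspace(ctx context.Context) error {
    f.cached = true
    return f.err
}

func (f *fakeWorkspaceCache) ExtractWorkspace(ctx context.Context) error {
    f.extracted = true
    return f.err
}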
16 changes: 16 additions & 0 deletions pkg/core/lifecycle.go
@@ -140,6 +140,14 @@ func (pl *Pipeline) Start(ctx context.Context) (err error) {
            errRemark = fmt.Sprintf("Unable to clone repo: %s", payload.RepoLink)
            return err
        }
    } else {
        pl.Logger.Debugf("Extracting workspace")
        // Replicate workspace
        if err = pl.CacheStore.ExtractWorkspace(ctx); err != nil {
            pl.Logger.Errorf("Error replicating workspace: %+v", err)
            errRemark = errs.GenericErrRemark.Error()
            return err
        }
    }

    // load tas yaml file
@@ -256,6 +264,14 @@ func (pl *Pipeline) Start(ctx context.Context) (err error) {
    // mark status as passed
    taskPayload.Status = Passed

    pl.Logger.Debugf("Caching workspace")
    // Persist workspace
    if err = pl.CacheStore.CacheWorkspace(ctx); err != nil {
        pl.Logger.Errorf("Error caching workspace: %+v", err)
        errRemark = errs.GenericErrRemark.Error()
        return err
    }

    // Upload cache once for other builds
    if err = pl.CacheStore.Upload(ctx, cacheKey, tasConfig.Cache.Paths...); err != nil {
        pl.Logger.Errorf("Unable to upload cache: %v", err)
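
Taken together, the two lifecycle hooks give this ordering: clone on a fresh run, extract the previously cached workspace on a re-run, and persist the workspace again only after the task has passed. A compact, self-contained sketch of that control flow (function and parameter names here are illustrative, not the Pipeline's actual API):

package lifecycle

import (
    "context"
    "fmt"
)

// runWorkspaceFlow mirrors the ordering introduced in Pipeline.Start:
// clone only on a fresh run, otherwise restore the cached workspace,
// and persist the workspace again only after the task has passed.
// Names are illustrative, not the repository's API.
func runWorkspaceFlow(ctx context.Context, freshRun bool,
    clone, extractWorkspace, runTask, cacheWorkspace func(context.Context) error) error {
    if freshRun {
        if err := clone(ctx); err != nil {
            return fmt.Errorf("unable to clone repo: %w", err)
        }
    } else {
        if err := extractWorkspace(ctx); err != nil {
            return fmt.Errorf("error replicating workspace: %w", err)
        }
    }
    if err := runTask(ctx); err != nil {
        return err
    }
    // only a passed task persists its workspace for the next run
    if err := cacheWorkspace(ctx); err != nil {
        return fmt.Errorf("error caching workspace: %w", err)
    }
    return nil
}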
2 changes: 1 addition & 1 deletion pkg/fileutils/fileutils.go
@@ -76,7 +76,7 @@ func CopyDir(src, dst string, changeMode bool) (err error) {
        return
    }
    if err == nil {
-       return fmt.Errorf("destination already exists")
+       return fmt.Errorf("destination %+v already exists", dst)
    }

    err = os.MkdirAll(dst, si.Mode())
1 change: 1 addition & 0 deletions pkg/global/nucleusconstants.go
@@ -6,6 +6,7 @@ import "time"
const (
    CoverageManifestFileName = "manifest.json"
    HomeDir = "/home/nucleus"
    WorkspaceCacheDir = "/workspace-cache"
    RepoDir = HomeDir + "/repo"
    CodeCoverageDir = RepoDir + "/coverage"
    DefaultHTTPTimeout = 45 * time.Second
5 changes: 2 additions & 3 deletions pkg/zstd/zstd.go
@@ -9,7 +9,6 @@ import (
"strings"

"github.com/LambdaTest/synapse/pkg/core"
"github.com/LambdaTest/synapse/pkg/global"
"github.com/LambdaTest/synapse/pkg/lumber"
)

@@ -49,7 +48,7 @@ func (z *zstdCompressor) Compress(ctx context.Context, compressedFileName string
    if preservePath {
        args = append(args, "-P")
    }
-   if err := z.execManager.ExecuteInternalCommands(ctx, core.Zstd, args, global.RepoDir, nil, nil); err != nil {
+   if err := z.execManager.ExecuteInternalCommands(ctx, core.Zstd, args, workingDirectory, nil, nil); err != nil {
        z.logger.Errorf("error while zstd compression %v", err)
        return err
    }
@@ -62,7 +61,7 @@ func (z *zstdCompressor) Decompress(ctx context.Context, filePath string, preser
    if preservePath {
        args = append(args, "-P")
    }
-   if err := z.execManager.ExecuteInternalCommands(ctx, core.Zstd, args, global.RepoDir, nil, nil); err != nil {
+   if err := z.execManager.ExecuteInternalCommands(ctx, core.Zstd, args, workingDirectory, nil, nil); err != nil {
        z.logger.Errorf("error while zstd decompression %v", err)
        return err
    }
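
The only functional change in zstd.go is that compression and decompression now run in a caller-supplied workingDirectory instead of the hardcoded global.RepoDir, which is what allows CacheWorkspace to build its archive from the temp directory. As a rough standalone illustration of the same idea (the repository routes this through execManager.ExecuteInternalCommands; the tar/zstd invocation below, which needs a tar build with zstd support, is an assumption rather than the project's exact command line):

package compress

import (
    "context"
    "os/exec"
)

// compressDir compresses paths into outFile with tar+zstd, executed in
// workingDirectory. It is a rough standalone equivalent of what
// zstd.go now does via execManager.ExecuteInternalCommands; the flags
// are illustrative, not copied from the repository.
func compressDir(ctx context.Context, workingDirectory, outFile string, preservePath bool, paths ...string) error {
    args := []string{"--zstd"}
    if preservePath {
        // keep absolute path names, mirroring the "-P" flag that
        // zstd.go appends when preservePath is true
        args = append(args, "-P")
    }
    args = append(args, "-cf", outFile)
    args = append(args, paths...)

    cmd := exec.CommandContext(ctx, "tar", args...)
    // the caller-supplied working directory replaces the previously
    // hardcoded global.RepoDir
    cmd.Dir = workingDirectory
    return cmd.Run()
}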
