diff --git a/pkg/anago/anagofakes/fake_release_impl.go b/pkg/anago/anagofakes/fake_release_impl.go index 1bb31706cb0..464c9483cf7 100644 --- a/pkg/anago/anagofakes/fake_release_impl.go +++ b/pkg/anago/anagofakes/fake_release_impl.go @@ -1,19 +1,3 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - // Code generated by counterfeiter. DO NOT EDIT. package anagofakes @@ -28,6 +12,17 @@ import ( ) type FakeReleaseImpl struct { + ArchiveReleaseStub func(*release.ArchiverOptions) error + archiveReleaseMutex sync.RWMutex + archiveReleaseArgsForCall []struct { + arg1 *release.ArchiverOptions + } + archiveReleaseReturns struct { + result1 error + } + archiveReleaseReturnsOnCall map[int]struct { + result1 error + } BranchNeedsCreationStub func(string, string, semver.Version) (bool, error) branchNeedsCreationMutex sync.RWMutex branchNeedsCreationArgsForCall []struct { @@ -221,6 +216,67 @@ type FakeReleaseImpl struct { invocationsMutex sync.RWMutex } +func (fake *FakeReleaseImpl) ArchiveRelease(arg1 *release.ArchiverOptions) error { + fake.archiveReleaseMutex.Lock() + ret, specificReturn := fake.archiveReleaseReturnsOnCall[len(fake.archiveReleaseArgsForCall)] + fake.archiveReleaseArgsForCall = append(fake.archiveReleaseArgsForCall, struct { + arg1 *release.ArchiverOptions + }{arg1}) + stub := fake.ArchiveReleaseStub + fakeReturns := fake.archiveReleaseReturns + fake.recordInvocation("ArchiveRelease", []interface{}{arg1}) + fake.archiveReleaseMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeReleaseImpl) ArchiveReleaseCallCount() int { + fake.archiveReleaseMutex.RLock() + defer fake.archiveReleaseMutex.RUnlock() + return len(fake.archiveReleaseArgsForCall) +} + +func (fake *FakeReleaseImpl) ArchiveReleaseCalls(stub func(*release.ArchiverOptions) error) { + fake.archiveReleaseMutex.Lock() + defer fake.archiveReleaseMutex.Unlock() + fake.ArchiveReleaseStub = stub +} + +func (fake *FakeReleaseImpl) ArchiveReleaseArgsForCall(i int) *release.ArchiverOptions { + fake.archiveReleaseMutex.RLock() + defer fake.archiveReleaseMutex.RUnlock() + argsForCall := fake.archiveReleaseArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeReleaseImpl) ArchiveReleaseReturns(result1 error) { + fake.archiveReleaseMutex.Lock() + defer fake.archiveReleaseMutex.Unlock() + fake.ArchiveReleaseStub = nil + fake.archiveReleaseReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeReleaseImpl) ArchiveReleaseReturnsOnCall(i int, result1 error) { + fake.archiveReleaseMutex.Lock() + defer fake.archiveReleaseMutex.Unlock() + fake.ArchiveReleaseStub = nil + if fake.archiveReleaseReturnsOnCall == nil { + fake.archiveReleaseReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.archiveReleaseReturnsOnCall[i] = struct { + result1 error + }{result1} +} + func (fake *FakeReleaseImpl) BranchNeedsCreation(arg1 string, arg2 string, arg3 semver.Version) (bool, error) { 
fake.branchNeedsCreationMutex.Lock() ret, specificReturn := fake.branchNeedsCreationReturnsOnCall[len(fake.branchNeedsCreationArgsForCall)] @@ -1174,6 +1230,8 @@ func (fake *FakeReleaseImpl) ValidateImagesReturnsOnCall(i int, result1 error) { func (fake *FakeReleaseImpl) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() + fake.archiveReleaseMutex.RLock() + defer fake.archiveReleaseMutex.RUnlock() fake.branchNeedsCreationMutex.RLock() defer fake.branchNeedsCreationMutex.RUnlock() fake.checkPrerequisitesMutex.RLock() defer fake.checkPrerequisitesMutex.RUnlock() diff --git a/pkg/anago/release.go b/pkg/anago/release.go index 4e37b5eea25..563abf08596 100644 --- a/pkg/anago/release.go +++ b/pkg/anago/release.go @@ -141,6 +141,7 @@ type releaseImpl interface { PushBranches(pusher *release.GitObjectPusher, branchList []string) error PushMainBranch(pusher *release.GitObjectPusher) error NewGitPusher(opts *release.GitObjectPusherOptions) (*release.GitObjectPusher, error) + ArchiveRelease(options *release.ArchiverOptions) error } func (d *defaultReleaseImpl) Submit(options *gcb.Options) error { @@ -244,6 +245,11 @@ func (d *defaultReleaseImpl) CreateAnnouncement(options *announce.Options) error return announce.CreateForRelease(options) } +func (d *defaultReleaseImpl) ArchiveRelease(options *release.ArchiverOptions) error { + // Create a new release archiver + return release.NewArchiver(options).ArchiveRelease() +} + func (d *defaultReleaseImpl) PushTags( pusher *release.GitObjectPusher, tagList []string, ) error { @@ -444,4 +450,20 @@ func (d *DefaultRelease) CreateAnnouncement() error { return nil } -func (d *DefaultRelease) Archive() error { return nil } +// Archive stores the release artifact in a bucket along with +// its logs for long-term preservation +func (d *DefaultRelease) Archive() error { + // Create a new options set for the release archiver + archiverOptions := &release.ArchiverOptions{ + ReleaseBuildDir: filepath.Join(workspaceDir, "src"), + LogFile: d.state.logFile, + BuildVersion: d.options.BuildVersion, + PrimeVersion: d.state.versions.Prime(), + Bucket: d.options.Bucket(), + } + + if err := d.impl.ArchiveRelease(archiverOptions); err != nil { + return errors.Wrap(err, "running the release archival process") + } + return nil +} diff --git a/pkg/release/archive.go b/pkg/release/archive.go index 1d10b478802..5b60475f5c4 100644 --- a/pkg/release/archive.go +++ b/pkg/release/archive.go @@ -19,6 +19,7 @@ package release import ( "fmt" "io/ioutil" + "os" "path/filepath" "strings" @@ -33,7 +34,9 @@ import ( ) const ( - archiveDirPrefix = "anago-" + archiveDirPrefix = "anago-" // Prefix for archive directories + archiveBucketPath = "archive" // Archive subdirectory in bucket + logsArchiveSubPath = "logs" // Logs subdirectory ) // Archiver stores the release build directory in a bucket @@ -56,23 +59,21 @@ func (archiver *Archiver) SetImpl(impl archiverImpl) { // ArchiverOptions set the options used when archiving a release type ArchiverOptions struct { ReleaseBuildDir string // Build directory that will be archived - LogsDirectory string // Subdirectory to get the logs from - - StageGCSPath string // Stage path in the bucket // ie gs://kubernetes-release/stage - ArchiveGCSPath string // Archive path in the bucket // ie gs://kubernetes-release/archive - - BuildVersion string // Version tag of the release we are archiving + LogFile string // Log file to process and include in the archive + PrimeVersion string // Final version tag + BuildVersion string // Build version from which
this release was cut + Bucket string // Bucket we will use to archive and read staged data } // ArchiveBucketPath returns the bucket path we the release will be stored func (o *ArchiverOptions) ArchiveBucketPath() string { // local archive_bucket="gs://$RELEASE_BUCKET/archive" - if o.ArchiveGCSPath == "" || o.BuildVersion == "" { + if o.Bucket == "" || o.PrimeVersion == "" { return "" } gcs := object.NewGCS() archiveBucketPath, err := gcs.NormalizePath( - filepath.Join(o.ArchiveGCSPath, archiveDirPrefix+o.BuildVersion), + object.GcsPrefix + filepath.Join(o.Bucket, ArchivePath, archiveDirPrefix+o.PrimeVersion), ) if err != nil { logrus.Error(err) @@ -84,29 +85,33 @@ func (o *ArchiverOptions) ArchiveBucketPath() string { // Validate checks if the set values are correct and complete to // start running the archival process func (o *ArchiverOptions) Validate() error { - if o.LogsDirectory == "" { - return errors.New("missing logs subdirectory in archive options") - } - if o.ArchiveGCSPath == "" { - return errors.New("archival bucket location is missing from options") - } - if o.StageGCSPath == "" { - return errors.New("stage bucket location is missing from options") + if o.LogFile == "" { + return errors.New("release log file was not specified") } if !util.Exists(o.ReleaseBuildDir) { return errors.New("GCB worskapce directory does not exist") } - if !util.Exists(filepath.Join(o.LogsDirectory)) { - return errors.New("logs directory does not exist") + if !util.Exists(o.LogFile) { + return errors.New("log file not found") } if o.BuildVersion == "" { - return errors.New("release tag in archiver options is empty") + return errors.New("build version tag in archiver options is empty") + } + if o.PrimeVersion == "" { + return errors.New("prime version tag in archiver options is empty") + } + if o.Bucket == "" { + return errors.New("archive bucket is not specified") } - // Check if the tag is well formed - _, err := util.TagStringToSemver(o.BuildVersion) - if err != nil { - return errors.Wrap(err, "verifying release tag") + // Check if the build version is well formed (used for cleaning old staged builds) + if _, err := util.TagStringToSemver(o.BuildVersion); err != nil { + return errors.Wrap(err, "verifying build version tag") + } + + // Check if the prime version is well formed + if _, err := util.TagStringToSemver(o.PrimeVersion); err != nil { + return errors.Wrap(err, "verifying prime version tag") } return nil @@ -116,10 +121,9 @@ func (o *ArchiverOptions) Validate() error { type archiverImpl interface { CopyReleaseToBucket(string, string) error DeleteStalePasswordFiles(string) error - MakeFilesPrivate(string, []string) error - GetLogFiles(string) ([]string, error) + MakeFilesPrivate(string) error ValidateOptions(*ArchiverOptions) error - CopyReleaseLogs([]string, string) error + CopyReleaseLogs([]string, string, string) error CleanStagedBuilds(string, string) error } @@ -133,23 +137,9 @@ func (archiver *Archiver) ArchiveRelease() error { return errors.Wrap(err, "validating archive options") } - // local logfiles=$(ls $LOGFILE{,.[0-9]} 2>/dev/null || true) - // Before moving anything, find the log files (full path) - logFiles, err := archiver.impl.GetLogFiles(archiver.opts.LogsDirectory) - if err != nil { - return errors.Wrap(err, "getting files from logs directory") - } - // TODO: Is this still relevant?
// local text="files" - // copy_logs_to_workdir - if err := archiver.impl.CopyReleaseLogs( - logFiles, archiver.opts.ReleaseBuildDir, - ); err != nil { - return errors.Wrap(err, "copying release logs to archive") - } - // # TODO: Copy $PROGSTATE as well to GCS and restore it if found // # also delete if complete or just delete once copied back to $TMPDIR // # This is so failures on GCB can be restarted / reentrant too. @@ -166,27 +156,36 @@ func (archiver *Archiver) ArchiveRelease() error { return errors.Wrap(err, "looking for stale password files") } - // Copy the logs to the bucket - if err = archiver.impl.CopyReleaseToBucket( + // Clean previous staged builds + if err := archiver.impl.CleanStagedBuilds( + object.GcsPrefix+filepath.Join(archiver.opts.Bucket, StagePath), + archiver.opts.BuildVersion, + ); err != nil { + return errors.Wrap(err, "deleting previous staged builds") + } + + // Copy the release to the bucket + if err := archiver.impl.CopyReleaseToBucket( archiver.opts.ReleaseBuildDir, archiver.opts.ArchiveBucketPath(), ); err != nil { return errors.Wrap(err, "while copying the release directory") } - // Make the logs private (remove AllUsers from GCS ACL) - if err := archiver.impl.MakeFilesPrivate( - archiver.opts.ArchiveBucketPath(), logFiles, + // copy_logs_to_workdir + if err := archiver.impl.CopyReleaseLogs( + []string{archiver.opts.LogFile}, + filepath.Join(archiver.opts.ReleaseBuildDir, logsArchiveSubPath), + filepath.Join(archiver.opts.ArchiveBucketPath(), logsArchiveSubPath), ); err != nil { - return errors.Wrapf(err, "setting private ACL on logs") + return errors.Wrap(err, "copying release logs to archive") } - // Clean previous staged builds - if err := archiver.impl.CleanStagedBuilds( - archiver.opts.StageGCSPath, - archiver.opts.BuildVersion, + // Make the logs private (remove AllUsers from the GCS ACL) + if err := archiver.impl.MakeFilesPrivate( + filepath.Join(archiver.opts.ArchiveBucketPath(), logsArchiveSubPath), ); err != nil { - return errors.Wrap(err, "deleting previous staged builds") + return errors.Wrapf(err, "setting private ACL on logs") } logrus.Info("Release archive complete") @@ -198,18 +197,17 @@ func (a *defaultArchiverImpl) ValidateOptions(o *ArchiverOptions) error { return errors.Wrap(o.Validate(), "validating options") } -// makeFilesPrivate updates the ACL on the logs to ensure they do not remain worl-readable -func (a *defaultArchiverImpl) MakeFilesPrivate( - archiveBucketPath string, logFiles []string, -) error { - for _, logFile := range logFiles { - logrus.Infof("Ensure PRIVATE ACL on %s/%s", archiveBucketPath, logFile) - // logrun -s $GSUTIL acl ch -d AllUsers "$archive_bucket/$build_dir/${LOGFILE##*/}*" || true - if err := gcp.GSUtil( - "acl", "ch", "-d", "AllUsers", filepath.Join(archiveBucketPath, logFile), - ); err != nil { - return errors.Wrapf(err, "removing public access from %s", logFile) - } +// makeFilesPrivate updates the ACL on all files in a directory +func (a *defaultArchiverImpl) MakeFilesPrivate(archiveBucketPath string) error { + logrus.Infof("Ensure PRIVATE ACL on %s/*", archiveBucketPath) + gcs := object.NewGCS() + logsPath, err := gcs.NormalizePath(archiveBucketPath + "/*") + if err != nil { + return errors.Wrap(err, "normalizing gcs path to modify ACL") + } + // logrun -s $GSUTIL acl ch -d AllUsers "$archive_bucket/$build_dir/${LOGFILE##*/}*" || true + if err := gcp.GSUtil("acl", "ch", "-d", "AllUsers", logsPath); err != nil { + return errors.Wrapf(err, "removing public access from files in %s", archiveBucketPath) } 
return nil } @@ -227,7 +225,23 @@ func (a *defaultArchiverImpl) DeleteStalePasswordFiles(releaseBuildDir string) e // copyReleaseLogs gets a slice of log file names. Those files are // sanitized to remove sensitive data and control characters and then are // copied to the GCB working directory. -func (a *defaultArchiverImpl) CopyReleaseLogs(logFiles []string, targetDir string) error { +func (a *defaultArchiverImpl) CopyReleaseLogs( + logFiles []string, targetDir, archiveBucketLogsPath string, +) (err error) { + // Verify the destination bucket address is correct + gcs := object.NewGCS() + if archiveBucketLogsPath != "" { + archiveBucketLogsPath, err = gcs.NormalizePath(archiveBucketLogsPath) + if err != nil { + return errors.Wrap(err, "normalizing remote logfile destination") + } + } + // Check the destination directory exists + if !util.Exists(targetDir) { + if err := os.Mkdir(targetDir, os.FileMode(0o755)); err != nil { + return errors.Wrap(err, "creating logs archive directory") + } + } for _, fileName := range logFiles { // Strip the logfiles from control chars and sensitive data if err := util.CleanLogFile(fileName); err != nil { @@ -241,6 +255,15 @@ func (a *defaultArchiverImpl) CopyReleaseLogs(logFiles []string, targetDir strin return errors.Wrapf(err, "Copying logfile %s to %s", fileName, targetDir) } } + // TODO: Grab previous log files from stage and copy them to logs dir + + // Rsync log files to remote location if a bucket is specified + if archiveBucketLogsPath != "" { + logrus.Infof("Rsyncing logs to remote bucket %s", archiveBucketLogsPath) + if err := gcs.RsyncRecursive(targetDir, archiveBucketLogsPath); err != nil { + return errors.Wrap(err, "while synching log files to remote bucket addr") + } + } return nil } @@ -250,14 +273,17 @@ func (a *defaultArchiverImpl) CopyReleaseToBucket(releaseBuildDir, archiveBucket // Create a GCS cliente to copy the release gcs := object.NewGCS() + remoteDest, err := gcs.NormalizePath(archiveBucketPath) + if err != nil { + return errors.Wrap(err, "normalizing destination path") + } - logrus.Infof("Copy %s $text to %s...", releaseBuildDir, archiveBucketPath) + logrus.Infof("Copy %s to %s...", releaseBuildDir, remoteDest) // logrun $GSUTIL -mq cp $dash_args $WORKDIR/* $archive_bucket/$build_dir || true - if err := gcs.CopyToRemote(releaseBuildDir, archiveBucketPath); err != nil { + if err := gcs.RsyncRecursive(releaseBuildDir, remoteDest); err != nil { return errors.Wrap(err, "copying release directory to bucket") } - return nil } diff --git a/pkg/release/archive_test.go b/pkg/release/archive_test.go index d0987d20860..45f73146871 100644 --- a/pkg/release/archive_test.go +++ b/pkg/release/archive_test.go @@ -41,12 +41,6 @@ func TestArchiveRelease(t *testing.T) { }, shouldErr: true, }, - { // failure GetLogFiles fails - prepare: func(mock *releasefakes.FakeArchiverImpl) { - mock.GetLogFilesReturns([]string{}, err) - }, - shouldErr: true, - }, { // failure CopyReleaseLogsReturns errors prepare: func(mock *releasefakes.FakeArchiverImpl) { mock.CopyReleaseLogsReturns(err) diff --git a/pkg/release/archive_unit_test.go b/pkg/release/archive_unit_test.go index 81f6566ce3f..18ed605998d 100644 --- a/pkg/release/archive_unit_test.go +++ b/pkg/release/archive_unit_test.go @@ -23,6 +23,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "k8s.io/release/pkg/object" ) const fictionalTestBucketName = "kubernetes-test-name" @@ -56,7 +58,7 @@ func TestCopyReleaseLogsToWorkDir(t *testing.T) { // Create the implementation impl := 
&defaultArchiverImpl{} // Copy the log files to the mock directory - err = impl.CopyReleaseLogs([]string{tmp1.Name(), tmp2.Name()}, destDir) + err = impl.CopyReleaseLogs([]string{tmp1.Name(), tmp2.Name()}, destDir, "") require.Nil(t, err) // Reopoen the files to check them @@ -84,11 +86,11 @@ func TestArchiveBucketPath(t *testing.T) { opts := ArchiverOptions{ // Here, we test without "gs://", the gcs package should // normalize the location with or without - ArchiveGCSPath: fictionalTestBucketName + "/archive", - BuildVersion: "v1.20.0-beta.2", + Bucket: fictionalTestBucketName, + PrimeVersion: "v1.20.0-beta.2", } require.Equal(t, - "gs://"+filepath.Join(fictionalTestBucketName, "archive", archiveDirPrefix+opts.BuildVersion), + object.GcsPrefix+filepath.Join(fictionalTestBucketName, archiveBucketPath, archiveDirPrefix+opts.PrimeVersion), opts.ArchiveBucketPath(), ) } @@ -106,16 +108,17 @@ func TestValidateOpts(t *testing.T) { // With complete values, still should not validate as most // directories do not exist - testOpts.ArchiveGCSPath = "gcs://kubernetes-test-name/archive" - testOpts.StageGCSPath = "gcs://kubernetes-test-name/archive" - testOpts.LogsDirectory = filepath.Join(dir, "/tmp/") - testOpts.BuildVersion = "v1.20.0-beta.1.687+3af376d3ad5009" + testOpts.Bucket = "kubernetes-test-name" + testOpts.PrimeVersion = "v1.20.0-beta.1" + testOpts.BuildVersion = "v1.20.0-beta.0.80+cdfd82733af78c" testOpts.ReleaseBuildDir = filepath.Join(dir, testOpts.BuildVersion) require.NotNil(t, testOpts.Validate()) - // Creating the logs dir (/workdir/tmp) should still not + // Creating a test log (/workdir/tmp) should still not // validate, build dir is missing - require.Nil(t, os.Mkdir(testOpts.LogsDirectory, os.FileMode(0o755))) + tmplog, err := ioutil.TempFile(os.TempDir(), "anago-test-log-") + require.Nil(t, err) + testOpts.LogFile = tmplog.Name() require.NotNil(t, testOpts.Validate()) // Finally create the build dir and we're done diff --git a/pkg/release/release.go b/pkg/release/release.go index 7e2ea07f8b8..62f687736f4 100644 --- a/pkg/release/release.go +++ b/pkg/release/release.go @@ -123,6 +123,9 @@ const ( // The default bazel build directory. BazelBuildDir = "bazel-bin/build" + + // Archive path is the root path in the bucket where releases are archived + ArchivePath = "archive" ) var ( diff --git a/pkg/release/releasefakes/fake_archiver_impl.go b/pkg/release/releasefakes/fake_archiver_impl.go index 42ecae49717..d304b90c440 100644 --- a/pkg/release/releasefakes/fake_archiver_impl.go +++ b/pkg/release/releasefakes/fake_archiver_impl.go @@ -1,19 +1,3 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - // Code generated by counterfeiter. DO NOT EDIT. 
package releasefakes @@ -36,11 +20,12 @@ type FakeArchiverImpl struct { cleanStagedBuildsReturnsOnCall map[int]struct { result1 error } - CopyReleaseLogsStub func([]string, string) error + CopyReleaseLogsStub func([]string, string, string) error copyReleaseLogsMutex sync.RWMutex copyReleaseLogsArgsForCall []struct { arg1 []string arg2 string + arg3 string } copyReleaseLogsReturns struct { result1 error @@ -71,24 +56,10 @@ type FakeArchiverImpl struct { deleteStalePasswordFilesReturnsOnCall map[int]struct { result1 error } - GetLogFilesStub func(string) ([]string, error) - getLogFilesMutex sync.RWMutex - getLogFilesArgsForCall []struct { - arg1 string - } - getLogFilesReturns struct { - result1 []string - result2 error - } - getLogFilesReturnsOnCall map[int]struct { - result1 []string - result2 error - } - MakeFilesPrivateStub func(string, []string) error + MakeFilesPrivateStub func(string) error makeFilesPrivateMutex sync.RWMutex makeFilesPrivateArgsForCall []struct { arg1 string - arg2 []string } makeFilesPrivateReturns struct { result1 error @@ -173,7 +144,7 @@ func (fake *FakeArchiverImpl) CleanStagedBuildsReturnsOnCall(i int, result1 erro }{result1} } -func (fake *FakeArchiverImpl) CopyReleaseLogs(arg1 []string, arg2 string) error { +func (fake *FakeArchiverImpl) CopyReleaseLogs(arg1 []string, arg2 string, arg3 string) error { var arg1Copy []string if arg1 != nil { arg1Copy = make([]string, len(arg1)) @@ -184,13 +155,14 @@ func (fake *FakeArchiverImpl) CopyReleaseLogs(arg1 []string, arg2 string) error fake.copyReleaseLogsArgsForCall = append(fake.copyReleaseLogsArgsForCall, struct { arg1 []string arg2 string - }{arg1Copy, arg2}) + arg3 string + }{arg1Copy, arg2, arg3}) stub := fake.CopyReleaseLogsStub fakeReturns := fake.copyReleaseLogsReturns - fake.recordInvocation("CopyReleaseLogs", []interface{}{arg1Copy, arg2}) + fake.recordInvocation("CopyReleaseLogs", []interface{}{arg1Copy, arg2, arg3}) fake.copyReleaseLogsMutex.Unlock() if stub != nil { - return stub(arg1, arg2) + return stub(arg1, arg2, arg3) } if specificReturn { return ret.result1 @@ -204,17 +176,17 @@ func (fake *FakeArchiverImpl) CopyReleaseLogsCallCount() int { return len(fake.copyReleaseLogsArgsForCall) } -func (fake *FakeArchiverImpl) CopyReleaseLogsCalls(stub func([]string, string) error) { +func (fake *FakeArchiverImpl) CopyReleaseLogsCalls(stub func([]string, string, string) error) { fake.copyReleaseLogsMutex.Lock() defer fake.copyReleaseLogsMutex.Unlock() fake.CopyReleaseLogsStub = stub } -func (fake *FakeArchiverImpl) CopyReleaseLogsArgsForCall(i int) ([]string, string) { +func (fake *FakeArchiverImpl) CopyReleaseLogsArgsForCall(i int) ([]string, string, string) { fake.copyReleaseLogsMutex.RLock() defer fake.copyReleaseLogsMutex.RUnlock() argsForCall := fake.copyReleaseLogsArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 } func (fake *FakeArchiverImpl) CopyReleaseLogsReturns(result1 error) { @@ -363,88 +335,18 @@ func (fake *FakeArchiverImpl) DeleteStalePasswordFilesReturnsOnCall(i int, resul }{result1} } -func (fake *FakeArchiverImpl) GetLogFiles(arg1 string) ([]string, error) { - fake.getLogFilesMutex.Lock() - ret, specificReturn := fake.getLogFilesReturnsOnCall[len(fake.getLogFilesArgsForCall)] - fake.getLogFilesArgsForCall = append(fake.getLogFilesArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.GetLogFilesStub - fakeReturns := fake.getLogFilesReturns - fake.recordInvocation("GetLogFiles", []interface{}{arg1}) - 
fake.getLogFilesMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeArchiverImpl) GetLogFilesCallCount() int { - fake.getLogFilesMutex.RLock() - defer fake.getLogFilesMutex.RUnlock() - return len(fake.getLogFilesArgsForCall) -} - -func (fake *FakeArchiverImpl) GetLogFilesCalls(stub func(string) ([]string, error)) { - fake.getLogFilesMutex.Lock() - defer fake.getLogFilesMutex.Unlock() - fake.GetLogFilesStub = stub -} - -func (fake *FakeArchiverImpl) GetLogFilesArgsForCall(i int) string { - fake.getLogFilesMutex.RLock() - defer fake.getLogFilesMutex.RUnlock() - argsForCall := fake.getLogFilesArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeArchiverImpl) GetLogFilesReturns(result1 []string, result2 error) { - fake.getLogFilesMutex.Lock() - defer fake.getLogFilesMutex.Unlock() - fake.GetLogFilesStub = nil - fake.getLogFilesReturns = struct { - result1 []string - result2 error - }{result1, result2} -} - -func (fake *FakeArchiverImpl) GetLogFilesReturnsOnCall(i int, result1 []string, result2 error) { - fake.getLogFilesMutex.Lock() - defer fake.getLogFilesMutex.Unlock() - fake.GetLogFilesStub = nil - if fake.getLogFilesReturnsOnCall == nil { - fake.getLogFilesReturnsOnCall = make(map[int]struct { - result1 []string - result2 error - }) - } - fake.getLogFilesReturnsOnCall[i] = struct { - result1 []string - result2 error - }{result1, result2} -} - -func (fake *FakeArchiverImpl) MakeFilesPrivate(arg1 string, arg2 []string) error { - var arg2Copy []string - if arg2 != nil { - arg2Copy = make([]string, len(arg2)) - copy(arg2Copy, arg2) - } +func (fake *FakeArchiverImpl) MakeFilesPrivate(arg1 string) error { fake.makeFilesPrivateMutex.Lock() ret, specificReturn := fake.makeFilesPrivateReturnsOnCall[len(fake.makeFilesPrivateArgsForCall)] fake.makeFilesPrivateArgsForCall = append(fake.makeFilesPrivateArgsForCall, struct { arg1 string - arg2 []string - }{arg1, arg2Copy}) + }{arg1}) stub := fake.MakeFilesPrivateStub fakeReturns := fake.makeFilesPrivateReturns - fake.recordInvocation("MakeFilesPrivate", []interface{}{arg1, arg2Copy}) + fake.recordInvocation("MakeFilesPrivate", []interface{}{arg1}) fake.makeFilesPrivateMutex.Unlock() if stub != nil { - return stub(arg1, arg2) + return stub(arg1) } if specificReturn { return ret.result1 @@ -458,17 +360,17 @@ func (fake *FakeArchiverImpl) MakeFilesPrivateCallCount() int { return len(fake.makeFilesPrivateArgsForCall) } -func (fake *FakeArchiverImpl) MakeFilesPrivateCalls(stub func(string, []string) error) { +func (fake *FakeArchiverImpl) MakeFilesPrivateCalls(stub func(string) error) { fake.makeFilesPrivateMutex.Lock() defer fake.makeFilesPrivateMutex.Unlock() fake.MakeFilesPrivateStub = stub } -func (fake *FakeArchiverImpl) MakeFilesPrivateArgsForCall(i int) (string, []string) { +func (fake *FakeArchiverImpl) MakeFilesPrivateArgsForCall(i int) string { fake.makeFilesPrivateMutex.RLock() defer fake.makeFilesPrivateMutex.RUnlock() argsForCall := fake.makeFilesPrivateArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 + return argsForCall.arg1 } func (fake *FakeArchiverImpl) MakeFilesPrivateReturns(result1 error) { @@ -566,8 +468,6 @@ func (fake *FakeArchiverImpl) Invocations() map[string][][]interface{} { defer fake.copyReleaseToBucketMutex.RUnlock() fake.deleteStalePasswordFilesMutex.RLock() defer fake.deleteStalePasswordFilesMutex.RUnlock() - fake.getLogFilesMutex.RLock() - defer 
fake.getLogFilesMutex.RUnlock() fake.makeFilesPrivateMutex.RLock() defer fake.makeFilesPrivateMutex.RUnlock() fake.validateOptionsMutex.RLock()
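Usage note (not part of the patch above): a minimal sketch of how the Archiver added in pkg/release/archive.go could be driven on its own, assuming the ArchiverOptions fields and the NewArchiver/ArchiveRelease API exactly as introduced in this diff. The paths, version tags, and bucket name are hypothetical placeholders; Validate additionally requires the build directory and the log file to exist on disk.

package main

import (
	"log"

	"k8s.io/release/pkg/release"
)

func main() {
	// All values below are illustrative placeholders, not real pipeline data.
	opts := &release.ArchiverOptions{
		ReleaseBuildDir: "/workspace/src",                     // build directory to archive
		LogFile:         "/workspace/tmp/anago.log",           // log file to sanitize and copy
		BuildVersion:    "v1.20.0-beta.1.99+0123456789abcdef", // staged build to clean up
		PrimeVersion:    "v1.20.0-beta.2",                     // final tag, names the anago-<version> archive dir
		Bucket:          "kubernetes-release-test",            // bucket holding the stage and archive paths
	}

	// Validate performs the checks added in ArchiverOptions.Validate:
	// non-empty fields, existing paths, and well-formed semver tags.
	if err := opts.Validate(); err != nil {
		log.Fatalf("invalid archiver options: %v", err)
	}

	// ArchiveRelease cleans previously staged builds, rsyncs the build
	// directory to gs://<bucket>/archive/anago-<prime version>, copies the
	// sanitized log, and removes public access from the archived logs.
	if err := release.NewArchiver(opts).ArchiveRelease(); err != nil {
		log.Fatalf("archiving release: %v", err)
	}
}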