From 410127a08aa82b905de3a46620a98bd9c2d8f22f Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Fri, 2 Feb 2024 20:36:29 -0500 Subject: [PATCH 01/29] Add first draft of buftarget --- private/buf/buffetch/internal/internal.go | 20 +- private/buf/buffetch/internal/reader.go | 27 +-- private/buf/buffetch/internal/reader_test.go | 5 +- private/buf/buftarget/bucket_targeting.go | 205 +++++++++++++++++++ private/buf/buftarget/buftarget.go | 15 ++ private/buf/buftarget/terminate.go | 34 +++ private/buf/buftarget/usage.gen.go | 19 ++ 7 files changed, 294 insertions(+), 31 deletions(-) create mode 100644 private/buf/buftarget/bucket_targeting.go create mode 100644 private/buf/buftarget/buftarget.go create mode 100644 private/buf/buftarget/terminate.go create mode 100644 private/buf/buftarget/usage.gen.go diff --git a/private/buf/buffetch/internal/internal.go b/private/buf/buffetch/internal/internal.go index 74eacb1fe5..94519c2db8 100644 --- a/private/buf/buffetch/internal/internal.go +++ b/private/buf/buffetch/internal/internal.go @@ -21,6 +21,7 @@ import ( "net/http" "strconv" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufconfig" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/pkg/app" @@ -829,7 +830,7 @@ func WithGetReadBucketCloserCopyToInMemory() GetReadBucketCloserOption { // See bufconfig.TerminateAtControllingWorkspace, which is the only thing that uses this. // This is used by both non-ProtoFileRefs to find the controlling workspace, AND ProtoFileRefs // to find the controlling workspace of an enclosing module or workspace. -func WithGetReadBucketCloserTerminateFunc(terminateFunc TerminateFunc) GetReadBucketCloserOption { +func WithGetReadBucketCloserTerminateFunc(terminateFunc buftarget.TerminateFunc) GetReadBucketCloserOption { return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { getReadBucketCloserOptions.terminateFunc = terminateFunc } @@ -841,7 +842,7 @@ func WithGetReadBucketCloserTerminateFunc(terminateFunc TerminateFunc) GetReadBu // See bufconfig.TerminateAtEnclosingModuleOrWorkspaceForProtoFileRef, which is the only thing that uses this. // This finds the enclosing module or workspace. // This is only used for ProtoFileRefs. -func WithGetReadBucketCloserProtoFileTerminateFunc(protoFileTerminateFunc TerminateFunc) GetReadBucketCloserOption { +func WithGetReadBucketCloserProtoFileTerminateFunc(protoFileTerminateFunc buftarget.TerminateFunc) GetReadBucketCloserOption { return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { getReadBucketCloserOptions.protoFileTerminateFunc = protoFileTerminateFunc } @@ -857,25 +858,12 @@ type GetReadWriteBucketOption func(*getReadWriteBucketOptions) // See bufconfig.TerminateAtControllingWorkspace, which is the only thing that uses this. // This is used by both non-ProtoFileRefs to find the controlling workspace, AND ProtoFileRefs // to find the controlling workspace of an enclosing module or workspace. -func WithGetReadWriteBucketTerminateFunc(terminateFunc TerminateFunc) GetReadWriteBucketOption { +func WithGetReadWriteBucketTerminateFunc(terminateFunc buftarget.TerminateFunc) GetReadWriteBucketOption { return func(getReadWriteBucketOptions *getReadWriteBucketOptions) { getReadWriteBucketOptions.terminateFunc = terminateFunc } } -// TerminateFunc is a termination function. -// -// This function should return true if the search should terminate at the prefix, that is -// the prefix should be where the bucket is mapped onto. 
-// -// The original subDirPath is also given to the TerminateFunc. -type TerminateFunc func( - ctx context.Context, - bucket storage.ReadBucket, - prefix string, - originalSubDirPath string, -) (terminate bool, err error) - // PutFileOption is a PutFile option. type PutFileOption func(*putFileOptions) diff --git a/private/buf/buffetch/internal/reader.go b/private/buf/buffetch/internal/reader.go index 54c2d896f4..f9f1346010 100644 --- a/private/buf/buffetch/internal/reader.go +++ b/private/buf/buffetch/internal/reader.go @@ -25,6 +25,7 @@ import ( "os" "path/filepath" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/git" @@ -234,7 +235,7 @@ func (r *reader) getArchiveBucket( ctx context.Context, container app.EnvStdinContainer, archiveRef ArchiveRef, - terminateFunc TerminateFunc, + terminateFunc buftarget.TerminateFunc, ) (_ ReadBucketCloser, retErr error) { subDirPath, err := normalpath.NormalizeAndValidate(archiveRef.SubDirPath()) if err != nil { @@ -293,7 +294,7 @@ func (r *reader) getDirBucket( ctx context.Context, container app.EnvStdinContainer, dirRef DirRef, - terminateFunc TerminateFunc, + terminateFunc buftarget.TerminateFunc, ) (ReadWriteBucket, error) { if !r.localEnabled { return nil, NewReadLocalDisabledError() @@ -305,8 +306,8 @@ func (r *reader) getProtoFileBucket( ctx context.Context, container app.EnvStdinContainer, protoFileRef ProtoFileRef, - terminateFunc TerminateFunc, - protoFileTerminateFunc TerminateFunc, + terminateFunc buftarget.TerminateFunc, + protoFileTerminateFunc buftarget.TerminateFunc, ) (ReadBucketCloser, error) { if !r.localEnabled { return nil, NewReadLocalDisabledError() @@ -325,7 +326,7 @@ func (r *reader) getGitBucket( ctx context.Context, container app.EnvStdinContainer, gitRef GitRef, - terminateFunc TerminateFunc, + terminateFunc buftarget.TerminateFunc, ) (_ ReadBucketCloser, retErr error) { if !r.gitEnabled { return nil, NewReadGitDisabledError() @@ -541,7 +542,7 @@ func getReadBucketCloserForBucket( logger *zap.Logger, inputBucket storage.ReadBucketCloser, inputSubDirPath string, - terminateFunc TerminateFunc, + terminateFunc buftarget.TerminateFunc, ) (ReadBucketCloser, error) { mapPath, subDirPath, _, err := getMapPathAndSubDirPath(ctx, logger, inputBucket, inputSubDirPath, terminateFunc) if err != nil { @@ -589,7 +590,7 @@ func getReadWriteBucketForOS( logger *zap.Logger, storageosProvider storageos.Provider, inputDirPath string, - terminateFunc TerminateFunc, + terminateFunc buftarget.TerminateFunc, ) (ReadWriteBucket, error) { inputDirPath = normalpath.Normalize(inputDirPath) absInputDirPath, err := normalpath.NormalizeAndAbsolute(inputDirPath) @@ -700,8 +701,8 @@ func getReadBucketCloserForOSProtoFile( logger *zap.Logger, storageosProvider storageos.Provider, protoFilePath string, - terminateFunc TerminateFunc, - protoFileTerminateFunc TerminateFunc, + terminateFunc buftarget.TerminateFunc, + protoFileTerminateFunc buftarget.TerminateFunc, ) (ReadBucketCloser, error) { // First, we figure out which directory we consider to be the module that encapsulates // this ProtoFileRef. If we find a buf.yaml or buf.work.yaml, then we use that as the directory. 
If we @@ -849,7 +850,7 @@ func getMapPathAndSubDirPath( logger *zap.Logger, inputBucket storage.ReadBucket, inputSubDirPath string, - terminateFunc TerminateFunc, + terminateFunc buftarget.TerminateFunc, ) (mapPath string, subDirPath string, terminate bool, retErr error) { inputSubDirPath, err := normalpath.NormalizeAndValidate(inputSubDirPath) if err != nil { @@ -939,8 +940,8 @@ func newGetFileOptions() *getFileOptions { } type getReadBucketCloserOptions struct { - terminateFunc TerminateFunc - protoFileTerminateFunc TerminateFunc + terminateFunc buftarget.TerminateFunc + protoFileTerminateFunc buftarget.TerminateFunc copyToInMemory bool } @@ -949,7 +950,7 @@ func newGetReadBucketCloserOptions() *getReadBucketCloserOptions { } type getReadWriteBucketOptions struct { - terminateFunc TerminateFunc + terminateFunc buftarget.TerminateFunc } func newGetReadWriteBucketOptions() *getReadWriteBucketOptions { diff --git a/private/buf/buffetch/internal/reader_test.go b/private/buf/buffetch/internal/reader_test.go index 546545d296..6949b3c2c4 100644 --- a/private/buf/buffetch/internal/reader_test.go +++ b/private/buf/buffetch/internal/reader_test.go @@ -19,6 +19,7 @@ import ( "path/filepath" "testing" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/osext" "github.com/bufbuild/buf/private/pkg/storage" @@ -404,8 +405,8 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLAbsPwd(t *testing.T) { require.NoError(t, readBucketCloser.Close()) } -func testNewTerminateAtFileNamesFunc(terminateFileNames ...string) TerminateFunc { - return TerminateFunc( +func testNewTerminateAtFileNamesFunc(terminateFileNames ...string) buftarget.TerminateFunc { + return buftarget.TerminateFunc( func( ctx context.Context, bucket storage.ReadBucket, diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go new file mode 100644 index 0000000000..e035830c59 --- /dev/null +++ b/private/buf/buftarget/bucket_targeting.go @@ -0,0 +1,205 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buftarget + +import ( + "context" + "fmt" + + "github.com/bufbuild/buf/private/pkg/normalpath" + "github.com/bufbuild/buf/private/pkg/storage" + "go.uber.org/zap" +) + +// TODO(doria): we need to add the following info: +// BufWorkYAMLDirPaths (for v1 vs. v2 workspaces) + +// BucketTargeting provides the path to the controllng workspace, target paths, and target +// exclude paths mapped to the bucket. +type BucketTargeting interface { + // ControllingWorkspace returns the mapped path for the controlling workspace. + ControllingWorkspace() string + // TargetPaths returns the target paths mapped to the bucket. + TargetPaths() []string + // TargetExcludePaths returns the target exclude paths mapped to the bucket. + TargetExcludePaths() []string + // Terminated returns whether the controlling workspace was found through the terminateFunc. 
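+	// When no terminateFunc is provided, or no prefix matches it, this is false.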
+ Terminated() bool + + isBucketTargeting() +} + +func NewBucketTargeting( + ctx context.Context, + logger *zap.Logger, + inputBucket storage.ReadBucket, + inputSubDirPath string, + targetPaths []string, + targetExcludePaths []string, + terminateFunc TerminateFunc, // TODO(doria): move that out of buffetch +) (BucketTargeting, error) { + return newBucketTargeting(ctx, logger, inputBucket, inputSubDirPath, targetPaths, targetExcludePaths, terminateFunc) +} + +// *** PRIVATE *** + +var ( + _ BucketTargeting = &bucketTargeting{} +) + +type bucketTargeting struct { + controllingWorkspace string + targetPaths []string + targetExcludePaths []string + terminated bool +} + +func (b *bucketTargeting) ControllingWorkspace() string { + return b.controllingWorkspace +} + +func (b *bucketTargeting) TargetPaths() []string { + return b.targetPaths +} + +func (b *bucketTargeting) TargetExcludePaths() []string { + return b.targetExcludePaths +} + +func (b *bucketTargeting) Terminated() bool { + return b.terminated +} + +func (b *bucketTargeting) isBucketTargeting() { +} + +func newBucketTargeting( + ctx context.Context, + logger *zap.Logger, + inputBucket storage.ReadBucket, + inputPath string, + targetPaths []string, + targetExcludePaths []string, + terminateFunc TerminateFunc, +) (*bucketTargeting, error) { + // First we map the controlling workspace for the inputPath. + controllingWorkspace, mappedInputPath, terminated, err := mapControllingWorkspaceAndPath( + ctx, + logger, + inputBucket, + inputPath, + terminateFunc, + ) + if err != nil { + return nil, err + } + // The input path should not be treated differently from just a normal target path. + mappedTargetPaths := []string{mappedInputPath} + // Then we do the same for each target path. If the target paths resolve to different + // controlling workspaces, then we return an error. + // We currently do not compile nested workspaces, but this algorithm lets us potentially + // handle nested workspaces in the future. + // TODO(doria): this shouldn't have a big impact on performance, right? + // TODO(doria): do we need to handle that there was a termination through the paths? maybe. + for _, targetPath := range targetPaths { + controllingWorkspaceForPath, mappedPath, _, err := mapControllingWorkspaceAndPath( + ctx, + logger, + inputBucket, + targetPath, + terminateFunc, + ) + if err != nil { + return nil, err + } + if controllingWorkspaceForPath != controllingWorkspace { + return nil, fmt.Errorf("more than one workspace resolved for given paths: %q, %q", controllingWorkspaceForPath, controllingWorkspace) + } + mappedTargetPaths = append(mappedTargetPaths, mappedPath) + } + // NOTE: we do not map excluded paths to their own workspaces -- we use the controlling + // workspace we resolved through our input path and target paths. If an excluded path does + // not exist, we do not validate this. + var mappedTargetExcludePaths []string + for _, targetExcludePath := range targetExcludePaths { + mappedTargetExcludePath, err := normalpath.Rel(controllingWorkspace, targetExcludePath) + if err != nil { + return nil, err + } + mappedTargetExcludePaths = append(mappedTargetExcludePaths, mappedTargetExcludePath) + } + return &bucketTargeting{ + controllingWorkspace: controllingWorkspace, + targetPaths: mappedTargetPaths, + targetExcludePaths: mappedTargetExcludePaths, + terminated: terminated, + }, nil +} + +// mapControllingWorkspaceAndPath takes a bucket, path, and terminate func and returns the +// controlling workspace path and mapped path. 
+func mapControllingWorkspaceAndPath( + ctx context.Context, + logger *zap.Logger, + inputBucket storage.ReadBucket, + path string, + terminateFunc TerminateFunc, +) (string, string, bool, error) { + path, err := normalpath.NormalizeAndValidate(path) + if err != nil { + return "", "", err + } + // If no terminateFunc is passed, we can simply assume that we are mapping the bucket at + // the path. + if terminateFunc == nil { + return path, ".", nil + } + // We can't do this in a traditional loop like this: + // + // for curDirPath := path; curDirPath != "."; curDirPath = normalpath.Dir(curDirPath) { + // + // If we do that, then we don't run terminateFunc for ".", which we want to so that we get + // the correct value for the terminate bool. + // + // Instead, we effectively do a do-while loop. + curDirPath := path + for { + terminate, err := terminateFunc(ctx, inputBucket, curDirPath, path) + if err != nil { + return "", "", false, err + } + if terminate { + logger.Debug( + "buffetch termination found", + zap.String("curDirPath", curDirPath), + zap.String("path", path), + ) + subDirPath, err := normalpath.Rel(curDirPath, path) + if err != nil { + return "", "", false, err + } + return curDirPath, subDirPath, true, nil + } + if curDirPath == "." { + break + } + curDirPath = normalpath.Dir(curDirPath) + } + logger.Debug( + "buffetch no termination found", + zap.String("path", path), + ) + return curDirPath, ".", false, nil +} diff --git a/private/buf/buftarget/buftarget.go b/private/buf/buftarget/buftarget.go new file mode 100644 index 0000000000..9bc8d96cd2 --- /dev/null +++ b/private/buf/buftarget/buftarget.go @@ -0,0 +1,15 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buftarget diff --git a/private/buf/buftarget/terminate.go b/private/buf/buftarget/terminate.go new file mode 100644 index 0000000000..251e9ed35e --- /dev/null +++ b/private/buf/buftarget/terminate.go @@ -0,0 +1,34 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buftarget + +import ( + "context" + + "github.com/bufbuild/buf/private/pkg/storage" +) + +// TerminateFunc is a termination function. +// +// This function should return true if the search should terminate at the prefix, that is +// the prefix should be where the bucket is mapped onto. +// +// The original subDirPath is also given to the TerminateFunc. 
+type TerminateFunc func( + ctx context.Context, + bucket storage.ReadBucket, + prefix string, + originalSubDirPath string, +) (terminate bool, err error) diff --git a/private/buf/buftarget/usage.gen.go b/private/buf/buftarget/usage.gen.go new file mode 100644 index 0000000000..ad6e3b6834 --- /dev/null +++ b/private/buf/buftarget/usage.gen.go @@ -0,0 +1,19 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Generated. DO NOT EDIT. + +package buftarget + +import _ "github.com/bufbuild/buf/private/usage" From 33572a598335dbfa865fea06235285d5031187a4 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Thu, 15 Feb 2024 13:31:11 -0500 Subject: [PATCH 02/29] Plumb `buftarget` through to `buffetch/internal` This has some failing tests in `reader_test.go` because we are changing the behaviour of `ProtoFileRef`. We need to wire the logic a couple of layers up before we change these tests and ensure that everything works. --- private/buf/buffetch/internal/internal.go | 9 + .../buffetch/internal/read_bucket_closer.go | 31 +- .../buffetch/internal/read_write_bucket.go | 29 +- private/buf/buffetch/internal/reader.go | 347 ++++-------------- private/buf/buffetch/internal/reader_test.go | 7 - private/buf/buftarget/bucket_targeting.go | 82 +++-- 6 files changed, 183 insertions(+), 322 deletions(-) diff --git a/private/buf/buffetch/internal/internal.go b/private/buf/buffetch/internal/internal.go index 94519c2db8..d55c744dd0 100644 --- a/private/buf/buffetch/internal/internal.go +++ b/private/buf/buffetch/internal/internal.go @@ -468,6 +468,15 @@ type Reader interface { moduleRef ModuleRef, options ...GetModuleOption, ) (bufmodule.ModuleKey, error) + + // TODO(doria): rename this + // GetBucketTargeting( + // ctx context.Context, + // inputSubDirPath string, + // targetPaths []string, + // targetExcludePaths []string, + // terminateFunc TerminateFunc, // TODO(doria): move that out of buffetch + // ) (buftarget.BucketTargeting, error) } // NewReader returns a new Reader. 
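As a rough sketch of how buftarget is meant to be driven at this point in the series (this is not code from the patch; ctx, logger, and a storage.ReadBucket named bucket are assumed to be in scope, and the buf.yaml probe via Stat stands in for the real bufconfig terminate functions), a caller could look like:

	terminateAtBufYAML := buftarget.TerminateFunc(
		func(ctx context.Context, bucket storage.ReadBucket, prefix string, originalSubDirPath string) (bool, error) {
			// Assumption: treat any Stat error as "not found"; real code would check for a
			// not-exist error specifically instead of swallowing all errors.
			if _, err := bucket.Stat(ctx, normalpath.Join(prefix, "buf.yaml")); err != nil {
				return false, nil
			}
			return true, nil
		},
	)
	bucketTargeting, err := buftarget.NewBucketTargeting(
		ctx,
		logger,
		bucket,
		"path/to/foo", // input path, relative to the root of bucket
		nil,           // target paths
		nil,           // target exclude paths
		terminateAtBufYAML,
	)
	if err != nil {
		return err
	}
	// If "path/to" contains a buf.yaml, bucketTargeting.ControllingWorkspacePath() is
	// "path/to" and bucketTargeting.InputPath() is "foo".
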
diff --git a/private/buf/buffetch/internal/read_bucket_closer.go b/private/buf/buffetch/internal/read_bucket_closer.go index 078a81b831..e0034c7c4f 100644 --- a/private/buf/buffetch/internal/read_bucket_closer.go +++ b/private/buf/buffetch/internal/read_bucket_closer.go @@ -16,7 +16,10 @@ package internal import ( "context" + "fmt" + "path/filepath" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storagemem" @@ -33,17 +36,33 @@ type readBucketCloser struct { func newReadBucketCloser( storageReadBucketCloser storage.ReadBucketCloser, - subDirPath string, - pathForExternalPath func(string) (string, error), + bucketTargeting buftarget.BucketTargeting, ) (*readBucketCloser, error) { - normalizedSubDirPath, err := normalpath.NormalizeAndValidate(subDirPath) + normalizedSubDirPath, err := normalpath.NormalizeAndValidate(bucketTargeting.InputPath()) if err != nil { return nil, err } return &readBucketCloser{ - ReadBucketCloser: storageReadBucketCloser, - subDirPath: normalizedSubDirPath, - pathForExternalPath: pathForExternalPath, + ReadBucketCloser: storageReadBucketCloser, + subDirPath: normalizedSubDirPath, + // This turns paths that were done relative to the root of the input into paths + // that are now relative to the mapped bucket. + // + // This happens if you do i.e. .git#subdir=foo/bar --path foo/bar/baz.proto + // We need to turn the path into baz.proto + pathForExternalPath: func(externalPath string) (string, error) { + if filepath.IsAbs(externalPath) { + return "", fmt.Errorf("%s: absolute paths cannot be used for this input type", externalPath) + } + if !normalpath.EqualsOrContainsPath(bucketTargeting.ControllingWorkspacePath(), externalPath, normalpath.Relative) { + return "", fmt.Errorf("path %q from input does not contain path %q", bucketTargeting.ControllingWorkspacePath(), externalPath) + } + relPath, err := normalpath.Rel(bucketTargeting.ControllingWorkspacePath(), externalPath) + if err != nil { + return "", err + } + return normalpath.NormalizeAndValidate(relPath) + }, }, nil } diff --git a/private/buf/buffetch/internal/read_write_bucket.go b/private/buf/buffetch/internal/read_write_bucket.go index 96c7416a0a..d0b657bc6d 100644 --- a/private/buf/buffetch/internal/read_write_bucket.go +++ b/private/buf/buffetch/internal/read_write_bucket.go @@ -15,6 +15,9 @@ package internal import ( + "path/filepath" + + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/storage" ) @@ -30,17 +33,31 @@ type readWriteBucket struct { func newReadWriteBucket( storageReadWriteBucket storage.ReadWriteBucket, - subDirPath string, - pathForExternalPath func(string) (string, error), + bucketTargeting buftarget.BucketTargeting, ) (*readWriteBucket, error) { - normalizedSubDirPath, err := normalpath.NormalizeAndValidate(subDirPath) + // TODO(doria): document this + normalizedSubDirPath, err := normalpath.NormalizeAndValidate(bucketTargeting.InputPath()) if err != nil { return nil, err } return &readWriteBucket{ - ReadWriteBucket: storageReadWriteBucket, - subDirPath: normalizedSubDirPath, - pathForExternalPath: pathForExternalPath, + ReadWriteBucket: storageReadWriteBucket, + subDirPath: normalizedSubDirPath, + pathForExternalPath: func(externalPath string) (string, error) { + absBucketPath, err := filepath.Abs(normalpath.Unnormalize(bucketTargeting.ControllingWorkspacePath())) + 
if err != nil { + return "", err + } + absExternalPath, err := filepath.Abs(normalpath.Unnormalize(externalPath)) + if err != nil { + return "", err + } + path, err := filepath.Rel(absBucketPath, absExternalPath) + if err != nil { + return "", err + } + return normalpath.NormalizeAndValidate(path) + }, }, nil } diff --git a/private/buf/buffetch/internal/reader.go b/private/buf/buffetch/internal/reader.go index f9f1346010..cbc0943cdd 100644 --- a/private/buf/buffetch/internal/reader.go +++ b/private/buf/buffetch/internal/reader.go @@ -167,7 +167,7 @@ func (r *reader) GetReadBucketCloser( ctx, container, t, - getReadBucketCloserOptions.terminateFunc, + // getReadBucketCloserOptions.terminateFunc, getReadBucketCloserOptions.protoFileTerminateFunc, ) default: @@ -237,10 +237,6 @@ func (r *reader) getArchiveBucket( archiveRef ArchiveRef, terminateFunc buftarget.TerminateFunc, ) (_ ReadBucketCloser, retErr error) { - subDirPath, err := normalpath.NormalizeAndValidate(archiveRef.SubDirPath()) - if err != nil { - return nil, err - } readCloser, size, err := r.getFileReadCloserAndSize(ctx, container, archiveRef, false) if err != nil { return nil, err @@ -287,7 +283,7 @@ func (r *reader) getArchiveBucket( default: return nil, fmt.Errorf("unknown ArchiveType: %v", archiveType) } - return getReadBucketCloserForBucket(ctx, r.logger, storage.NopReadBucketCloser(readWriteBucket), subDirPath, terminateFunc) + return getReadBucketCloserForBucket(ctx, r.logger, storage.NopReadBucketCloser(readWriteBucket), archiveRef.SubDirPath(), terminateFunc) } func (r *reader) getDirBucket( @@ -307,7 +303,6 @@ func (r *reader) getProtoFileBucket( container app.EnvStdinContainer, protoFileRef ProtoFileRef, terminateFunc buftarget.TerminateFunc, - protoFileTerminateFunc buftarget.TerminateFunc, ) (ReadBucketCloser, error) { if !r.localEnabled { return nil, NewReadLocalDisabledError() @@ -318,7 +313,6 @@ func (r *reader) getProtoFileBucket( r.storageosProvider, protoFileRef.Path(), terminateFunc, - protoFileTerminateFunc, ) } @@ -334,10 +328,6 @@ func (r *reader) getGitBucket( if r.gitCloner == nil { return nil, errors.New("git cloner is nil") } - subDirPath, err := normalpath.NormalizeAndValidate(gitRef.SubDirPath()) - if err != nil { - return nil, err - } gitURL, err := getGitURL(gitRef) if err != nil { return nil, err @@ -356,7 +346,7 @@ func (r *reader) getGitBucket( ); err != nil { return nil, fmt.Errorf("could not clone %s: %v", gitURL, err) } - return getReadBucketCloserForBucket(ctx, r.logger, storage.NopReadBucketCloser(readWriteBucket), subDirPath, terminateFunc) + return getReadBucketCloserForBucket(ctx, r.logger, storage.NopReadBucketCloser(readWriteBucket), gitRef.SubDirPath(), terminateFunc) } func (r *reader) getModuleKey( @@ -544,43 +534,35 @@ func getReadBucketCloserForBucket( inputSubDirPath string, terminateFunc buftarget.TerminateFunc, ) (ReadBucketCloser, error) { - mapPath, subDirPath, _, err := getMapPathAndSubDirPath(ctx, logger, inputBucket, inputSubDirPath, terminateFunc) + // TODO(doria): delete later, keeping some notes as we work. + // The point of doing this is to remap the bucket based on a controlling workspace if found + // and then also remapping the `SubDirPath` against this. + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + logger, + inputBucket, + inputSubDirPath, + nil, // TODO(doria): we should plumb paths down here + nil, // TODO(doria): we should plumb paths down here + terminateFunc, + ) if err != nil { return nil, err } - if mapPath != "." 
{ + if bucketTargeting.ControllingWorkspacePath() != "." { inputBucket = storage.MapReadBucketCloser( inputBucket, - storage.MapOnPrefix(mapPath), + storage.MapOnPrefix(bucketTargeting.ControllingWorkspacePath()), ) } logger.Debug( "buffetch creating new bucket", - zap.String("inputSubDirPath", inputSubDirPath), - zap.String("mapPath", mapPath), - zap.String("subDirPath", subDirPath), + zap.String("controllingWorkspacePath", bucketTargeting.ControllingWorkspacePath()), + zap.Strings("targetPaths", bucketTargeting.TargetPaths()), ) return newReadBucketCloser( inputBucket, - subDirPath, - // This turns paths that were done relative to the root of the input into paths - // that are now relative to the mapped bucket. - // - // This happens if you do i.e. .git#subdir=foo/bar --path foo/bar/baz.proto - // We need to turn the path into baz.proto - func(externalPath string) (string, error) { - if filepath.IsAbs(externalPath) { - return "", fmt.Errorf("%s: absolute paths cannot be used for this input type", externalPath) - } - if !normalpath.EqualsOrContainsPath(mapPath, externalPath, normalpath.Relative) { - return "", fmt.Errorf("path %q from input does not contain path %q", mapPath, externalPath) - } - relPath, err := normalpath.Rel(mapPath, externalPath) - if err != nil { - return "", err - } - return normalpath.NormalizeAndValidate(relPath) - }, + bucketTargeting, ) } @@ -592,14 +574,10 @@ func getReadWriteBucketForOS( inputDirPath string, terminateFunc buftarget.TerminateFunc, ) (ReadWriteBucket, error) { - inputDirPath = normalpath.Normalize(inputDirPath) - absInputDirPath, err := normalpath.NormalizeAndAbsolute(inputDirPath) + fsRoot, inputSubDirPath, err := fsRootAndFSRelPathForPath(inputDirPath) if err != nil { return nil, err } - // Split the absolute path into components to get the FS root - absInputDirPathComponents := normalpath.Components(absInputDirPath) - fsRoot := absInputDirPathComponents[0] osRootBucket, err := storageosProvider.NewReadWriteBucket( fsRoot, storageos.ReadWriteBucketWithSymlinksIfSupported(), @@ -607,22 +585,25 @@ func getReadWriteBucketForOS( if err != nil { return nil, err } - var inputSubDirPath string - if len(absInputDirPathComponents) > 1 { - // The first component is the FS root, so we check this is safe and join - // the rest of the components for the relative input subdir path. - inputSubDirPath = normalpath.Join(absInputDirPathComponents[1:]...) - } - mapPath, subDirPath, _, err := getMapPathAndSubDirPath( + osRootBucketTargeting, err := buftarget.NewBucketTargeting( ctx, logger, osRootBucket, inputSubDirPath, + nil, // TODO(doria): we should plumb paths down here + nil, // TODO(doria): we should plumb paths down here terminateFunc, ) if err != nil { return nil, attemptToFixOSRootBucketPathErrors(fsRoot, err) } + // TODO(doria): I'd like to completely kill and refactor this if possible. + // We now know where the workspace is relative to the FS root. + // If the input path is provided as an absolute path, we create a new bucket for the + // controlling workspace using an absolute path. + // Otherwise we current working directory (pwd) and create the bucket using a relative + // path to that. 
+ // // Examples: // // inputDirPath: path/to/foo @@ -637,28 +618,37 @@ func getReadWriteBucketForOS( // returnSubDirPath: foo // Make bucket on: FS root + returnMapPath (since absolute) var bucketPath string + var inputPath string if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { - bucketPath = normalpath.Join(fsRoot, mapPath) + var err error + bucketPath = normalpath.Join(fsRoot, osRootBucketTargeting.ControllingWorkspacePath()) + inputPath, err = normalpath.Rel(bucketPath, normalpath.Join(fsRoot, inputSubDirPath)) + if err != nil { + return nil, err + } } else { pwd, err := osext.Getwd() if err != nil { return nil, err } - // Removing the root so we can make mapPath relative. - // We are using normalpath.Components to split the path and remove the root (first component). - // The length of the root may vary depending on the OS and file path type (e.g. Windows paths), - // but normalpath.Components takes care of that. - pwdComponents := normalpath.Components(pwd) - if len(pwdComponents) > 1 { - pwd = normalpath.Normalize(normalpath.Join(pwdComponents[1:]...)) - } else { - pwd = "" + _, pwdFSRelPath, err := fsRootAndFSRelPathForPath(pwd) + if err != nil { + return nil, err } - bucketPath, err = normalpath.Rel(pwd, mapPath) + bucketPath, err = normalpath.Rel(pwdFSRelPath, osRootBucketTargeting.ControllingWorkspacePath()) + if err != nil { + return nil, err + } + inputPath, err = normalpath.Rel(osRootBucketTargeting.ControllingWorkspacePath(), inputSubDirPath) if err != nil { return nil, err } } + // Now that we've mapped the workspace against the FS root, we recreate the bucket with + // at the sub dir path. + // First we get the absolute path of the controlling workspace, which based on the OS root + // bucket targeting is the FS root joined with the controlling workspace path. + // And we use it to make a bucket. bucket, err := storageosProvider.NewReadWriteBucket( bucketPath, storageos.ReadWriteBucketWithSymlinksIfSupported(), @@ -666,32 +656,21 @@ func getReadWriteBucketForOS( if err != nil { return nil, err } - logger.Debug( - "creating new OS bucket for controlling workspace", - zap.String("inputDirPath", inputDirPath), - zap.String("workspacePath", bucketPath), - zap.String("subDirPath", subDirPath), + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + logger, + bucket, + inputPath, + nil, // TODO(doria): we should plumb paths down here + nil, // TODO(doria): we should plumb paths down here + terminateFunc, ) + if err != nil { + return nil, err + } return newReadWriteBucket( bucket, - subDirPath, - // This function turns paths into paths relative to the bucket. - func(externalPath string) (string, error) { - absBucketPath, err := filepath.Abs(normalpath.Unnormalize(bucketPath)) - if err != nil { - return "", err - } - // We shouldn't actually need to unnormalize externalPath but we do anyways. - absExternalPath, err := filepath.Abs(normalpath.Unnormalize(externalPath)) - if err != nil { - return "", err - } - path, err := filepath.Rel(absBucketPath, absExternalPath) - if err != nil { - return "", err - } - return normalpath.NormalizeAndValidate(path) - }, + bucketTargeting, ) } @@ -702,202 +681,36 @@ func getReadBucketCloserForOSProtoFile( storageosProvider storageos.Provider, protoFilePath string, terminateFunc buftarget.TerminateFunc, - protoFileTerminateFunc buftarget.TerminateFunc, ) (ReadBucketCloser, error) { - // First, we figure out which directory we consider to be the module that encapsulates - // this ProtoFileRef. 
If we find a buf.yaml or buf.work.yaml, then we use that as the directory. If we - // do not, we use the current directory as the directory. - protoFileDirPath := normalpath.Dir(protoFilePath) - absProtoFileDirPath, err := normalpath.NormalizeAndAbsolute(protoFileDirPath) - if err != nil { - return nil, err - } - // Split the absolute path into components to get the FS root - absProtoFileDirPathComponents := normalpath.Components(absProtoFileDirPath) - fsRoot := absProtoFileDirPathComponents[0] - osRootBucket, err := storageosProvider.NewReadWriteBucket( - fsRoot, - storageos.ReadWriteBucketWithSymlinksIfSupported(), - ) - if err != nil { - return nil, err - } - var inputSubDirPath string - if len(absProtoFileDirPathComponents) > 1 { - // The first component is the FS root, so we check this is safe and join - // the rest of the components for the relative input subdir path. - inputSubDirPath = normalpath.Join(absProtoFileDirPathComponents[1:]...) - } - // mapPath is the path to the bucket that contains a buf.yaml. - // subDirPath is the relative path from mapPath to the protoFileDirPath, but we don't use it. - mapPath, _, terminate, err := getMapPathAndSubDirPath( + protoFileDir := normalpath.Dir(protoFilePath) + readWriteBucket, err := getReadWriteBucketForOS( ctx, logger, - osRootBucket, - inputSubDirPath, - protoFileTerminateFunc, + storageosProvider, + protoFileDir, + terminateFunc, ) - if err != nil { - return nil, attemptToFixOSRootBucketPathErrors(fsRoot, err) - } - - var protoTerminateFileDirPath string - if !terminate { - // If we did not find a buf.yaml or buf.work.yaml, use the current directory. - // If the ProtoFileRef path was absolute, use an absolute path, otherwise relative. - // - // However, if the current directory does not contain the .proto file, we cannot use it, - // as we need the bucket to encapsulate the .proto file. In this case, we fall back - // to using the absolute directory of the .proto file. We need to do this because - // PathForExternalPath (defined in getReadWriteBucketForOS) needs to make sure that - // a given path can be made relative to the bucket, and be normalized and validated. - if filepath.IsAbs(normalpath.Unnormalize(protoFileDirPath)) { - pwd, err := osext.Getwd() - if err != nil { - return nil, err - } - protoTerminateFileDirPath = normalpath.Normalize(pwd) - } else { - protoTerminateFileDirPath = "." - } - absProtoTerminateFileDirPath, err := normalpath.NormalizeAndAbsolute(protoTerminateFileDirPath) - if err != nil { - return nil, err - } - if !normalpath.EqualsOrContainsPath(absProtoFileDirPath, absProtoTerminateFileDirPath, normalpath.Absolute) { - logger.Debug( - "did not find enclosing module or workspace for proto file ref and pwd does not encapsulate proto file", - zap.String("protoFilePath", protoFilePath), - zap.String("defaultingToAbsProtoFileDirPath", absProtoFileDirPath), - ) - protoTerminateFileDirPath = absProtoFileDirPath - } else { - logger.Debug( - "did not find enclosing module or workspace for proto file ref", - zap.String("protoFilePath", protoFilePath), - zap.String("defaultingToPwd", protoTerminateFileDirPath), - ) - } - } else { - // We found a buf.yaml or buf.work.yaml, use that directory. - // If we found a buf.yaml or buf.work.yaml and the ProtoFileRef path is absolute, use an absolute path, otherwise relative. 
- if filepath.IsAbs(normalpath.Unnormalize(protoFileDirPath)) { - protoTerminateFileDirPath = normalpath.Join(fsRoot, mapPath) - } else { - pwd, err := osext.Getwd() - if err != nil { - return nil, err - } - // Removing the root so we can make mapPath relative. - // We are using normalpath.Components to split the path and remove the root (first component). - // The length of the root may vary depending on the OS and file path type (e.g. Windows paths), - // but normalpath.Components takes care of that. - pwdComponents := normalpath.Components(pwd) - if len(pwdComponents) > 1 { - pwd = normalpath.Normalize(normalpath.Join(pwdComponents[1:]...)) - } else { - pwd = "" - } - protoTerminateFileDirPath, err = normalpath.Rel(pwd, mapPath) - if err != nil { - return nil, err - } - } - logger.Debug( - "found enclosing module or workspace for proto file ref", - zap.String("protoFilePath", protoFilePath), - zap.String("enclosingDirPath", protoTerminateFileDirPath), - ) - } - // Now, build a workspace bucket based on the directory we found. - // If the directory is a module directory, we'll get the enclosing workspace. - // If the directory is a workspace directory, this will effectively be a no-op. - readWriteBucket, err := getReadWriteBucketForOS(ctx, logger, storageosProvider, protoTerminateFileDirPath, terminateFunc) if err != nil { return nil, err } return newReadBucketCloserForReadWriteBucket(readWriteBucket), nil } -// Gets two values: -// -// - The directory relative to the bucket that the bucket should be mapped onto. -// - A new subDirPath that matches the inputSubDirPath but for the new ReadBucketCloser. -// -// Examples: -// -// inputSubDirPath: path/to/foo -// terminateFileLocation: path/to -// returnMapPath: path/to -// returnSubDirPath: foo -// -// inputSubDirPath: users/alice/path/to/foo -// terminateFileLocation: users/alice/path/to -// returnMapPath: users/alice/path/to -// returnSubDirPath: foo -// -// inputBucket: path/to/foo -// terminateFileLocation: NONE -// returnMapPath: path/to/foo -// returnSubDirPath: . - -// inputSubDirPath: . -// terminateFileLocation: NONE -// returnMapPath: . -// returnSubDirPath: . -func getMapPathAndSubDirPath( - ctx context.Context, - logger *zap.Logger, - inputBucket storage.ReadBucket, - inputSubDirPath string, - terminateFunc buftarget.TerminateFunc, -) (mapPath string, subDirPath string, terminate bool, retErr error) { - inputSubDirPath, err := normalpath.NormalizeAndValidate(inputSubDirPath) +// fsRootAndFSRelPathForPath is a helper function that takes a path and returns the FS +// root and relative path to the FS root. +func fsRootAndFSRelPathForPath(path string) (string, string, error) { + absPath, err := normalpath.NormalizeAndAbsolute(path) if err != nil { - return "", "", false, err - } - // The for loops would take care of this base case, but we don't want - // to call storage.MapReadBucket unless we have to. - if terminateFunc == nil { - return inputSubDirPath, ".", false, nil + return "", "", err } - // We can't do this in a traditional loop like this: - // - // for curDirPath := inputSubDirPath; curDirPath != "."; curDirPath = normalpath.Dir(curDirPath) { - // - // If we do that, then we don't run terminateFunc for ".", which we want to so that we get - // the correct value for the terminate bool. - // - // Instead, we effectively do a do-while loop. 
- curDirPath := inputSubDirPath - for { - terminate, err := terminateFunc(ctx, inputBucket, curDirPath, inputSubDirPath) - if err != nil { - return "", "", false, err - } - if terminate { - logger.Debug( - "buffetch termination found", - zap.String("curDirPath", curDirPath), - zap.String("inputSubDirPath", inputSubDirPath), - ) - subDirPath, err := normalpath.Rel(curDirPath, inputSubDirPath) - if err != nil { - return "", "", false, err - } - return curDirPath, subDirPath, true, nil - } - if curDirPath == "." { - // Do this instead. This makes this loop effectively a do-while loop. - break - } - curDirPath = normalpath.Dir(curDirPath) + // Split the absolute path into components to get the FS root + absPathComponents := normalpath.Components(absPath) + fsRoot := absPathComponents[0] + fsRelPath, err := normalpath.Rel(fsRoot, absPath) + if err != nil { + return "", "", err } - logger.Debug( - "buffetch no termination found", - zap.String("inputSubDirPath", inputSubDirPath), - ) - return inputSubDirPath, ".", false, nil + return fsRoot, fsRelPath, nil } // We attempt to fix up paths we get back to better printing to the user. diff --git a/private/buf/buffetch/internal/reader_test.go b/private/buf/buffetch/internal/reader_test.go index 6949b3c2c4..0a8b40b30e 100644 --- a/private/buf/buffetch/internal/reader_test.go +++ b/private/buf/buffetch/internal/reader_test.go @@ -216,7 +216,6 @@ func TestGetReadBucketCloserForOSProtoFileNoWorkspaceTerminateFileName(t *testin zap.NewNop(), storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five/proto/foo.proto", - nil, testNewTerminateAtFileNamesFunc("buf.yaml"), ) require.NoError(t, err) @@ -236,7 +235,6 @@ func TestGetReadBucketCloserForOSProtoFileTerminateFileName(t *testing.T) { storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five/proto/foo.proto", testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) @@ -269,7 +267,6 @@ func TestGetReadBucketCloserForOSProtoFileParentPwd(t *testing.T) { storageos.NewProvider(), "five/proto/foo.proto", testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) @@ -304,7 +301,6 @@ func TestGetReadBucketCloserForOSProtoFileAbsPwd(t *testing.T) { storageos.NewProvider(), normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five/proto/foo.proto"), testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) @@ -326,7 +322,6 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLTerminateFileName(t *testing. 
storageos.NewProvider(), "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto", testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, ".", readBucketCloser.SubDirPath()) @@ -356,7 +351,6 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLParentPwd(t *testing.T) { storageos.NewProvider(), "five/proto/foo.proto", testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, ".", readBucketCloser.SubDirPath()) @@ -392,7 +386,6 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLAbsPwd(t *testing.T) { storageos.NewProvider(), normalpath.Join(absDirPath, "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto"), testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index e035830c59..8fb4c91771 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ b/private/buf/buftarget/bucket_targeting.go @@ -25,17 +25,22 @@ import ( // TODO(doria): we need to add the following info: // BufWorkYAMLDirPaths (for v1 vs. v2 workspaces) +// Document that everything here is relative paths // BucketTargeting provides the path to the controllng workspace, target paths, and target // exclude paths mapped to the bucket. type BucketTargeting interface { - // ControllingWorkspace returns the mapped path for the controlling workspace. - ControllingWorkspace() string - // TargetPaths returns the target paths mapped to the bucket. + // ControllingWorkspace returns the path for the controlling workspace relative to the root of the bucket. + ControllingWorkspacePath() string + // InputPath returns the input path relative to the root fo the bucket + // TODO(doria): should the input path be treated differently from all other target paths? + InputPath() string + // TargetPaths returns the target paths relative to the root of the bucket. TargetPaths() []string - // TargetExcludePaths returns the target exclude paths mapped to the bucket. + // TargetExcludePaths returns the target exclude paths relative to the root of the bucket. TargetExcludePaths() []string // Terminated returns whether the controlling workspace was found through the terminateFunc. + // TODO(doria): should be be able to kill this. 
Terminated() bool isBucketTargeting() @@ -44,13 +49,13 @@ type BucketTargeting interface { func NewBucketTargeting( ctx context.Context, logger *zap.Logger, - inputBucket storage.ReadBucket, - inputSubDirPath string, + bucket storage.ReadBucket, + inputPath string, targetPaths []string, targetExcludePaths []string, terminateFunc TerminateFunc, // TODO(doria): move that out of buffetch ) (BucketTargeting, error) { - return newBucketTargeting(ctx, logger, inputBucket, inputSubDirPath, targetPaths, targetExcludePaths, terminateFunc) + return newBucketTargeting(ctx, logger, bucket, inputPath, targetPaths, targetExcludePaths, terminateFunc) } // *** PRIVATE *** @@ -60,14 +65,19 @@ var ( ) type bucketTargeting struct { - controllingWorkspace string - targetPaths []string - targetExcludePaths []string - terminated bool + controllingWorkspacePath string + inputPath string + targetPaths []string + targetExcludePaths []string + terminated bool +} + +func (b *bucketTargeting) ControllingWorkspacePath() string { + return b.controllingWorkspacePath } -func (b *bucketTargeting) ControllingWorkspace() string { - return b.controllingWorkspace +func (b *bucketTargeting) InputPath() string { + return b.inputPath } func (b *bucketTargeting) TargetPaths() []string { @@ -88,63 +98,63 @@ func (b *bucketTargeting) isBucketTargeting() { func newBucketTargeting( ctx context.Context, logger *zap.Logger, - inputBucket storage.ReadBucket, + bucket storage.ReadBucket, inputPath string, targetPaths []string, targetExcludePaths []string, terminateFunc TerminateFunc, ) (*bucketTargeting, error) { // First we map the controlling workspace for the inputPath. - controllingWorkspace, mappedInputPath, terminated, err := mapControllingWorkspaceAndPath( + controllingWorkspacePath, mappedInputPath, terminated, err := mapControllingWorkspaceAndPath( ctx, logger, - inputBucket, + bucket, inputPath, terminateFunc, ) if err != nil { return nil, err } - // The input path should not be treated differently from just a normal target path. - mappedTargetPaths := []string{mappedInputPath} + mappedTargetPaths := make([]string, len(targetPaths)) // Then we do the same for each target path. If the target paths resolve to different // controlling workspaces, then we return an error. // We currently do not compile nested workspaces, but this algorithm lets us potentially // handle nested workspaces in the future. // TODO(doria): this shouldn't have a big impact on performance, right? // TODO(doria): do we need to handle that there was a termination through the paths? maybe. 
- for _, targetPath := range targetPaths { - controllingWorkspaceForPath, mappedPath, _, err := mapControllingWorkspaceAndPath( + for i, targetPath := range targetPaths { + controllingWorkspacePathForTargetPath, mappedPath, _, err := mapControllingWorkspaceAndPath( ctx, logger, - inputBucket, + bucket, targetPath, terminateFunc, ) if err != nil { return nil, err } - if controllingWorkspaceForPath != controllingWorkspace { - return nil, fmt.Errorf("more than one workspace resolved for given paths: %q, %q", controllingWorkspaceForPath, controllingWorkspace) + if controllingWorkspacePathForTargetPath != controllingWorkspacePath { + return nil, fmt.Errorf("more than one workspace resolved for given paths: %q, %q", controllingWorkspacePathForTargetPath, controllingWorkspacePath) } - mappedTargetPaths = append(mappedTargetPaths, mappedPath) + mappedTargetPaths[i] = mappedPath } // NOTE: we do not map excluded paths to their own workspaces -- we use the controlling // workspace we resolved through our input path and target paths. If an excluded path does // not exist, we do not validate this. - var mappedTargetExcludePaths []string - for _, targetExcludePath := range targetExcludePaths { - mappedTargetExcludePath, err := normalpath.Rel(controllingWorkspace, targetExcludePath) + mappedTargetExcludePaths := make([]string, len(targetExcludePaths)) + for i, targetExcludePath := range targetExcludePaths { + mappedTargetExcludePath, err := normalpath.Rel(controllingWorkspacePath, targetExcludePath) if err != nil { return nil, err } - mappedTargetExcludePaths = append(mappedTargetExcludePaths, mappedTargetExcludePath) + mappedTargetExcludePaths[i] = mappedTargetExcludePath } return &bucketTargeting{ - controllingWorkspace: controllingWorkspace, - targetPaths: mappedTargetPaths, - targetExcludePaths: mappedTargetExcludePaths, - terminated: terminated, + controllingWorkspacePath: controllingWorkspacePath, + inputPath: mappedInputPath, + targetPaths: mappedTargetPaths, + targetExcludePaths: mappedTargetExcludePaths, + terminated: terminated, }, nil } @@ -153,18 +163,18 @@ func newBucketTargeting( func mapControllingWorkspaceAndPath( ctx context.Context, logger *zap.Logger, - inputBucket storage.ReadBucket, + bucket storage.ReadBucket, path string, terminateFunc TerminateFunc, ) (string, string, bool, error) { path, err := normalpath.NormalizeAndValidate(path) if err != nil { - return "", "", err + return "", "", false, err } // If no terminateFunc is passed, we can simply assume that we are mapping the bucket at // the path. if terminateFunc == nil { - return path, ".", nil + return path, ".", false, nil } // We can't do this in a traditional loop like this: // @@ -176,7 +186,7 @@ func mapControllingWorkspaceAndPath( // Instead, we effectively do a do-while loop. 
curDirPath := path for { - terminate, err := terminateFunc(ctx, inputBucket, curDirPath, path) + terminate, err := terminateFunc(ctx, bucket, curDirPath, path) if err != nil { return "", "", false, err } From b3daed062550e5374b1976623ff40b97659f8619 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Thu, 15 Feb 2024 19:38:47 -0500 Subject: [PATCH 03/29] WIP --- private/buf/bufctl/controller.go | 34 +- private/buf/bufctl/option.go | 38 +- private/buf/buffetch/buffetch.go | 47 +- private/buf/buffetch/internal/internal.go | 52 ++- .../buffetch/internal/read_bucket_closer.go | 36 +- .../buffetch/internal/read_write_bucket.go | 26 +- private/buf/buffetch/internal/reader.go | 283 ++++++++---- private/buf/buffetch/internal/reader_test.go | 119 +++-- private/buf/buffetch/reader.go | 27 +- private/buf/buftarget/bucket_targeting.go | 132 +++--- .../buf/buftarget/controlling_workspace.go | 59 +++ private/buf/buftarget/terminate.go | 70 ++- private/buf/bufworkspace/module_targeting.go | 21 +- .../workspace_dep_manager_provider.go | 32 +- .../buf/bufworkspace/workspace_provider.go | 312 ++----------- .../buf/bufworkspace/workspace_targeting.go | 418 +++++++++++++++--- private/buf/bufworkspace/workspace_test.go | 66 ++- private/buf/cmd/buf/buf_test.go | 12 +- .../command/generate/generate_unix_test.go | 8 - private/buf/cmd/buf/workspace_test.go | 4 +- .../proto/workspacetest/workspace_test.go | 2 - .../bufcheck/bufbreaking/bufbreaking_test.go | 25 ++ .../bufpkg/bufcheck/buflint/buflint_test.go | 13 + private/bufpkg/bufconfig/terminate.go | 235 ---------- private/bufpkg/bufimage/build_image.go | 2 + private/bufpkg/bufmodule/module_dep.go | 1 - .../bufpkg/bufmodule/module_set_builder.go | 1 + .../bufpkg/bufmodule/proto_file_tracker.go | 1 + 28 files changed, 1113 insertions(+), 963 deletions(-) create mode 100644 private/buf/buftarget/controlling_workspace.go delete mode 100644 private/bufpkg/bufconfig/terminate.go diff --git a/private/buf/bufctl/controller.go b/private/buf/bufctl/controller.go index 1df0501c71..2f6ccbbe87 100644 --- a/private/buf/bufctl/controller.go +++ b/private/buf/bufctl/controller.go @@ -792,7 +792,7 @@ func (c *controller) getWorkspaceForProtoFileRef( // TODO FUTURE: Feed flag names through to here. return nil, fmt.Errorf("--exclude-path is not valid for use with .proto file references") } - readBucketCloser, err := c.buffetchReader.GetSourceReadBucketCloser( + readBucketCloser, bucketTargeting, err := c.buffetchReader.GetSourceReadBucketCloser( ctx, c.container, protoFileRef, @@ -804,19 +804,9 @@ func (c *controller) getWorkspaceForProtoFileRef( defer func() { retErr = multierr.Append(retErr, readBucketCloser.Close()) }() - // The ProtoFilePath is still relative to the input bucket, not the bucket - // retrieved from buffetch. Treat the path just as we do with targetPaths - // and externalPaths in withPathsForBucketExtender. 
- protoFilePath, err := readBucketCloser.PathForExternalPath(protoFileRef.ProtoFilePath()) - if err != nil { - return nil, err - } options := []bufworkspace.WorkspaceBucketOption{ - bufworkspace.WithTargetSubDirPath( - readBucketCloser.SubDirPath(), - ), bufworkspace.WithProtoFileTargetPath( - protoFilePath, + protoFileRef.ProtoFilePath(), protoFileRef.IncludePackageFiles(), ), bufworkspace.WithConfigOverride( @@ -832,6 +822,7 @@ func (c *controller) getWorkspaceForProtoFileRef( return c.workspaceProvider.GetWorkspaceForBucket( ctx, readBucketCloser, + bucketTargeting, options..., ) } @@ -841,7 +832,7 @@ func (c *controller) getWorkspaceForSourceRef( sourceRef buffetch.SourceRef, functionOptions *functionOptions, ) (_ bufworkspace.Workspace, retErr error) { - readBucketCloser, err := c.buffetchReader.GetSourceReadBucketCloser( + readBucketCloser, bucketTargeting, err := c.buffetchReader.GetSourceReadBucketCloser( ctx, c.container, sourceRef, @@ -853,17 +844,13 @@ func (c *controller) getWorkspaceForSourceRef( defer func() { retErr = multierr.Append(retErr, readBucketCloser.Close()) }() - functionOptions, err = functionOptions.withPathsForBucketExtender(readBucketCloser) - if err != nil { - return nil, err - } options := []bufworkspace.WorkspaceBucketOption{ bufworkspace.WithTargetSubDirPath( - readBucketCloser.SubDirPath(), + bucketTargeting.InputPath(), ), bufworkspace.WithTargetPaths( - functionOptions.targetPaths, - functionOptions.targetExcludePaths, + bucketTargeting.TargetPaths(), + bucketTargeting.TargetExcludePaths(), ), bufworkspace.WithConfigOverride( functionOptions.configOverride, @@ -878,6 +865,7 @@ func (c *controller) getWorkspaceForSourceRef( return c.workspaceProvider.GetWorkspaceForBucket( ctx, readBucketCloser, + bucketTargeting, options..., ) } @@ -887,7 +875,7 @@ func (c *controller) getWorkspaceDepManagerForDirRef( dirRef buffetch.DirRef, functionOptions *functionOptions, ) (_ bufworkspace.WorkspaceDepManager, retErr error) { - readWriteBucket, err := c.buffetchReader.GetDirReadWriteBucket( + readWriteBucket, bucketTargeting, err := c.buffetchReader.GetDirReadWriteBucket( ctx, c.container, dirRef, @@ -901,9 +889,7 @@ func (c *controller) getWorkspaceDepManagerForDirRef( return c.workspaceDepManagerProvider.GetWorkspaceDepManager( ctx, readWriteBucket, - bufworkspace.WithTargetSubDirPath( - readWriteBucket.SubDirPath(), - ), + bucketTargeting, ) } diff --git a/private/buf/bufctl/option.go b/private/buf/bufctl/option.go index 581f43f544..733e13130d 100644 --- a/private/buf/bufctl/option.go +++ b/private/buf/bufctl/option.go @@ -14,7 +14,9 @@ package bufctl -import "github.com/bufbuild/buf/private/buf/buffetch" +import ( + "github.com/bufbuild/buf/private/buf/buffetch" +) type ControllerOption func(*controller) @@ -144,28 +146,6 @@ func newFunctionOptions(controller *controller) *functionOptions { } } -func (f *functionOptions) withPathsForBucketExtender( - bucketExtender buffetch.BucketExtender, -) (*functionOptions, error) { - deref := *f - c := &deref - for i, inputTargetPath := range c.targetPaths { - targetPath, err := bucketExtender.PathForExternalPath(inputTargetPath) - if err != nil { - return nil, err - } - c.targetPaths[i] = targetPath - } - for i, inputTargetExcludePath := range c.targetExcludePaths { - targetExcludePath, err := bucketExtender.PathForExternalPath(inputTargetExcludePath) - if err != nil { - return nil, err - } - c.targetExcludePaths[i] = targetExcludePath - } - return c, nil -} - func (f *functionOptions) getGetReadBucketCloserOptions() 
[]buffetch.GetReadBucketCloserOption { var getReadBucketCloserOptions []buffetch.GetReadBucketCloserOption if f.copyToInMemory { @@ -174,6 +154,18 @@ func (f *functionOptions) getGetReadBucketCloserOptions() []buffetch.GetReadBuck buffetch.GetReadBucketCloserWithCopyToInMemory(), ) } + if len(f.targetPaths) > 0 { + getReadBucketCloserOptions = append( + getReadBucketCloserOptions, + buffetch.GetReadBucketCloserWithTargetPaths(f.targetPaths), + ) + } + if len(f.targetExcludePaths) > 0 { + getReadBucketCloserOptions = append( + getReadBucketCloserOptions, + buffetch.GetReadBucketCloserWithTargetExcludePaths(f.targetExcludePaths), + ) + } if f.configOverride != "" { // If we have a config override, we do not search for buf.yamls or buf.work.yamls, // instead acting as if the config override was the only configuration file available. diff --git a/private/buf/buffetch/buffetch.go b/private/buf/buffetch/buffetch.go index 47efb7820c..5080b9db3e 100644 --- a/private/buf/buffetch/buffetch.go +++ b/private/buf/buffetch/buffetch.go @@ -22,6 +22,7 @@ import ( "net/http" "github.com/bufbuild/buf/private/buf/buffetch/internal" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufconfig" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/pkg/app" @@ -321,7 +322,7 @@ type SourceReader interface { container app.EnvStdinContainer, sourceRef SourceRef, options ...GetReadBucketCloserOption, - ) (ReadBucketCloser, error) + ) (ReadBucketCloser, buftarget.BucketTargeting, error) } // GetReadBucketCloserOption is an option for a GetSourceReadBucketCloser call. @@ -345,6 +346,22 @@ func GetReadBucketCloserWithNoSearch() GetReadBucketCloserOption { } } +// GetReadBucketCloserWithTargetPaths sets the target paths for bucket targeting information +// returned with the bucket. +func GetReadBucketCloserWithTargetPaths(targetPaths []string) GetReadBucketCloserOption { + return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { + getReadBucketCloserOptions.targetPaths = targetPaths + } +} + +// GetReadBucketCloserWithTargetExcludePaths sets the target exclude paths for bucket targeting +// information returned with the bucket. +func GetReadBucketCloserWithTargetExcludePaths(targetExcludePaths []string) GetReadBucketCloserOption { + return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { + getReadBucketCloserOptions.targetExcludePaths = targetExcludePaths + } +} + // DirReader is a dir reader. type DirReader interface { // GetDirReadWriteBucket gets the dir bucket. @@ -353,7 +370,7 @@ type DirReader interface { container app.EnvStdinContainer, dirRef DirRef, options ...GetReadWriteBucketOption, - ) (ReadWriteBucket, error) + ) (ReadWriteBucket, buftarget.BucketTargeting, error) } // GetReadWriteBucketOption is an option for a GetDirReadWriteBucket call. @@ -369,6 +386,22 @@ func GetReadWriteBucketWithNoSearch() GetReadWriteBucketOption { } } +// GetReadWriteBucketWithTargetPaths sets the target paths for the bucket targeting information +// returned with the bucket. +func GetReadWriteBucketWithTargetPaths(targetPaths []string) GetReadWriteBucketOption { + return func(getReadWriteBucketOptions *getReadWriteBucketOptions) { + getReadWriteBucketOptions.targetPaths = targetPaths + } +} + +// GetReadWriteBucketWithTargetExcludePaths sets the target exclude paths for the bucket +// targeting information returned with the bucket. 
+func GetReadWriteBucketWithTargetExcludePaths(targetExcludePaths []string) GetReadWriteBucketOption { + return func(getReadWriteBucketOptions *getReadWriteBucketOptions) { + getReadWriteBucketOptions.targetExcludePaths = targetExcludePaths + } +} + // ModuleFetcher is a module fetcher. type ModuleFetcher interface { // GetModuleKey gets the ModuleKey. @@ -544,8 +577,10 @@ func GetInputConfigForString( } type getReadBucketCloserOptions struct { - noSearch bool - copyToInMemory bool + noSearch bool + copyToInMemory bool + targetPaths []string + targetExcludePaths []string } func newGetReadBucketCloserOptions() *getReadBucketCloserOptions { @@ -553,7 +588,9 @@ func newGetReadBucketCloserOptions() *getReadBucketCloserOptions { } type getReadWriteBucketOptions struct { - noSearch bool + noSearch bool + targetPaths []string + targetExcludePaths []string } func newGetReadWriteBucketOptions() *getReadWriteBucketOptions { diff --git a/private/buf/buffetch/internal/internal.go b/private/buf/buffetch/internal/internal.go index d55c744dd0..938f907e97 100644 --- a/private/buf/buffetch/internal/internal.go +++ b/private/buf/buffetch/internal/internal.go @@ -184,7 +184,7 @@ type ProtoFileRef interface { BucketRef // Path is the normalized path to the file reference. Path() string - // IncludePackageFiles says to include the same package files TODO update comment + // IncludePackageFiles says to include the files from the same package IncludePackageFiles() bool FileScheme() FileScheme protoFileRef() @@ -412,12 +412,6 @@ type BucketExtender interface { // the directory that contained this terminate file, and the subDirPath will be the sub-direftory of // the actual asset relative to the terminate file. SubDirPath() string - - // PathForExternalPath takes a path external to the asset and converts it to - // a path that is relative to the asset. - // - // The returned path will be normalized and validated. - PathForExternalPath(externalPath string) (string, error) } // ReadBucketCloser is a bucket returned from GetReadBucketCloser. @@ -444,23 +438,19 @@ type Reader interface { options ...GetFileOption, ) (io.ReadCloser, error) // GetReadBucketCloser gets the bucket. - // - // If done for a ProtoFileRef, the Bucket will be for the enclosing module or workspace via - // terminateFileNames or protoFileTerminateFileNames. No filtering of the Bucket is performed, this is - // the responsibility of the caller. GetReadBucketCloser( ctx context.Context, container app.EnvStdinContainer, bucketRef BucketRef, options ...GetReadBucketCloserOption, - ) (ReadBucketCloser, error) + ) (ReadBucketCloser, buftarget.BucketTargeting, error) // GetReadWriteBucket gets the bucket. GetReadWriteBucket( ctx context.Context, container app.EnvStdinContainer, dirRef DirRef, options ...GetReadWriteBucketOption, - ) (ReadWriteBucket, error) + ) (ReadWriteBucket, buftarget.BucketTargeting, error) // GetModuleKey gets the ModuleKey. GetModuleKey( ctx context.Context, @@ -468,15 +458,6 @@ type Reader interface { moduleRef ModuleRef, options ...GetModuleOption, ) (bufmodule.ModuleKey, error) - - // TODO(doria): rename this - // GetBucketTargeting( - // ctx context.Context, - // inputSubDirPath string, - // targetPaths []string, - // targetExcludePaths []string, - // terminateFunc TerminateFunc, // TODO(doria): move that out of buffetch - // ) (buftarget.BucketTargeting, error) } // NewReader returns a new Reader. 
@@ -857,6 +838,20 @@ func WithGetReadBucketCloserProtoFileTerminateFunc(protoFileTerminateFunc buftar } } +// WithGetReadBucketCloserTargetPaths sets the target paths for the bucket targeting information. +func WithGetReadBucketCloserTargetPaths(targetPaths []string) GetReadBucketCloserOption { + return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { + getReadBucketCloserOptions.targetPaths = targetPaths + } +} + +// WithGetReadBucketCloserTargetExcludePaths sets the target exclude paths for the bucket targeting information. +func WithGetReadBucketCloserTargetExcludePaths(targetExcludePaths []string) GetReadBucketCloserOption { + return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { + getReadBucketCloserOptions.targetExcludePaths = targetExcludePaths + } +} + // GetReadWriteBucketOption is a GetReadWriteBucket option. type GetReadWriteBucketOption func(*getReadWriteBucketOptions) @@ -873,6 +868,19 @@ func WithGetReadWriteBucketTerminateFunc(terminateFunc buftarget.TerminateFunc) } } +// WithGetReadWriteBucketTargetPaths sets the target paths for the bucket targeting information. +func WithGetReadWriteBucketTargetPaths(targetPaths []string) GetReadWriteBucketOption { + return func(getReadWriteBucketOptions *getReadWriteBucketOptions) { + getReadWriteBucketOptions.targetPaths = targetPaths + } +} + +func WithGetReadWriteBucketTargetExcludePaths(targetExcludePaths []string) GetReadWriteBucketOption { + return func(getReadWriteBucketOptions *getReadWriteBucketOptions) { + getReadWriteBucketOptions.targetExcludePaths = targetExcludePaths + } +} + // PutFileOption is a PutFile option. type PutFileOption func(*putFileOptions) diff --git a/private/buf/buffetch/internal/read_bucket_closer.go b/private/buf/buffetch/internal/read_bucket_closer.go index e0034c7c4f..28c3a96ea3 100644 --- a/private/buf/buffetch/internal/read_bucket_closer.go +++ b/private/buf/buffetch/internal/read_bucket_closer.go @@ -16,8 +16,6 @@ package internal import ( "context" - "fmt" - "path/filepath" "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/pkg/normalpath" @@ -30,12 +28,12 @@ var _ ReadBucketCloser = &readBucketCloser{} type readBucketCloser struct { storage.ReadBucketCloser - subDirPath string - pathForExternalPath func(string) (string, error) + subDirPath string } func newReadBucketCloser( storageReadBucketCloser storage.ReadBucketCloser, + bucketPath string, bucketTargeting buftarget.BucketTargeting, ) (*readBucketCloser, error) { normalizedSubDirPath, err := normalpath.NormalizeAndValidate(bucketTargeting.InputPath()) @@ -45,24 +43,6 @@ func newReadBucketCloser( return &readBucketCloser{ ReadBucketCloser: storageReadBucketCloser, subDirPath: normalizedSubDirPath, - // This turns paths that were done relative to the root of the input into paths - // that are now relative to the mapped bucket. - // - // This happens if you do i.e. 
.git#subdir=foo/bar --path foo/bar/baz.proto - // We need to turn the path into baz.proto - pathForExternalPath: func(externalPath string) (string, error) { - if filepath.IsAbs(externalPath) { - return "", fmt.Errorf("%s: absolute paths cannot be used for this input type", externalPath) - } - if !normalpath.EqualsOrContainsPath(bucketTargeting.ControllingWorkspacePath(), externalPath, normalpath.Relative) { - return "", fmt.Errorf("path %q from input does not contain path %q", bucketTargeting.ControllingWorkspacePath(), externalPath) - } - relPath, err := normalpath.Rel(bucketTargeting.ControllingWorkspacePath(), externalPath) - if err != nil { - return "", err - } - return normalpath.NormalizeAndValidate(relPath) - }, }, nil } @@ -70,9 +50,8 @@ func newReadBucketCloserForReadWriteBucket( readWriteBucket ReadWriteBucket, ) *readBucketCloser { return &readBucketCloser{ - ReadBucketCloser: storage.NopReadBucketCloser(readWriteBucket), - subDirPath: readWriteBucket.SubDirPath(), - pathForExternalPath: readWriteBucket.PathForExternalPath, + ReadBucketCloser: storage.NopReadBucketCloser(readWriteBucket), + subDirPath: readWriteBucket.SubDirPath(), } } @@ -80,10 +59,6 @@ func (r *readBucketCloser) SubDirPath() string { return r.subDirPath } -func (r *readBucketCloser) PathForExternalPath(externalPath string) (string, error) { - return r.pathForExternalPath(externalPath) -} - func (r *readBucketCloser) copyToInMemory(ctx context.Context) (*readBucketCloser, error) { storageReadBucket, err := storagemem.CopyReadBucket(ctx, r.ReadBucketCloser) if err != nil { @@ -94,8 +69,7 @@ func (r *readBucketCloser) copyToInMemory(ctx context.Context) (*readBucketClose ReadBucket: storageReadBucket, closeFunc: r.ReadBucketCloser.Close, }, - subDirPath: r.subDirPath, - pathForExternalPath: r.pathForExternalPath, + subDirPath: r.subDirPath, }, nil } diff --git a/private/buf/buffetch/internal/read_write_bucket.go b/private/buf/buffetch/internal/read_write_bucket.go index d0b657bc6d..5dbade8311 100644 --- a/private/buf/buffetch/internal/read_write_bucket.go +++ b/private/buf/buffetch/internal/read_write_bucket.go @@ -15,8 +15,6 @@ package internal import ( - "path/filepath" - "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/storage" @@ -27,15 +25,14 @@ var _ ReadWriteBucket = &readWriteBucket{} type readWriteBucket struct { storage.ReadWriteBucket - subDirPath string - pathForExternalPath func(string) (string, error) + subDirPath string } func newReadWriteBucket( storageReadWriteBucket storage.ReadWriteBucket, + bucketPath string, bucketTargeting buftarget.BucketTargeting, ) (*readWriteBucket, error) { - // TODO(doria): document this normalizedSubDirPath, err := normalpath.NormalizeAndValidate(bucketTargeting.InputPath()) if err != nil { return nil, err @@ -43,28 +40,9 @@ func newReadWriteBucket( return &readWriteBucket{ ReadWriteBucket: storageReadWriteBucket, subDirPath: normalizedSubDirPath, - pathForExternalPath: func(externalPath string) (string, error) { - absBucketPath, err := filepath.Abs(normalpath.Unnormalize(bucketTargeting.ControllingWorkspacePath())) - if err != nil { - return "", err - } - absExternalPath, err := filepath.Abs(normalpath.Unnormalize(externalPath)) - if err != nil { - return "", err - } - path, err := filepath.Rel(absBucketPath, absExternalPath) - if err != nil { - return "", err - } - return normalpath.NormalizeAndValidate(path) - }, }, nil } func (r *readWriteBucket) SubDirPath() string { return 
r.subDirPath } - -func (r *readWriteBucket) PathForExternalPath(externalPath string) (string, error) { - return r.pathForExternalPath(externalPath) -} diff --git a/private/buf/buffetch/internal/reader.go b/private/buf/buffetch/internal/reader.go index cbc0943cdd..adef66f481 100644 --- a/private/buf/buffetch/internal/reader.go +++ b/private/buf/buffetch/internal/reader.go @@ -112,12 +112,11 @@ func (r *reader) GetReadBucketCloser( container app.EnvStdinContainer, bucketRef BucketRef, options ...GetReadBucketCloserOption, -) (retReadBucketCloser ReadBucketCloser, retErr error) { +) (retReadBucketCloser ReadBucketCloser, _ buftarget.BucketTargeting, retErr error) { getReadBucketCloserOptions := newGetReadBucketCloserOptions() for _, option := range options { option(getReadBucketCloserOptions) } - if getReadBucketCloserOptions.copyToInMemory { defer func() { if retReadBucketCloser != nil { @@ -135,31 +134,36 @@ func (r *reader) GetReadBucketCloser( } }() } - switch t := bucketRef.(type) { case ArchiveRef: return r.getArchiveBucket( ctx, container, t, + getReadBucketCloserOptions.targetPaths, + getReadBucketCloserOptions.targetExcludePaths, getReadBucketCloserOptions.terminateFunc, ) case DirRef: - readWriteBucket, err := r.getDirBucket( + readWriteBucket, bucketTargeting, err := r.getDirBucket( ctx, container, t, + getReadBucketCloserOptions.targetPaths, + getReadBucketCloserOptions.targetExcludePaths, getReadBucketCloserOptions.terminateFunc, ) if err != nil { - return nil, err + return nil, nil, err } - return newReadBucketCloserForReadWriteBucket(readWriteBucket), nil + return newReadBucketCloserForReadWriteBucket(readWriteBucket), bucketTargeting, nil case GitRef: return r.getGitBucket( ctx, container, t, + getReadBucketCloserOptions.targetPaths, + getReadBucketCloserOptions.targetExcludePaths, getReadBucketCloserOptions.terminateFunc, ) case ProtoFileRef: @@ -167,11 +171,10 @@ func (r *reader) GetReadBucketCloser( ctx, container, t, - // getReadBucketCloserOptions.terminateFunc, getReadBucketCloserOptions.protoFileTerminateFunc, ) default: - return nil, fmt.Errorf("unknown BucketRef type: %T", bucketRef) + return nil, nil, fmt.Errorf("unknown BucketRef type: %T", bucketRef) } } @@ -180,7 +183,7 @@ func (r *reader) GetReadWriteBucket( container app.EnvStdinContainer, dirRef DirRef, options ...GetReadWriteBucketOption, -) (ReadWriteBucket, error) { +) (ReadWriteBucket, buftarget.BucketTargeting, error) { getReadWriteBucketOptions := newGetReadWriteBucketOptions() for _, option := range options { option(getReadWriteBucketOptions) @@ -189,6 +192,8 @@ func (r *reader) GetReadWriteBucket( ctx, container, dirRef, + getReadWriteBucketOptions.targetPaths, + getReadWriteBucketOptions.targetExcludePaths, getReadWriteBucketOptions.terminateFunc, ) } @@ -235,11 +240,13 @@ func (r *reader) getArchiveBucket( ctx context.Context, container app.EnvStdinContainer, archiveRef ArchiveRef, + targetPaths []string, + targetExcludePaths []string, terminateFunc buftarget.TerminateFunc, -) (_ ReadBucketCloser, retErr error) { +) (ReadBucketCloser, buftarget.BucketTargeting, error) { readCloser, size, err := r.getFileReadCloserAndSize(ctx, container, archiveRef, false) if err != nil { - return nil, err + return nil, nil, err } readWriteBucket := storagemem.NewReadWriteBucket() switch archiveType := archiveRef.ArchiveType(); archiveType { @@ -252,21 +259,21 @@ func (r *reader) getArchiveBucket( archiveRef.StripComponents(), ), ); err != nil { - return nil, err + return nil, nil, err } case ArchiveTypeZip: var 
readerAt io.ReaderAt if size < 0 { data, err := io.ReadAll(readCloser) if err != nil { - return nil, err + return nil, nil, err } readerAt = bytes.NewReader(data) size = int64(len(data)) } else { readerAt, err = ioext.ReaderAtForReader(readCloser) if err != nil { - return nil, err + return nil, nil, err } } if err := storagearchive.Unzip( @@ -278,24 +285,42 @@ func (r *reader) getArchiveBucket( archiveRef.StripComponents(), ), ); err != nil { - return nil, err + return nil, nil, err } default: - return nil, fmt.Errorf("unknown ArchiveType: %v", archiveType) + return nil, nil, fmt.Errorf("unknown ArchiveType: %v", archiveType) } - return getReadBucketCloserForBucket(ctx, r.logger, storage.NopReadBucketCloser(readWriteBucket), archiveRef.SubDirPath(), terminateFunc) + return getReadBucketCloserForBucket( + ctx, + r.logger, + storage.NopReadBucketCloser(readWriteBucket), + archiveRef.SubDirPath(), + targetPaths, + targetExcludePaths, + terminateFunc, + ) } func (r *reader) getDirBucket( ctx context.Context, container app.EnvStdinContainer, dirRef DirRef, + targetPaths []string, + targetExcludePaths []string, terminateFunc buftarget.TerminateFunc, -) (ReadWriteBucket, error) { +) (ReadWriteBucket, buftarget.BucketTargeting, error) { if !r.localEnabled { - return nil, NewReadLocalDisabledError() + return nil, nil, NewReadLocalDisabledError() } - return getReadWriteBucketForOS(ctx, r.logger, r.storageosProvider, dirRef.Path(), terminateFunc) + return getReadWriteBucketForOS( + ctx, + r.logger, + r.storageosProvider, + dirRef.Path(), + targetPaths, + targetExcludePaths, + terminateFunc, + ) } func (r *reader) getProtoFileBucket( @@ -303,9 +328,9 @@ func (r *reader) getProtoFileBucket( container app.EnvStdinContainer, protoFileRef ProtoFileRef, terminateFunc buftarget.TerminateFunc, -) (ReadBucketCloser, error) { +) (ReadBucketCloser, buftarget.BucketTargeting, error) { if !r.localEnabled { - return nil, NewReadLocalDisabledError() + return nil, nil, NewReadLocalDisabledError() } return getReadBucketCloserForOSProtoFile( ctx, @@ -320,17 +345,19 @@ func (r *reader) getGitBucket( ctx context.Context, container app.EnvStdinContainer, gitRef GitRef, + targetPaths []string, + targetExcludePaths []string, terminateFunc buftarget.TerminateFunc, -) (_ ReadBucketCloser, retErr error) { +) (ReadBucketCloser, buftarget.BucketTargeting, error) { if !r.gitEnabled { - return nil, NewReadGitDisabledError() + return nil, nil, NewReadGitDisabledError() } if r.gitCloner == nil { - return nil, errors.New("git cloner is nil") + return nil, nil, errors.New("git cloner is nil") } gitURL, err := getGitURL(gitRef) if err != nil { - return nil, err + return nil, nil, err } readWriteBucket := storagemem.NewReadWriteBucket() if err := r.gitCloner.CloneToBucket( @@ -344,9 +371,17 @@ func (r *reader) getGitBucket( RecurseSubmodules: gitRef.RecurseSubmodules(), }, ); err != nil { - return nil, fmt.Errorf("could not clone %s: %v", gitURL, err) + return nil, nil, fmt.Errorf("could not clone %s: %v", gitURL, err) } - return getReadBucketCloserForBucket(ctx, r.logger, storage.NopReadBucketCloser(readWriteBucket), gitRef.SubDirPath(), terminateFunc) + return getReadBucketCloserForBucket( + ctx, + r.logger, + storage.NopReadBucketCloser(readWriteBucket), + gitRef.SubDirPath(), + targetPaths, + targetExcludePaths, + terminateFunc, + ) } func (r *reader) getModuleKey( @@ -532,38 +567,46 @@ func getReadBucketCloserForBucket( logger *zap.Logger, inputBucket storage.ReadBucketCloser, inputSubDirPath string, + targetPaths []string, + 
targetExcludePaths []string, terminateFunc buftarget.TerminateFunc, -) (ReadBucketCloser, error) { - // TODO(doria): delete later, keeping some notes as we work. - // The point of doing this is to remap the bucket based on a controlling workspace if found - // and then also remapping the `SubDirPath` against this. +) (ReadBucketCloser, buftarget.BucketTargeting, error) { bucketTargeting, err := buftarget.NewBucketTargeting( ctx, logger, inputBucket, inputSubDirPath, - nil, // TODO(doria): we should plumb paths down here - nil, // TODO(doria): we should plumb paths down here + targetPaths, + targetExcludePaths, terminateFunc, ) if err != nil { - return nil, err + return nil, nil, err + } + var bucketPath string + if bucketTargeting.ControllingWorkspace() != nil { + bucketPath = bucketTargeting.ControllingWorkspace().Path() + } else { + bucketPath = bucketTargeting.InputPath() } - if bucketTargeting.ControllingWorkspacePath() != "." { + if bucketPath != "." { inputBucket = storage.MapReadBucketCloser( inputBucket, - storage.MapOnPrefix(bucketTargeting.ControllingWorkspacePath()), + storage.MapOnPrefix(bucketPath), ) + // We return the same bucket targeting information, since we are using mappers to remap + // paths with the controlling workspace as the prefix when using the bucket. } logger.Debug( "buffetch creating new bucket", - zap.String("controllingWorkspacePath", bucketTargeting.ControllingWorkspacePath()), + zap.String("bucketPath", bucketPath), zap.Strings("targetPaths", bucketTargeting.TargetPaths()), ) - return newReadBucketCloser( - inputBucket, - bucketTargeting, - ) + readBucketCloser, err := newReadBucketCloser(inputBucket, bucketPath, bucketTargeting) + if err != nil { + return nil, nil, err + } + return readBucketCloser, bucketTargeting, nil } // Use for directory-based buckets. 
@@ -572,32 +615,49 @@ func getReadWriteBucketForOS( logger *zap.Logger, storageosProvider storageos.Provider, inputDirPath string, + targetPaths []string, + targetExcludePaths []string, terminateFunc buftarget.TerminateFunc, -) (ReadWriteBucket, error) { - fsRoot, inputSubDirPath, err := fsRootAndFSRelPathForPath(inputDirPath) +) (ReadWriteBucket, buftarget.BucketTargeting, error) { + fsRoot, fsRootInputSubDirPath, err := fsRootAndFSRelPathForPath(inputDirPath) if err != nil { - return nil, err + return nil, nil, err + } + fsRootTargetPaths := make([]string, len(targetPaths)) + for i, targetPath := range targetPaths { + _, fsRootTargetPath, err := fsRootAndFSRelPathForPath(targetPath) + if err != nil { + return nil, nil, err + } + fsRootTargetPaths[i] = fsRootTargetPath + } + fsRootTargetExcludePaths := make([]string, len(targetExcludePaths)) + for i, targetExcludePath := range targetExcludePaths { + _, fsRootTargetExcludePath, err := fsRootAndFSRelPathForPath(targetExcludePath) + if err != nil { + return nil, nil, err + } + fsRootTargetExcludePaths[i] = fsRootTargetExcludePath } osRootBucket, err := storageosProvider.NewReadWriteBucket( fsRoot, storageos.ReadWriteBucketWithSymlinksIfSupported(), ) if err != nil { - return nil, err + return nil, nil, err } osRootBucketTargeting, err := buftarget.NewBucketTargeting( ctx, logger, osRootBucket, - inputSubDirPath, - nil, // TODO(doria): we should plumb paths down here - nil, // TODO(doria): we should plumb paths down here + fsRootInputSubDirPath, + fsRootTargetPaths, + fsRootTargetExcludePaths, terminateFunc, ) if err != nil { - return nil, attemptToFixOSRootBucketPathErrors(fsRoot, err) + return nil, nil, attemptToFixOSRootBucketPathErrors(fsRoot, err) } - // TODO(doria): I'd like to completely kill and refactor this if possible. // We now know where the workspace is relative to the FS root. // If the input path is provided as an absolute path, we create a new bucket for the // controlling workspace using an absolute path. @@ -610,39 +670,79 @@ func getReadWriteBucketForOS( // terminateFileLocation: path/to // returnMapPath: path/to // returnSubDirPath: foo - // Make bucket on: Rel(pwd, returnMapPath) + // Make bucket on: Rel(pwd, controlling workspace) // // inputDirPath: /users/alice/path/to/foo // terminateFileLocation: /users/alice/path/to // returnMapPath: /users/alice/path/to // returnSubDirPath: foo - // Make bucket on: FS root + returnMapPath (since absolute) + // Make bucket on: FS root + controlling workspace (since absolute) + // + // If no controlling workspace is found, we use the current working directory. 
var bucketPath string var inputPath string + bucketTargetPaths := make([]string, len(osRootBucketTargeting.TargetPaths())) + bucketTargetExcludePaths := make([]string, len(osRootBucketTargeting.TargetExcludePaths())) + pwd, err := osext.Getwd() + if err != nil { + return nil, nil, err + } + _, pwdFSRelPath, err := fsRootAndFSRelPathForPath(pwd) + if err != nil { + return nil, nil, err + } if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { - var err error - bucketPath = normalpath.Join(fsRoot, osRootBucketTargeting.ControllingWorkspacePath()) - inputPath, err = normalpath.Rel(bucketPath, normalpath.Join(fsRoot, inputSubDirPath)) - if err != nil { - return nil, err + if osRootBucketTargeting.ControllingWorkspace() != nil { + bucketPath = normalpath.Join(fsRoot, osRootBucketTargeting.ControllingWorkspace().Path()) + } else { + bucketPath = normalpath.Join(fsRoot, pwdFSRelPath) } + inputPath = osRootBucketTargeting.InputPath() + bucketTargetPaths = osRootBucketTargeting.TargetPaths() + bucketTargetExcludePaths = osRootBucketTargeting.TargetExcludePaths() } else { - pwd, err := osext.Getwd() - if err != nil { - return nil, err - } - _, pwdFSRelPath, err := fsRootAndFSRelPathForPath(pwd) - if err != nil { - return nil, err - } - bucketPath, err = normalpath.Rel(pwdFSRelPath, osRootBucketTargeting.ControllingWorkspacePath()) - if err != nil { - return nil, err - } - inputPath, err = normalpath.Rel(osRootBucketTargeting.ControllingWorkspacePath(), inputSubDirPath) - if err != nil { - return nil, err + if osRootBucketTargeting.ControllingWorkspace() != nil { + bucketPath, err = normalpath.Rel(pwdFSRelPath, osRootBucketTargeting.ControllingWorkspace().Path()) + if err != nil { + return nil, nil, err + } + inputPath = osRootBucketTargeting.InputPath() + bucketTargetPaths = osRootBucketTargeting.TargetPaths() + bucketTargetExcludePaths = osRootBucketTargeting.TargetExcludePaths() + + } else { + bucketPath = "." + inputPath, err = normalpath.Rel(pwdFSRelPath, osRootBucketTargeting.InputPath()) + if err != nil { + return nil, nil, err + } + for i, targetPath := range osRootBucketTargeting.TargetPaths() { + bucketTargetPaths[i], err = normalpath.Rel(pwdFSRelPath, targetPath) + if err != nil { + return nil, nil, err + } + } + for i, targetExcludePath := range osRootBucketTargeting.TargetExcludePaths() { + bucketTargetExcludePaths[i], err = normalpath.Rel(pwdFSRelPath, targetExcludePath) + if err != nil { + return nil, nil, err + } + } } + //for i, osRootTargetPath := range osRootBucketTargeting.TargetPaths() { + // targetPath, err := normalpath.Rel(pwdFSRelPath, osRootTargetPath) + // if err != nil { + // return nil, nil, err + // } + // bucketTargetPaths[i] = targetPath + //} + //for i, osRootTargetExcludePath := range osRootBucketTargeting.TargetExcludePaths() { + // targetExcludePath, err := normalpath.Rel(pwdFSRelPath, osRootTargetExcludePath) + // if err != nil { + // return nil, nil, err + // } + // bucketTargetExcludePaths[i] = targetExcludePath + //} } // Now that we've mapped the workspace against the FS root, we recreate the bucket with // at the sub dir path. 
@@ -654,24 +754,25 @@ func getReadWriteBucketForOS( storageos.ReadWriteBucketWithSymlinksIfSupported(), ) if err != nil { - return nil, err + return nil, nil, err } bucketTargeting, err := buftarget.NewBucketTargeting( ctx, logger, bucket, inputPath, - nil, // TODO(doria): we should plumb paths down here - nil, // TODO(doria): we should plumb paths down here + bucketTargetPaths, + bucketTargetExcludePaths, terminateFunc, ) if err != nil { - return nil, err + return nil, nil, err } - return newReadWriteBucket( - bucket, - bucketTargeting, - ) + readWriteBucket, err := newReadWriteBucket(bucket, bucketPath, bucketTargeting) + if err != nil { + return nil, nil, err + } + return readWriteBucket, bucketTargeting, nil } // Use for ProtoFileRefs. @@ -681,19 +782,25 @@ func getReadBucketCloserForOSProtoFile( storageosProvider storageos.Provider, protoFilePath string, terminateFunc buftarget.TerminateFunc, -) (ReadBucketCloser, error) { +) (ReadBucketCloser, buftarget.BucketTargeting, error) { + // For proto file refs, we treat the input directory as the directory of + // the file and the file as a target path. + // No other target paths and target exclude paths are supported with + // proto file refs. protoFileDir := normalpath.Dir(protoFilePath) - readWriteBucket, err := getReadWriteBucketForOS( + readWriteBucket, bucketTargeting, err := getReadWriteBucketForOS( ctx, logger, storageosProvider, protoFileDir, + []string{protoFilePath}, + nil, // no target exclude paths are supported for proto file refs terminateFunc, ) if err != nil { - return nil, err + return nil, nil, err } - return newReadBucketCloserForReadWriteBucket(readWriteBucket), nil + return newReadBucketCloserForReadWriteBucket(readWriteBucket), bucketTargeting, nil } // fsRootAndFSRelPathForPath is a helper function that takes a path and returns the FS @@ -756,6 +863,8 @@ type getReadBucketCloserOptions struct { terminateFunc buftarget.TerminateFunc protoFileTerminateFunc buftarget.TerminateFunc copyToInMemory bool + targetPaths []string + targetExcludePaths []string } func newGetReadBucketCloserOptions() *getReadBucketCloserOptions { @@ -763,7 +872,9 @@ func newGetReadBucketCloserOptions() *getReadBucketCloserOptions { } type getReadWriteBucketOptions struct { - terminateFunc buftarget.TerminateFunc + terminateFunc buftarget.TerminateFunc + targetPaths []string + targetExcludePaths []string } func newGetReadWriteBucketOptions() *getReadWriteBucketOptions { diff --git a/private/buf/buffetch/internal/reader_test.go b/private/buf/buffetch/internal/reader_test.go index 0a8b40b30e..06f91b8b08 100644 --- a/private/buf/buffetch/internal/reader_test.go +++ b/private/buf/buffetch/internal/reader_test.go @@ -29,20 +29,26 @@ import ( "go.uber.org/zap" ) +// TODO(doria): add tests with target paths and target exclude paths + func TestGetReadBucketCloserForBucketNoTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() inputBucket, err := storageos.NewProvider().NewReadWriteBucket("testdata/bufyaml/one/two") require.NoError(t, err) - readBucketCloser, err := getReadBucketCloserForBucket( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForBucket( ctx, zap.NewNop(), storage.NopReadBucketCloser(inputBucket), "three/four/five", + nil, // no target paths + nil, // no target exclude paths nil, ) require.NoError(t, err) - require.Equal(t, ".", readBucketCloser.SubDirPath()) + require.Nil(t, bucketTargeting.ControllingWorkspace()) + require.Equal(t, "three/four/five", readBucketCloser.SubDirPath()) + require.Equal(t, 
"three/four/five", bucketTargeting.InputPath()) _, err = readBucketCloser.Stat(ctx, "buf.yaml") require.NoError(t, err) require.NoError(t, readBucketCloser.Close()) @@ -53,15 +59,19 @@ func TestGetReadBucketCloserTerminateFileName(t *testing.T) { ctx := context.Background() inputBucket, err := storageos.NewProvider().NewReadWriteBucket("testdata/bufyaml/one/two") require.NoError(t, err) - readBucketCloser, err := getReadBucketCloserForBucket( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForBucket( ctx, zap.NewNop(), storage.NopReadBucketCloser(inputBucket), "three/four/five", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, "three", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) + require.Equal(t, "four/five", bucketTargeting.InputPath()) _, err = readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.NoError(t, readBucketCloser.Close()) @@ -72,15 +82,19 @@ func TestGetReadBucketCloserForBucketNoSubDirPath(t *testing.T) { ctx := context.Background() inputBucket, err := storageos.NewProvider().NewReadWriteBucket("testdata/bufyaml/one/two/three/four/five") require.NoError(t, err) - readBucketCloser, err := getReadBucketCloserForBucket( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForBucket( ctx, zap.NewNop(), storage.NopReadBucketCloser(inputBucket), ".", + nil, // no target paths + nil, // no target exclude paths nil, ) require.NoError(t, err) + require.Nil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, ".", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.InputPath()) _, err = readBucketCloser.Stat(ctx, "buf.yaml") require.NoError(t, err) require.NoError(t, readBucketCloser.Close()) @@ -93,15 +107,19 @@ func TestGetReadBucketCloserForBucketAbs(t *testing.T) { require.NoError(t, err) inputBucket, err := storageos.NewProvider().NewReadWriteBucket(normalpath.Join(absDirPath, "testdata/bufyaml/one/two")) require.NoError(t, err) - readBucketCloser, err := getReadBucketCloserForBucket( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForBucket( ctx, zap.NewNop(), storage.NopReadBucketCloser(inputBucket), "three/four/five", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, "three", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) + require.Equal(t, "four/five", bucketTargeting.InputPath()) _, err = readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.NoError(t, readBucketCloser.Close()) @@ -110,16 +128,20 @@ func TestGetReadBucketCloserForBucketAbs(t *testing.T) { func TestGetReadWriteBucketForOSNoTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() - readWriteBucket, err := getReadWriteBucketForOS( + readWriteBucket, bucketTargeting, err := getReadWriteBucketForOS( ctx, zap.NewNop(), storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five", + nil, // no target paths + nil, // no target exclude paths nil, ) require.NoError(t, err) - require.Equal(t, ".", readWriteBucket.SubDirPath()) - fileInfo, err := readWriteBucket.Stat(ctx, "buf.yaml") + require.Nil(t, bucketTargeting.ControllingWorkspace()) + require.Equal(t, "testdata/bufyaml/one/two/three/four/five", readWriteBucket.SubDirPath()) + 
require.Equal(t, "testdata/bufyaml/one/two/three/four/five", bucketTargeting.InputPath()) + fileInfo, err := readWriteBucket.Stat(ctx, "testdata/bufyaml/one/two/three/four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) } @@ -127,15 +149,19 @@ func TestGetReadWriteBucketForOSNoTerminateFileName(t *testing.T) { func TestGetReadWriteBucketForOSTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() - readWriteBucket, err := getReadWriteBucketForOS( + readWriteBucket, bucketTargeting, err := getReadWriteBucketForOS( ctx, zap.NewNop(), storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readWriteBucket.SubDirPath()) + require.Equal(t, "four/five", bucketTargeting.InputPath()) fileInfo, err := readWriteBucket.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -158,15 +184,19 @@ func TestGetReadWriteBucketForOSParentPwd(t *testing.T) { panic(r) } }() - readWriteBucket, err := getReadWriteBucketForOS( + readWriteBucket, bucketTargeting, err := getReadWriteBucketForOS( ctx, zap.NewNop(), storageos.NewProvider(), "five", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readWriteBucket.SubDirPath()) + require.Equal(t, "four/five", bucketTargeting.InputPath()) fileInfo, err := readWriteBucket.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -191,15 +221,19 @@ func TestGetReadWriteBucketForOSAbsPwd(t *testing.T) { panic(r) } }() - readWriteBucket, err := getReadWriteBucketForOS( + readWriteBucket, bucketTargeting, err := getReadWriteBucketForOS( ctx, zap.NewNop(), storageos.NewProvider(), normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five"), + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readWriteBucket.SubDirPath()) + require.Equal(t, "four/five", bucketTargeting.InputPath()) fileInfo, err := readWriteBucket.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five/buf.yaml"), filepath.ToSlash(fileInfo.ExternalPath())) @@ -211,15 +245,19 @@ func TestGetReadWriteBucketForOSAbsPwd(t *testing.T) { func TestGetReadBucketCloserForOSProtoFileNoWorkspaceTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five/proto/foo.proto", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.yaml"), ) require.NoError(t, err) - require.Equal(t, ".", readBucketCloser.SubDirPath()) + 
require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "proto", readBucketCloser.SubDirPath()) + require.Equal(t, "proto", bucketTargeting.InputPath()) fileInfo, err := readBucketCloser.Stat(ctx, "buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -229,15 +267,19 @@ func TestGetReadBucketCloserForOSProtoFileNoWorkspaceTerminateFileName(t *testin func TestGetReadBucketCloserForOSProtoFileTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five/proto/foo.proto", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) - require.Equal(t, "four/five", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) + require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -261,15 +303,19 @@ func TestGetReadBucketCloserForOSProtoFileParentPwd(t *testing.T) { panic(r) } }() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), "five/proto/foo.proto", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) - require.Equal(t, "four/five", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) + require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -295,15 +341,19 @@ func TestGetReadBucketCloserForOSProtoFileAbsPwd(t *testing.T) { panic(r) } }() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five/proto/foo.proto"), + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) - require.Equal(t, "four/five", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) + require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five/buf.yaml"), filepath.ToSlash(fileInfo.ExternalPath())) @@ -316,15 +366,19 @@ func TestGetReadBucketCloserForOSProtoFileAbsPwd(t *testing.T) { func TestGetReadBucketCloserForOSProtoFileNoBufYAMLTerminateFileName(t *testing.T) { t.Parallel() ctx := 
context.Background() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) - require.Equal(t, ".", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) + require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") require.NoError(t, err) require.Equal(t, "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto", filepath.ToSlash(fileInfo.ExternalPath())) @@ -345,15 +399,19 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLParentPwd(t *testing.T) { panic(r) } }() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), "five/proto/foo.proto", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) - require.Equal(t, ".", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) + require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") require.NoError(t, err) require.Equal(t, "five/proto/foo.proto", filepath.ToSlash(fileInfo.ExternalPath())) @@ -380,15 +438,19 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLAbsPwd(t *testing.T) { panic(r) } }() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), normalpath.Join(absDirPath, "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto"), + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) + require.Equal(t, "four/five", bucketTargeting.InputPath()) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") require.NoError(t, err) require.Equal(t, normalpath.Join(absDirPath, "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto"), fileInfo.ExternalPath()) @@ -404,14 +466,15 @@ func testNewTerminateAtFileNamesFunc(terminateFileNames ...string) buftarget.Ter ctx context.Context, bucket storage.ReadBucket, prefix string, - originalSubDirPath string, - ) (bool, error) { + inputPath string, + ) (buftarget.ControllingWorkspace, error) { for _, terminateFileName := range terminateFileNames { + // We do not test for config file logic here, so it is fine to return empty configs. 
if _, err := bucket.Stat(ctx, normalpath.Join(prefix, terminateFileName)); err == nil { - return true, nil + return buftarget.NewControllingWorkspace(prefix, nil, nil), nil } } - return false, nil + return nil, nil }, ) } diff --git a/private/buf/buffetch/reader.go b/private/buf/buffetch/reader.go index b74603f134..1faa5123d4 100644 --- a/private/buf/buffetch/reader.go +++ b/private/buf/buffetch/reader.go @@ -20,7 +20,7 @@ import ( "net/http" "github.com/bufbuild/buf/private/buf/buffetch/internal" - "github.com/bufbuild/buf/private/bufpkg/bufconfig" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/git" @@ -148,7 +148,7 @@ func (a *reader) GetSourceReadBucketCloser( container app.EnvStdinContainer, sourceRef SourceRef, options ...GetReadBucketCloserOption, -) (ReadBucketCloser, error) { +) (ReadBucketCloser, buftarget.BucketTargeting, error) { getReadBucketCloserOptions := newGetReadBucketCloserOptions() for _, option := range options { option(getReadBucketCloserOptions) @@ -157,8 +157,7 @@ func (a *reader) GetSourceReadBucketCloser( if !getReadBucketCloserOptions.noSearch { internalGetReadBucketCloserOptions = append( internalGetReadBucketCloserOptions, - internal.WithGetReadBucketCloserTerminateFunc(bufconfig.TerminateAtControllingWorkspace), - internal.WithGetReadBucketCloserProtoFileTerminateFunc(bufconfig.TerminateAtEnclosingModuleOrWorkspaceForProtoFileRef), + internal.WithGetReadBucketCloserTerminateFunc(buftarget.TerminateAtControllingWorkspace), ) } if getReadBucketCloserOptions.copyToInMemory { @@ -167,6 +166,14 @@ func (a *reader) GetSourceReadBucketCloser( internal.WithGetReadBucketCloserCopyToInMemory(), ) } + internalGetReadBucketCloserOptions = append( + internalGetReadBucketCloserOptions, + internal.WithGetReadBucketCloserTargetPaths(getReadBucketCloserOptions.targetPaths), + ) + internalGetReadBucketCloserOptions = append( + internalGetReadBucketCloserOptions, + internal.WithGetReadBucketCloserTargetExcludePaths(getReadBucketCloserOptions.targetExcludePaths), + ) return a.internalReader.GetReadBucketCloser( ctx, container, @@ -180,7 +187,7 @@ func (a *reader) GetDirReadWriteBucket( container app.EnvStdinContainer, dirRef DirRef, options ...GetReadWriteBucketOption, -) (ReadWriteBucket, error) { +) (ReadWriteBucket, buftarget.BucketTargeting, error) { getReadWriteBucketOptions := newGetReadWriteBucketOptions() for _, option := range options { option(getReadWriteBucketOptions) @@ -189,9 +196,17 @@ func (a *reader) GetDirReadWriteBucket( if !getReadWriteBucketOptions.noSearch { internalGetReadWriteBucketOptions = append( internalGetReadWriteBucketOptions, - internal.WithGetReadWriteBucketTerminateFunc(bufconfig.TerminateAtControllingWorkspace), + internal.WithGetReadWriteBucketTerminateFunc(buftarget.TerminateAtControllingWorkspace), ) } + internalGetReadWriteBucketOptions = append( + internalGetReadWriteBucketOptions, + internal.WithGetReadWriteBucketTargetPaths(getReadWriteBucketOptions.targetPaths), + ) + internalGetReadWriteBucketOptions = append( + internalGetReadWriteBucketOptions, + internal.WithGetReadWriteBucketTargetExcludePaths(getReadWriteBucketOptions.targetExcludePaths), + ) return a.internalReader.GetReadWriteBucket( ctx, container, diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index 8fb4c91771..6ad834af97 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ 
b/private/buf/buftarget/bucket_targeting.go @@ -16,36 +16,33 @@ package buftarget import ( "context" - "fmt" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/storage" "go.uber.org/zap" ) -// TODO(doria): we need to add the following info: -// BufWorkYAMLDirPaths (for v1 vs. v2 workspaces) -// Document that everything here is relative paths - // BucketTargeting provides the path to the controllng workspace, target paths, and target // exclude paths mapped to the bucket. +// All paths for targeting information are normalized. type BucketTargeting interface { - // ControllingWorkspace returns the path for the controlling workspace relative to the root of the bucket. - ControllingWorkspacePath() string - // InputPath returns the input path relative to the root fo the bucket - // TODO(doria): should the input path be treated differently from all other target paths? + // ControllingWorkspace returns the information for the controlling workspace, if one was + // found. If not found, then this will be nil. + ControllingWorkspace() ControllingWorkspace + // InputPath returns the input path relative to the root of the bucket InputPath() string // TargetPaths returns the target paths relative to the root of the bucket. TargetPaths() []string // TargetExcludePaths returns the target exclude paths relative to the root of the bucket. TargetExcludePaths() []string - // Terminated returns whether the controlling workspace was found through the terminateFunc. - // TODO(doria): should be be able to kill this. isBucketTargeting() } +// NewBucketTargeting returns new targeting information for the given bucket, input path, +// target paths, and target exclude paths. +// +// Paths must be relative. func NewBucketTargeting( ctx context.Context, logger *zap.Logger, @@ -53,7 +50,7 @@ func NewBucketTargeting( inputPath string, targetPaths []string, targetExcludePaths []string, - terminateFunc TerminateFunc, // TODO(doria): move that out of buffetch + terminateFunc TerminateFunc, ) (BucketTargeting, error) { return newBucketTargeting(ctx, logger, bucket, inputPath, targetPaths, targetExcludePaths, terminateFunc) } @@ -65,15 +62,14 @@ var ( ) type bucketTargeting struct { - controllingWorkspacePath string - inputPath string - targetPaths []string - targetExcludePaths []string - terminated bool + controllingWorkspace ControllingWorkspace + inputPath string + targetPaths []string + targetExcludePaths []string } -func (b *bucketTargeting) ControllingWorkspacePath() string { - return b.controllingWorkspacePath +func (b *bucketTargeting) ControllingWorkspace() ControllingWorkspace { + return b.controllingWorkspace } func (b *bucketTargeting) InputPath() string { @@ -88,10 +84,6 @@ func (b *bucketTargeting) TargetExcludePaths() []string { return b.targetExcludePaths } -func (b *bucketTargeting) Terminated() bool { - return b.terminated -} - func (b *bucketTargeting) isBucketTargeting() { } @@ -105,76 +97,73 @@ func newBucketTargeting( terminateFunc TerminateFunc, ) (*bucketTargeting, error) { // First we map the controlling workspace for the inputPath. - controllingWorkspacePath, mappedInputPath, terminated, err := mapControllingWorkspaceAndPath( + controllingWorkspace, mappedInputPath, err := mapControllingWorkspaceAndPath( ctx, logger, bucket, inputPath, + inputPath, terminateFunc, ) if err != nil { return nil, err } - mappedTargetPaths := make([]string, len(targetPaths)) - // Then we do the same for each target path. 
If the target paths resolve to different - // controlling workspaces, then we return an error. - // We currently do not compile nested workspaces, but this algorithm lets us potentially - // handle nested workspaces in the future. - // TODO(doria): this shouldn't have a big impact on performance, right? - // TODO(doria): do we need to handle that there was a termination through the paths? maybe. - for i, targetPath := range targetPaths { - controllingWorkspacePathForTargetPath, mappedPath, _, err := mapControllingWorkspaceAndPath( - ctx, - logger, - bucket, - targetPath, - terminateFunc, - ) - if err != nil { - return nil, err - } - if controllingWorkspacePathForTargetPath != controllingWorkspacePath { - return nil, fmt.Errorf("more than one workspace resolved for given paths: %q, %q", controllingWorkspacePathForTargetPath, controllingWorkspacePath) + // If no controlling workspace was found, we map the target paths and target exclude + // paths to the input path. + mappedTargetPaths := targetPaths + mappedTargetExcludePaths := targetExcludePaths + if controllingWorkspace != nil && controllingWorkspace.Path() != "." { + // If a controlling workspace was found, we map the paths to the controlling workspace + // because we'll be working with a workspace bucket. + for i, targetPath := range targetPaths { + targetPath, err := normalpath.NormalizeAndValidate(targetPath) + if err != nil { + return nil, err + } + mappedTargetPath, err := normalpath.Rel(controllingWorkspace.Path(), targetPath) + if err != nil { + return nil, err + } + mappedTargetPaths[i] = mappedTargetPath } - mappedTargetPaths[i] = mappedPath - } - // NOTE: we do not map excluded paths to their own workspaces -- we use the controlling - // workspace we resolved through our input path and target paths. If an excluded path does - // not exist, we do not validate this. - mappedTargetExcludePaths := make([]string, len(targetExcludePaths)) - for i, targetExcludePath := range targetExcludePaths { - mappedTargetExcludePath, err := normalpath.Rel(controllingWorkspacePath, targetExcludePath) - if err != nil { - return nil, err + for i, targetExcludePath := range targetExcludePaths { + targetExcludePath, err := normalpath.NormalizeAndValidate(targetExcludePath) + if err != nil { + return nil, err + } + mappedTargetExcludePath, err := normalpath.Rel(controllingWorkspace.Path(), targetExcludePath) + if err != nil { + return nil, err + } + mappedTargetExcludePaths[i] = mappedTargetExcludePath } - mappedTargetExcludePaths[i] = mappedTargetExcludePath } return &bucketTargeting{ - controllingWorkspacePath: controllingWorkspacePath, - inputPath: mappedInputPath, - targetPaths: mappedTargetPaths, - targetExcludePaths: mappedTargetExcludePaths, - terminated: terminated, + controllingWorkspace: controllingWorkspace, + inputPath: mappedInputPath, + targetPaths: mappedTargetPaths, + targetExcludePaths: mappedTargetExcludePaths, }, nil } // mapControllingWorkspaceAndPath takes a bucket, path, and terminate func and returns the -// controlling workspace path and mapped path. +// controlling workspace and mapped path. 
func mapControllingWorkspaceAndPath( ctx context.Context, logger *zap.Logger, bucket storage.ReadBucket, path string, + originalInputPath string, terminateFunc TerminateFunc, -) (string, string, bool, error) { +) (ControllingWorkspace, string, error) { path, err := normalpath.NormalizeAndValidate(path) if err != nil { - return "", "", false, err + return nil, "", err } // If no terminateFunc is passed, we can simply assume that we are mapping the bucket at // the path. if terminateFunc == nil { - return path, ".", false, nil + return nil, path, nil } // We can't do this in a traditional loop like this: // @@ -186,11 +175,11 @@ func mapControllingWorkspaceAndPath( // Instead, we effectively do a do-while loop. curDirPath := path for { - terminate, err := terminateFunc(ctx, bucket, curDirPath, path) + controllingWorkspace, err := terminateFunc(ctx, bucket, curDirPath, originalInputPath) if err != nil { - return "", "", false, err + return nil, "", err } - if terminate { + if controllingWorkspace != nil { logger.Debug( "buffetch termination found", zap.String("curDirPath", curDirPath), @@ -198,9 +187,9 @@ func mapControllingWorkspaceAndPath( ) subDirPath, err := normalpath.Rel(curDirPath, path) if err != nil { - return "", "", false, err + return nil, "", err } - return curDirPath, subDirPath, true, nil + return controllingWorkspace, subDirPath, nil } if curDirPath == "." { break @@ -211,5 +200,6 @@ func mapControllingWorkspaceAndPath( "buffetch no termination found", zap.String("path", path), ) - return curDirPath, ".", false, nil + // No controlling workspace was found, so we simply return the input path + return nil, path, nil } diff --git a/private/buf/buftarget/controlling_workspace.go b/private/buf/buftarget/controlling_workspace.go new file mode 100644 index 0000000000..eba876c8e8 --- /dev/null +++ b/private/buf/buftarget/controlling_workspace.go @@ -0,0 +1,59 @@ +package buftarget + +import ( + "github.com/bufbuild/buf/private/bufpkg/bufconfig" +) + +type ControllingWorkspace interface { + // Path of the controlling workspace in the bucket. + Path() string + // Returns a buf.work.yaml file that was found. This is empty if we are returning a buf.yaml. + BufWorkYAMLFile() bufconfig.BufWorkYAMLFile + // Returns a buf.yaml that was found. This is empty if we are returning a buf.work.yaml. + // Can be a v1 or v2 buf.yaml. 
+	BufYAMLFile() bufconfig.BufYAMLFile
+}
+
+func NewControllingWorkspace(
+	path string,
+	bufWorkYAMLFile bufconfig.BufWorkYAMLFile,
+	bufYAMLFile bufconfig.BufYAMLFile,
+) ControllingWorkspace {
+	return newControllingWorkspace(path, bufWorkYAMLFile, bufYAMLFile)
+}
+
+// *** PRIVATE ***
+
+var (
+	_ ControllingWorkspace = &controllingWorkspace{}
+)
+
+type controllingWorkspace struct {
+	path            string
+	bufWorkYAMLFile bufconfig.BufWorkYAMLFile
+	bufYAMLFile     bufconfig.BufYAMLFile
+}
+
+func newControllingWorkspace(
+	path string,
+	bufWorkYAMLFile bufconfig.BufWorkYAMLFile,
+	bufYAMLFile bufconfig.BufYAMLFile,
+) ControllingWorkspace {
+	return &controllingWorkspace{
+		path:            path,
+		bufWorkYAMLFile: bufWorkYAMLFile,
+		bufYAMLFile:     bufYAMLFile,
+	}
+}
+
+func (c *controllingWorkspace) Path() string {
+	return c.path
+}
+
+func (c *controllingWorkspace) BufWorkYAMLFile() bufconfig.BufWorkYAMLFile {
+	return c.bufWorkYAMLFile
+}
+
+func (c *controllingWorkspace) BufYAMLFile() bufconfig.BufYAMLFile {
+	return c.bufYAMLFile
+}
diff --git a/private/buf/buftarget/terminate.go b/private/buf/buftarget/terminate.go
index 251e9ed35e..1893e3e2f5 100644
--- a/private/buf/buftarget/terminate.go
+++ b/private/buf/buftarget/terminate.go
@@ -16,19 +16,75 @@ package buftarget
 
 import (
 	"context"
+	"errors"
+	"fmt"
+	"io/fs"
 
+	"github.com/bufbuild/buf/private/bufpkg/bufconfig"
+	"github.com/bufbuild/buf/private/pkg/normalpath"
+	"github.com/bufbuild/buf/private/pkg/slicesext"
 	"github.com/bufbuild/buf/private/pkg/storage"
 )
 
 // TerminateFunc is a termination function.
-//
-// This function should return true if the search should terminate at the prefix, that is
-// the prefix should be where the bucket is mapped onto.
-//
-// The original subDirPath is also given to the TerminateFunc.
 type TerminateFunc func(
 	ctx context.Context,
 	bucket storage.ReadBucket,
 	prefix string,
-	originalSubDirPath string,
-) (terminate bool, err error)
+	originalInputPath string,
+) (ControllingWorkspace, error)
+
+// TerminateAtControllingWorkspace implements a TerminateFunc and returns a controlling workspace
+// if one is found at the given prefix.
+//
+// All paths must be relative.
+func TerminateAtControllingWorkspace(
+	ctx context.Context,
+	bucket storage.ReadBucket,
+	prefix string,
+	originalInputPath string,
+) (ControllingWorkspace, error) {
+	return terminateAtControllingWorkspace(ctx, bucket, prefix, originalInputPath)
+}
+
+// *** PRIVATE ***
+
+func terminateAtControllingWorkspace(
+	ctx context.Context,
+	bucket storage.ReadBucket,
+	prefix string,
+	originalInputPath string,
+) (ControllingWorkspace, error) {
+	bufWorkYAMLFile, err := bufconfig.GetBufWorkYAMLFileForPrefix(ctx, bucket, prefix)
+	if err != nil && !errors.Is(err, fs.ErrNotExist) {
+		return nil, err
+	}
+	bufWorkYAMLExists := err == nil
+	bufYAMLFile, err := bufconfig.GetBufYAMLFileForPrefix(ctx, bucket, prefix)
+	if err != nil && !errors.Is(err, fs.ErrNotExist) {
+		return nil, err
+	}
+	bufYAMLExists := err == nil
+	if bufWorkYAMLExists && bufYAMLExists {
+		// This isn't actually the external directory path, but we do the best we can here for now.
+		return nil, fmt.Errorf("cannot have a buf.work.yaml and buf.yaml in the same directory %q", prefix)
+	}
+	if bufYAMLExists && bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 {
+		// We don't require the workspace to point to the prefix (likely because we're
+		// finding the controlling workspace for a ProtoFileRef), so we're good to go.
+		return newControllingWorkspace(prefix, nil, bufYAMLFile), nil
+	}
+	if bufWorkYAMLExists {
+		// For v1 workspaces, we ensure that the listed module paths actually contain the
+		// original input paths. If not, then we do not use this workspace.
+		relDirPath, err := normalpath.Rel(prefix, originalInputPath)
+		if err != nil {
+			return nil, err
+		}
+		_, refersToCurDirPath := slicesext.ToStructMap(bufWorkYAMLFile.DirPaths())[relDirPath]
+		if prefix == originalInputPath || refersToCurDirPath {
+			return newControllingWorkspace(prefix, bufWorkYAMLFile, nil), nil
+		}
+	}
+	return nil, nil
+}
diff --git a/private/buf/bufworkspace/module_targeting.go b/private/buf/bufworkspace/module_targeting.go
index 9034b9b803..300b9f38c4 100644
--- a/private/buf/bufworkspace/module_targeting.go
+++ b/private/buf/bufworkspace/module_targeting.go
@@ -17,6 +17,7 @@ package bufworkspace
 import (
 	"fmt"
 
+	"github.com/bufbuild/buf/private/buf/buftarget"
 	"github.com/bufbuild/buf/private/pkg/normalpath"
 	"github.com/bufbuild/buf/private/pkg/slicesext"
 	"github.com/bufbuild/buf/private/pkg/stringutil"
@@ -25,10 +26,13 @@ import (
 type moduleTargeting struct {
 	// Whether this module is really a target module.
 	//
+	// TODO(doria): protofile ref
 	// False if this was not specified as a target module by the caller.
-	// Also false if there were config.targetPaths or config.protoFileTargetPath, but
+	// Also false if there were bucketTargeting.TargetPaths() or bucketTargeting.protoFileTargetPath, but
 	// these paths did not match anything in the module.
 	isTargetModule bool
+	// moduleDirPath is the directory path of the module.
+	moduleDirPath string
 	// relative to the actual moduleDirPath and the roots parsed from the buf.yaml
 	moduleTargetPaths []string
 	// relative to the actual moduleDirPath and the roots parsed from the buf.yaml
@@ -41,21 +45,25 @@ type moduleTargeting struct {
 func newModuleTargeting(
 	moduleDirPath string,
 	roots []string,
+	bucketTargeting buftarget.BucketTargeting,
 	config *workspaceBucketConfig,
 	isTentativelyTargetModule bool,
 ) (*moduleTargeting, error) {
 	if !isTentativelyTargetModule {
 		// If this is not a target Module, we do not want to target anything, as targeting
 		// paths for non-target Modules is an error.
-		return &moduleTargeting{}, nil
+		return &moduleTargeting{moduleDirPath: moduleDirPath}, nil
 	}
 	// If we have no target paths, then we always match the value of isTargetModule.
 	// Otherwise, we need to see that at least one path matches the moduleDirPath for us
 	// to consider this module a target.
-	isTargetModule := len(config.targetPaths) == 0 && config.protoFileTargetPath == ""
+	// TODO(doria): deal with proto file ref appropriately
+	isTargetModule := len(bucketTargeting.TargetPaths()) == 0 && config.protoFileTargetPath == ""
 	var moduleTargetPaths []string
 	var moduleTargetExcludePaths []string
-	for _, targetPath := range config.targetPaths {
+	// We use bucketTargeting.TargetPaths() instead of the workspace config target paths
+	// since those are stripped of the path to the module.
+	for _, targetPath := range bucketTargeting.TargetPaths() {
 		if targetPath == moduleDirPath {
 			// We're just going to be realists in our error messages here.
 			// TODO FUTURE: Do we error here currently? If so, this error remains.
For extra credit in the future, @@ -71,7 +79,9 @@ func newModuleTargeting( moduleTargetPaths = append(moduleTargetPaths, moduleTargetPath) } } - for _, targetExcludePath := range config.targetExcludePaths { + // We use the bucketTargeting.TargetExcludePaths() instead of the workspace config target + // exclude paths since those are stripped of the path to the module. + for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { if targetExcludePath == moduleDirPath { // We're just going to be realists in our error messages here. // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, @@ -121,6 +131,7 @@ func newModuleTargeting( includePackageFiles = config.includePackageFiles } return &moduleTargeting{ + moduleDirPath: moduleDirPath, isTargetModule: isTargetModule, moduleTargetPaths: moduleTargetPaths, moduleTargetExcludePaths: moduleTargetExcludePaths, diff --git a/private/buf/bufworkspace/workspace_dep_manager_provider.go b/private/buf/bufworkspace/workspace_dep_manager_provider.go index f89b731bf0..e2004ff761 100644 --- a/private/buf/bufworkspace/workspace_dep_manager_provider.go +++ b/private/buf/bufworkspace/workspace_dep_manager_provider.go @@ -17,6 +17,8 @@ package bufworkspace import ( "context" + "github.com/bufbuild/buf/private/buf/buftarget" + "github.com/bufbuild/buf/private/bufpkg/bufconfig" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/syserror" "github.com/bufbuild/buf/private/pkg/tracing" @@ -42,6 +44,7 @@ type WorkspaceDepManagerProvider interface { GetWorkspaceDepManager( ctx context.Context, bucket storage.ReadWriteBucket, + bucketTargeting buftarget.BucketTargeting, options ...WorkspaceDepManagerOption, ) (WorkspaceDepManager, error) } @@ -77,29 +80,16 @@ func newWorkspaceDepManagerProvider( func (w *workspaceDepManagerProvider) GetWorkspaceDepManager( ctx context.Context, bucket storage.ReadWriteBucket, + bucketTargeting buftarget.BucketTargeting, options ...WorkspaceDepManagerOption, ) (_ WorkspaceDepManager, retErr error) { - config, err := newWorkspaceDepManagerConfig(options) - if err != nil { - return nil, err + controllingWorkspace := bucketTargeting.ControllingWorkspace() + if controllingWorkspace == nil || controllingWorkspace.BufWorkYAMLFile() != nil { + return nil, syserror.New("no supported workspace found") } - workspaceTargeting, err := newWorkspaceTargeting( - ctx, - w.logger, - bucket, - config.targetSubDirPath, - nil, - true, // Disallow buf.work.yamls when doing management of deps for v1 - ) - if err != nil { - return nil, err - } - if workspaceTargeting.isV2() { - return newWorkspaceDepManager(bucket, workspaceTargeting.v2DirPath, true), nil - } - if len(workspaceTargeting.v1DirPaths) != 1 { - // This is because of disallowing buf.work.yamls - return nil, syserror.Newf("expected a single v1 dir path from workspace targeting but got %v", workspaceTargeting.v1DirPaths) + bufYAMLFile := controllingWorkspace.BufYAMLFile() + if bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 { + return newWorkspaceDepManager(bucket, controllingWorkspace.Path(), true), nil } - return newWorkspaceDepManager(bucket, workspaceTargeting.v1DirPaths[0], false), nil + return newWorkspaceDepManager(bucket, bucketTargeting.InputPath(), false), nil } diff --git a/private/buf/bufworkspace/workspace_provider.go b/private/buf/bufworkspace/workspace_provider.go index b2c607ffba..059c1fb9c7 100644 --- a/private/buf/bufworkspace/workspace_provider.go +++ 
b/private/buf/bufworkspace/workspace_provider.go @@ -20,10 +20,9 @@ import ( "fmt" "io/fs" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufconfig" "github.com/bufbuild/buf/private/bufpkg/bufmodule" - "github.com/bufbuild/buf/private/pkg/normalpath" - "github.com/bufbuild/buf/private/pkg/slicesext" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/syserror" "github.com/bufbuild/buf/private/pkg/tracing" @@ -52,6 +51,7 @@ type WorkspaceProvider interface { GetWorkspaceForBucket( ctx context.Context, bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, options ...WorkspaceBucketOption, ) (Workspace, error) @@ -211,11 +211,11 @@ func (w *workspaceProvider) GetWorkspaceForModuleKey( func (w *workspaceProvider) GetWorkspaceForBucket( ctx context.Context, bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, options ...WorkspaceBucketOption, ) (_ Workspace, retErr error) { ctx, span := w.tracer.Start(ctx, tracing.WithErr(&retErr)) defer span.End() - config, err := newWorkspaceBucketConfig(options) if err != nil { return nil, err @@ -230,95 +230,42 @@ func (w *workspaceProvider) GetWorkspaceForBucket( workspaceTargeting, err := newWorkspaceTargeting( ctx, w.logger, + config, bucket, - config.targetSubDirPath, + bucketTargeting, overrideBufYAMLFile, config.ignoreAndDisallowV1BufWorkYAMLs, ) if err != nil { return nil, err } - if workspaceTargeting.isV2() { + if workspaceTargeting.v2 != nil { return w.getWorkspaceForBucketBufYAMLV2( ctx, bucket, - config, - workspaceTargeting.v2DirPath, - overrideBufYAMLFile, + workspaceTargeting.v2, ) } return w.getWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1( ctx, bucket, - config, - workspaceTargeting.v1DirPaths, - overrideBufYAMLFile, + workspaceTargeting.v1, ) } func (w *workspaceProvider) getWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1( ctx context.Context, bucket storage.ReadBucket, - config *workspaceBucketConfig, - moduleDirPaths []string, - // This can be nil, this is only set if config.configOverride was set, which we - // deal with outside of this function. - overrideBufYAMLFile bufconfig.BufYAMLFile, + v1WorkspaceTargeting *v1Targeting, ) (*workspace, error) { - // config.targetSubDirPath is the input subDirPath. We only want to target modules that are inside - // this subDirPath. Example: bufWorkYAMLDirPath is "foo", subDirPath is "foo/bar", - // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" - // and "foo/bar/bat". - // - // This is new behavior - before, we required that you input an exact match for the module directory path, - // but now, we will take all the modules underneath this workspace. - isTargetFunc := func(moduleDirPath string) bool { - return normalpath.EqualsOrContainsPath(config.targetSubDirPath, moduleDirPath, normalpath.Relative) - } moduleSetBuilder := bufmodule.NewModuleSetBuilder(ctx, w.tracer, w.moduleDataProvider, w.commitProvider) - bucketIDToModuleConfig := make(map[string]bufconfig.ModuleConfig) - // We use this to detect different refs across different files. - moduleFullNameStringToConfiguredDepModuleRefString := make(map[string]string) - var allConfiguredDepModuleRefs []bufmodule.ModuleRef - // We keep track of if any module was tentatively targeted, and then actually targeted via - // the paths flags. We use this pre-building of the ModuleSet to see if the --path and - // --exclude-path flags resulted in no targeted modules. 
This condition is represented - // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false - // - // If hadIsTentativelyTargetModule is false, this means that our input subDirPath was not - // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml - // directories, and this is a system error - this should be verified before we reach this function. - var hadIsTentativelyTargetModule bool - var hadIsTargetModule bool - for _, moduleDirPath := range moduleDirPaths { - moduleConfig, configuredDepModuleRefs, err := getModuleConfigAndConfiguredDepModuleRefsV1Beta1OrV1( - ctx, - bucket, - moduleDirPath, - overrideBufYAMLFile, - ) - if err != nil { - return nil, err - } - for _, configuredDepModuleRef := range configuredDepModuleRefs { - moduleFullNameString := configuredDepModuleRef.ModuleFullName().String() - configuredDepModuleRefString := configuredDepModuleRef.String() - existingConfiguredDepModuleRefString, ok := moduleFullNameStringToConfiguredDepModuleRefString[moduleFullNameString] - if !ok { - // We haven't encountered a ModuleRef with this ModuleFullName yet, add it. - allConfiguredDepModuleRefs = append(allConfiguredDepModuleRefs, configuredDepModuleRef) - moduleFullNameStringToConfiguredDepModuleRefString[moduleFullNameString] = configuredDepModuleRefString - } else if configuredDepModuleRefString != existingConfiguredDepModuleRefString { - // We encountered the same ModuleRef by ModuleFullName, but with a different Ref. - return nil, fmt.Errorf("found different refs for the same module within buf.yaml deps in the workspace: %s %s", configuredDepModuleRefString, existingConfiguredDepModuleRefString) - } - } - bucketIDToModuleConfig[moduleDirPath] = moduleConfig + for _, moduleBucketAndTargeting := range v1WorkspaceTargeting.moduleBucketsAndTargeting { + mappedModuleBucket := moduleBucketAndTargeting.bucket + moduleTargeting := moduleBucketAndTargeting.moduleTargeting bufLockFile, err := bufconfig.GetBufLockFileForPrefix( ctx, - bucket, - // buf.lock files live at the module root - moduleDirPath, + bucket, // Need to use the non-mapped bucket since the mapped bucket excludes the buf.lock + moduleTargeting.moduleDirPath, bufconfig.BufLockFileWithDigestResolver( func(ctx context.Context, remote string, commitID uuid.UUID) (bufmodule.Digest, error) { commitKey, err := bufmodule.NewCommitKey(remote, commitID, bufmodule.DigestTypeB4) @@ -354,40 +301,27 @@ func (w *workspaceProvider) getWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1( ) } } - // We figure out based on the paths if this is really a target module in moduleTargeting. 
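For orientation while reading this rewritten provider: callers are now expected to resolve targeting once, up front, and hand the result in, as the updated tests later in this patch do. Roughly (a sketch with placeholder logger, bucket, and input path, not code from this patch):

	bucketTargeting, err := buftarget.NewBucketTargeting(
		ctx,
		logger,
		bucket,
		"finance/portfolio/proto", // input path, placeholder
		nil,                       // target paths
		nil,                       // target exclude paths
		buftarget.TerminateAtControllingWorkspace,
	)
	if err != nil {
		return err
	}
	workspace, err := workspaceProvider.GetWorkspaceForBucket(ctx, bucket, bucketTargeting)
	if err != nil {
		return err
	}
	_ = workspace // built from the modules selected by bucketTargeting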
- isTentativelyTargetModule := isTargetFunc(moduleDirPath) - if isTentativelyTargetModule { - hadIsTentativelyTargetModule = true - } - mappedModuleBucket, moduleTargeting, err := getMappedModuleBucketAndModuleTargeting( - ctx, - bucket, - config, - moduleDirPath, - moduleConfig, - isTentativelyTargetModule, - ) - if err != nil { - return nil, err - } - if moduleTargeting.isTargetModule { - hadIsTargetModule = true - } - v1BufYAMLObjectData, err := bufconfig.GetBufYAMLV1Beta1OrV1ObjectDataForPrefix(ctx, bucket, moduleDirPath) + v1BufYAMLObjectData, err := bufconfig.GetBufYAMLV1Beta1OrV1ObjectDataForPrefix(ctx, bucket, moduleTargeting.moduleDirPath) if err != nil { if !errors.Is(err, fs.ErrNotExist) { return nil, err } } - v1BufLockObjectData, err := bufconfig.GetBufLockV1Beta1OrV1ObjectDataForPrefix(ctx, bucket, moduleDirPath) + v1BufLockObjectData, err := bufconfig.GetBufLockV1Beta1OrV1ObjectDataForPrefix(ctx, bucket, moduleTargeting.moduleDirPath) if err != nil { if !errors.Is(err, fs.ErrNotExist) { return nil, err } } + moduleConfig, ok := v1WorkspaceTargeting.bucketIDToModuleConfig[moduleTargeting.moduleDirPath] + if !ok { + // This should not happen since moduleBucketAndTargeting is derived from the module + // configs, however, we return this error as a safety check + return nil, fmt.Errorf("no module config found for module at: %q", moduleTargeting.moduleDirPath) + } moduleSetBuilder.AddLocalModule( mappedModuleBucket, - moduleDirPath, + moduleTargeting.moduleDirPath, // bucket ID moduleTargeting.isTargetModule, bufmodule.LocalModuleWithModuleFullName(moduleConfig.ModuleFullName()), bufmodule.LocalModuleWithTargetPaths( @@ -402,21 +336,14 @@ func (w *workspaceProvider) getWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1( bufmodule.LocalModuleWithV1Beta1OrV1BufLockObjectData(v1BufLockObjectData), ) } - if !hadIsTentativelyTargetModule { - return nil, syserror.Newf("subDirPath %q did not result in any target modules from moduleDirPaths %v", config.targetSubDirPath, moduleDirPaths) - } - if !hadIsTargetModule { - // It would be nice to have a better error message than this in the long term. - return nil, bufmodule.ErrNoTargetProtoFiles - } moduleSet, err := moduleSetBuilder.Build() if err != nil { return nil, err } return w.getWorkspaceForBucketModuleSet( moduleSet, - bucketIDToModuleConfig, - allConfiguredDepModuleRefs, + v1WorkspaceTargeting.bucketIDToModuleConfig, + v1WorkspaceTargeting.allConfiguredDepModuleRefs, false, ) } @@ -424,41 +351,9 @@ func (w *workspaceProvider) getWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1( func (w *workspaceProvider) getWorkspaceForBucketBufYAMLV2( ctx context.Context, bucket storage.ReadBucket, - config *workspaceBucketConfig, - dirPath string, - // This can be nil, this is only set if config.configOverride was set, which we - // deal with outside of this function. - overrideBufYAMLFile bufconfig.BufYAMLFile, + v2Targeting *v2Targeting, ) (*workspace, error) { - bucket = storage.MapReadBucket(bucket, storage.MapOnPrefix(dirPath)) - - var bufYAMLFile bufconfig.BufYAMLFile - var err error - if overrideBufYAMLFile != nil { - bufYAMLFile = overrideBufYAMLFile - } else { - bufYAMLFile, err = bufconfig.GetBufYAMLFileForPrefix(ctx, bucket, ".") - if err != nil { - // This should be apparent from above functions. 
- return nil, syserror.Newf("error getting v2 buf.yaml: %w", err) - } - } - if bufYAMLFile.FileVersion() != bufconfig.FileVersionV2 { - return nil, syserror.Newf("expected v2 buf.yaml but got %v", bufYAMLFile.FileVersion()) - } - - // config.targetSubDirPath is the input targetSubDirPath. We only want to target modules that are inside - // this targetSubDirPath. Example: bufWorkYAMLDirPath is "foo", targetSubDirPath is "foo/bar", - // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" - // and "foo/bar/bat". - // - // This is new behavior - before, we required that you input an exact match for the module directory path, - // but now, we will take all the modules underneath this workspace. - isTargetFunc := func(moduleDirPath string) bool { - return normalpath.EqualsOrContainsPath(config.targetSubDirPath, moduleDirPath, normalpath.Relative) - } moduleSetBuilder := bufmodule.NewModuleSetBuilder(ctx, w.tracer, w.moduleDataProvider, w.commitProvider) - bufLockFile, err := bufconfig.GetBufLockFileForPrefix( ctx, bucket, @@ -488,45 +383,18 @@ func (w *workspaceProvider) getWorkspaceForBucketBufYAMLV2( ) } } - - bucketIDToModuleConfig := make(map[string]bufconfig.ModuleConfig) - // We keep track of if any module was tentatively targeted, and then actually targeted via - // the paths flags. We use this pre-building of the ModuleSet to see if the --path and - // --exclude-path flags resulted in no targeted modules. This condition is represented - // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false - // - // If hadIsTentativelyTargetModule is false, this means that our input subDirPath was not - // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml - // directories, and this is a system error - this should be verified before we reach this function. - var hadIsTentativelyTargetModule bool - var hadIsTargetModule bool - var moduleDirPaths []string - for _, moduleConfig := range bufYAMLFile.ModuleConfigs() { - moduleDirPath := moduleConfig.DirPath() - moduleDirPaths = append(moduleDirPaths, moduleDirPath) - bucketIDToModuleConfig[moduleDirPath] = moduleConfig - // We figure out based on the paths if this is really a target module in moduleTargeting. 
- isTentativelyTargetModule := isTargetFunc(moduleDirPath) - if isTentativelyTargetModule { - hadIsTentativelyTargetModule = true - } - mappedModuleBucket, moduleTargeting, err := getMappedModuleBucketAndModuleTargeting( - ctx, - bucket, - config, - moduleDirPath, - moduleConfig, - isTentativelyTargetModule, - ) - if err != nil { - return nil, err - } - if moduleTargeting.isTargetModule { - hadIsTargetModule = true + for _, moduleBucketAndTargeting := range v2Targeting.moduleBucketsAndTargeting { + mappedModuleBucket := moduleBucketAndTargeting.bucket + moduleTargeting := moduleBucketAndTargeting.moduleTargeting + moduleConfig, ok := v2Targeting.bucketIDToModuleConfig[moduleTargeting.moduleDirPath] + if !ok { + // This should not happen since moduleBucketAndTargeting is derived from the module + // configs, however, we return this error as a safety check + return nil, fmt.Errorf("no module config found for module at: %q", moduleTargeting.moduleDirPath) } moduleSetBuilder.AddLocalModule( mappedModuleBucket, - moduleDirPath, + moduleTargeting.moduleDirPath, moduleTargeting.isTargetModule, bufmodule.LocalModuleWithModuleFullName(moduleConfig.ModuleFullName()), bufmodule.LocalModuleWithTargetPaths( @@ -539,22 +407,14 @@ func (w *workspaceProvider) getWorkspaceForBucketBufYAMLV2( ), ) } - if !hadIsTentativelyTargetModule { - return nil, syserror.Newf("targetSubDirPath %q did not result in any target modules from moduleDirPaths %v", config.targetSubDirPath, moduleDirPaths) - } - if !hadIsTargetModule { - // It would be nice to have a better error message than this in the long term. - return nil, bufmodule.ErrNoTargetProtoFiles - } moduleSet, err := moduleSetBuilder.Build() if err != nil { return nil, err } - // bufYAMLFile.ConfiguredDepModuleRefs() is unique by ModuleFullName. return w.getWorkspaceForBucketModuleSet( moduleSet, - bucketIDToModuleConfig, - bufYAMLFile.ConfiguredDepModuleRefs(), + v2Targeting.bucketIDToModuleConfig, + v2Targeting.bufYAMLFile.ConfiguredDepModuleRefs(), true, ) } @@ -591,101 +451,3 @@ func (w *workspaceProvider) getWorkspaceForBucketModuleSet( isV2, ), nil } - -func getModuleConfigAndConfiguredDepModuleRefsV1Beta1OrV1( - ctx context.Context, - bucket storage.ReadBucket, - moduleDirPath string, - overrideBufYAMLFile bufconfig.BufYAMLFile, -) (bufconfig.ModuleConfig, []bufmodule.ModuleRef, error) { - var bufYAMLFile bufconfig.BufYAMLFile - var err error - if overrideBufYAMLFile != nil { - bufYAMLFile = overrideBufYAMLFile - } else { - bufYAMLFile, err = bufconfig.GetBufYAMLFileForPrefix(ctx, bucket, moduleDirPath) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - // If we do not have a buf.yaml, we use the default config. - // This is a v1 config. - return bufconfig.DefaultModuleConfigV1, nil, nil - } - return nil, nil, err - } - } - // Just a sanity check. This should have already been validated, but let's make sure. - if bufYAMLFile.FileVersion() != bufconfig.FileVersionV1Beta1 && bufYAMLFile.FileVersion() != bufconfig.FileVersionV1 { - return nil, nil, syserror.Newf("buf.yaml at %s did not have version v1beta1 or v1", moduleDirPath) - } - moduleConfigs := bufYAMLFile.ModuleConfigs() - if len(moduleConfigs) != 1 { - // This is a system error. This should never happen. 
- return nil, nil, syserror.Newf("received %d ModuleConfigs from a v1beta1 or v1 BufYAMLFile", len(moduleConfigs)) - } - return moduleConfigs[0], bufYAMLFile.ConfiguredDepModuleRefs(), nil -} - -func getMappedModuleBucketAndModuleTargeting( - ctx context.Context, - bucket storage.ReadBucket, - config *workspaceBucketConfig, - moduleDirPath string, - moduleConfig bufconfig.ModuleConfig, - isTargetModule bool, -) (storage.ReadBucket, *moduleTargeting, error) { - moduleBucket := storage.MapReadBucket( - bucket, - storage.MapOnPrefix(moduleDirPath), - ) - rootToExcludes := moduleConfig.RootToExcludes() - var rootBuckets []storage.ReadBucket - for root, excludes := range rootToExcludes { - // Roots only applies to .proto files. - mappers := []storage.Mapper{ - // need to do match extension here - // https://github.com/bufbuild/buf/issues/113 - storage.MatchPathExt(".proto"), - storage.MapOnPrefix(root), - } - if len(excludes) != 0 { - var notOrMatchers []storage.Matcher - for _, exclude := range excludes { - notOrMatchers = append( - notOrMatchers, - storage.MatchPathContained(exclude), - ) - } - mappers = append( - mappers, - storage.MatchNot( - storage.MatchOr( - notOrMatchers..., - ), - ), - ) - } - rootBuckets = append( - rootBuckets, - storage.MapReadBucket( - moduleBucket, - mappers..., - ), - ) - } - rootBuckets = append( - rootBuckets, - bufmodule.GetDocStorageReadBucket(ctx, moduleBucket), - bufmodule.GetLicenseStorageReadBucket(moduleBucket), - ) - mappedModuleBucket := storage.MultiReadBucket(rootBuckets...) - moduleTargeting, err := newModuleTargeting( - moduleDirPath, - slicesext.MapKeysToSlice(rootToExcludes), - config, - isTargetModule, - ) - if err != nil { - return nil, nil, err - } - return mappedModuleBucket, moduleTargeting, nil -} diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index 2898e5dc45..c5dfa8638d 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -17,72 +17,101 @@ package bufworkspace import ( "context" "errors" + "fmt" + "io/fs" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufconfig" + "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/pkg/normalpath" + "github.com/bufbuild/buf/private/pkg/slicesext" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/syserror" "go.uber.org/zap" ) -// workspaceTargeting figures out what directories to target and at what version for both -// the WorkpaceProvider and WorkspaceDepManagerProvider. +// workspaceTargeting figures out if we are working with a v1 or v2 workspace based on +// buftarget.BucketTargeting information and provides the workspace targeting information +// to WorkspaceProvider and WorkspaceDepManagerProvider. +// +// For v1 workspaces, this provides the bucket IDs to module configs, mapped module buckets and +// module targeting information, and all configured dependency module refs. +// +// # For v2 workspaces, this provides the bucket IDs to module configs, module dir paths, +// and mapped module buckets and module targeting. +// +// In the case where no controlling workspace is found, we default to a v1 workspace with +// a single module directory path, which is equivalent to the input path. +// +// We only ever return one of v1 or v2. type workspaceTargeting struct { - // v1DirPaths are the target v1 directory paths that should contain Modules. 
- // - // This is set if we are within a buf.work.yaml, if we directly targeted a v1 buf.yaml, or - // if no configuration was found (in which case we default to v1). - v1DirPaths []string - // v2DirPath is the path to the v2 buf.yaml. - v2DirPath string + v1 *v1Targeting + v2 *v2Targeting +} + +type v1Targeting struct { + bucketIDToModuleConfig map[string]bufconfig.ModuleConfig + moduleBucketsAndTargeting []*moduleBucketAndModuleTargeting + allConfiguredDepModuleRefs []bufmodule.ModuleRef +} + +type v2Targeting struct { + bufYAMLFile bufconfig.BufYAMLFile + bucketIDToModuleConfig map[string]bufconfig.ModuleConfig + moduleBucketsAndTargeting []*moduleBucketAndModuleTargeting +} + +type moduleBucketAndModuleTargeting struct { + bucket storage.ReadBucket + moduleTargeting *moduleTargeting } func newWorkspaceTargeting( ctx context.Context, logger *zap.Logger, + config *workspaceBucketConfig, bucket storage.ReadBucket, - targetSubDirPath string, + bucketTargeting buftarget.BucketTargeting, overrideBufYAMLFile bufconfig.BufYAMLFile, ignoreAndDisallowV1BufWorkYAMLs bool, ) (*workspaceTargeting, error) { - targetSubDirPath, err := normalpath.NormalizeAndValidate(targetSubDirPath) - if err != nil { - return nil, err - } if overrideBufYAMLFile != nil { logger.Debug( "targeting workspace with config override", - zap.String("targetSubDirPath", targetSubDirPath), + zap.String("input path", bucketTargeting.InputPath()), ) switch fileVersion := overrideBufYAMLFile.FileVersion(); fileVersion { case bufconfig.FileVersionV1Beta1, bufconfig.FileVersionV1: - // Operate as if there was no buf.work.yaml, only a v1 buf.yaml at the specified - // targetSubDirPath, specifying a single module. - return &workspaceTargeting{ - v1DirPaths: []string{targetSubDirPath}, - }, nil + return v1WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + []string{bucketTargeting.InputPath()}, // we assume a v1 workspace with a single v1 module at the input path + overrideBufYAMLFile, + ) case bufconfig.FileVersionV2: - // Operate as if there was a v2 buf.yaml at the target sub directory path. - return &workspaceTargeting{ - v2DirPath: targetSubDirPath, - }, nil + return v2WorkspaceTargeting(ctx, config, bucket, bucketTargeting, overrideBufYAMLFile) default: return nil, syserror.Newf("unknown FileVersion: %v", fileVersion) } } - - findControllingWorkspaceResult, err := bufconfig.FindControllingWorkspace(ctx, bucket, ".", targetSubDirPath) - if err != nil { - return nil, err - } - if findControllingWorkspaceResult.Found() { - // We have a v1 buf.work.yaml, per the documentation on bufconfig.FindControllingWorkspace. - if bufWorkYAMLDirPaths := findControllingWorkspaceResult.BufWorkYAMLDirPaths(); len(bufWorkYAMLDirPaths) > 0 { + if controllingWorkspace := bucketTargeting.ControllingWorkspace(); controllingWorkspace != nil { + // This is a v2 workspace. + if controllingWorkspace.BufYAMLFile() != nil { + logger.Debug( + "targeting workspace based on v2 buf.yaml", + zap.String("input path", bucketTargeting.InputPath()), + ) + return v2WorkspaceTargeting(ctx, config, bucket, bucketTargeting, controllingWorkspace.BufYAMLFile()) + } + // This is a v1 workspace. + if bufWorkYAMLFile := controllingWorkspace.BufWorkYAMLFile(); bufWorkYAMLFile != nil { if ignoreAndDisallowV1BufWorkYAMLs { - // targetSubDirPath is normalized, so if it was empty, it will be ".". - if targetSubDirPath == "." 
{ - // If config.targetSubDirPath is ".", this means we targeted a buf.work.yaml, not an individual module within the buf.work.yaml - // This is disallowed. + // This means that we attempted to target a v1 workspace at the buf.work.yaml, not + // an individual module within the v1 workspace defined in buf.work.yaml. + // This is disallowed. + if bucketTargeting.InputPath() == "." { return nil, errors.New(`Workspaces defined with buf.work.yaml cannot be updated or pushed, only the individual modules within a workspace can be updated or pushed. Workspaces defined with a v2 buf.yaml can be updated, see the migration documentation for more details.`) @@ -91,42 +120,309 @@ defined with a v2 buf.yaml can be updated, see the migration documentation for m // the workspace entirely, and just act as if the buf.work.yaml did not exist. logger.Debug( "targeting workspace, ignoring v1 buf.work.yaml, just building on module at target", - zap.String("targetSubDirPath", targetSubDirPath), + zap.String("input path", bucketTargeting.InputPath()), + ) + return v1WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + []string{bucketTargeting.InputPath()}, // Assume we are targeting only the module at the input path + nil, ) - return &workspaceTargeting{ - v1DirPaths: []string{targetSubDirPath}, - }, nil } - logger.Debug( - "targeting workspace based on v1 buf.work.yaml", - zap.String("targetSubDirPath", targetSubDirPath), - zap.Strings("bufWorkYAMLDirPaths", bufWorkYAMLDirPaths), + return v1WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + bufWorkYAMLFile.DirPaths(), + nil, ) - return &workspaceTargeting{ - v1DirPaths: bufWorkYAMLDirPaths, - }, nil } - logger.Debug( - "targeting workspace based on v2 buf.yaml", - zap.String("targetSubDirPath", targetSubDirPath), - ) - // We have a v2 buf.yaml. - return &workspaceTargeting{ - v2DirPath: ".", - }, nil } - logger.Debug( "targeting workspace with no found buf.work.yaml or buf.yaml", - zap.String("targetSubDirPath", targetSubDirPath), + zap.String("input path", bucketTargeting.InputPath()), ) // We did not find any buf.work.yaml or buf.yaml, operate as if a - // default v1 buf.yaml was at config.targetSubDirPath. + // default v1 buf.yaml was at bucketTargeting.InputPath(). + return v1WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + []string{bucketTargeting.InputPath()}, + nil, + ) +} + +func v2WorkspaceTargeting( + ctx context.Context, + config *workspaceBucketConfig, + bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, + bufYAMLFile bufconfig.BufYAMLFile, +) (*workspaceTargeting, error) { + workspaceDirPath := bucketTargeting.InputPath() + if controllingWorkspace := bucketTargeting.ControllingWorkspace(); controllingWorkspace != nil { + workspaceDirPath = controllingWorkspace.Path() + } + bucket = storage.MapReadBucket(bucket, storage.MapOnPrefix(workspaceDirPath)) + // We keep track of if any module was tentatively targeted, and then actually targeted via + // the paths flags. We use this pre-building of the ModuleSet to see if the --path and + // --exclude-path flags resulted in no targeted modules. 
This condition is represented + // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false + // + // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputPath() was not + // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml + // directories, and this is a system error - this should be verified before we reach this function. + var hadIsTentativelyTargetModule bool + var hadIsTargetModule bool + moduleDirPaths := make([]string, 0, len(bufYAMLFile.ModuleConfigs())) + bucketIDToModuleConfig := make(map[string]bufconfig.ModuleConfig) + moduleBucketsAndTargeting := make([]*moduleBucketAndModuleTargeting, 0, len(bufYAMLFile.ModuleConfigs())) + for _, moduleConfig := range bufYAMLFile.ModuleConfigs() { + moduleDirPath := moduleConfig.DirPath() + moduleDirPaths = append(moduleDirPaths, moduleDirPath) + bucketIDToModuleConfig[moduleDirPath] = moduleConfig + // bucketTargeting.InputPath() is the input targetSubDirPath. We only want to target modules that are inside + // this targetSubDirPath. Example: bufWorkYAMLDirPath is "foo", targetSubDirPath is "foo/bar", + // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" + // and "foo/bar/bat". + // + // This is new behavior - before, we required that you input an exact match for the module directory path, + // but now, we will take all the modules underneath this workspace. + isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputPath(), moduleDirPath, normalpath.Relative) + if isTentativelyTargetModule { + hadIsTentativelyTargetModule = true + } + mappedModuleBucket, moduleTargeting, err := getMappedModuleBucketAndModuleTargeting( + ctx, + config, + bucket, + bucketTargeting, + moduleDirPath, + moduleConfig, + isTentativelyTargetModule, + ) + if err != nil { + return nil, err + } + if moduleTargeting.isTargetModule { + hadIsTargetModule = true + } + moduleBucketsAndTargeting = append(moduleBucketsAndTargeting, &moduleBucketAndModuleTargeting{ + bucket: mappedModuleBucket, + moduleTargeting: moduleTargeting, + }) + } + if !hadIsTentativelyTargetModule { + return nil, fmt.Errorf("input path %q did not contain modules found in workspace %v", bucketTargeting.InputPath(), moduleDirPaths) + } + if !hadIsTargetModule { + // It would be nice to have a better error message than this in the long term. + return nil, bufmodule.ErrNoTargetProtoFiles + } return &workspaceTargeting{ - v1DirPaths: []string{targetSubDirPath}, + v2: &v2Targeting{ + bufYAMLFile: bufYAMLFile, + bucketIDToModuleConfig: bucketIDToModuleConfig, + moduleBucketsAndTargeting: moduleBucketsAndTargeting, + }, }, nil } -func (w *workspaceTargeting) isV2() bool { - return w.v2DirPath != "" +func v1WorkspaceTargeting( + ctx context.Context, + config *workspaceBucketConfig, + bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, + moduleDirPaths []string, + overrideBufYAMLFile bufconfig.BufYAMLFile, +) (*workspaceTargeting, error) { + // We keep track of if any module was tentatively targeted, and then actually targeted via + // the paths flags. We use this pre-building of the ModuleSet to see if the --path and + // --exclude-path flags resulted in no targeted modules. 
This condition is represented + // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false + // + // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputPath() was not + // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml + // directories, and this is a system error - this should be verified before we reach this function. + var hadIsTentativelyTargetModule bool + var hadIsTargetModule bool + var allConfiguredDepModuleRefs []bufmodule.ModuleRef + bucketIDToModuleConfig := make(map[string]bufconfig.ModuleConfig) + // We use this to detect different refs across different files. + moduleFullNameStringToConfiguredDepModuleRefString := make(map[string]string) + moduleBucketsAndTargeting := make([]*moduleBucketAndModuleTargeting, 0, len(moduleDirPaths)) + for _, moduleDirPath := range moduleDirPaths { + moduleConfig, configuredDepModuleRefs, err := getModuleConfigAndConfiguredDepModuleRefsV1Beta1OrV1( + ctx, + bucket, + moduleDirPath, + overrideBufYAMLFile, + ) + if err != nil { + return nil, err + } + for _, configuredDepModuleRef := range configuredDepModuleRefs { + moduleFullNameString := configuredDepModuleRef.ModuleFullName().String() + configuredDepModuleRefString := configuredDepModuleRef.String() + existingConfiguredDepModuleRefString, ok := moduleFullNameStringToConfiguredDepModuleRefString[moduleFullNameString] + if !ok { + // We haven't encountered a ModuleRef with this ModuleFullName yet, add it. + allConfiguredDepModuleRefs = append(allConfiguredDepModuleRefs, configuredDepModuleRef) + moduleFullNameStringToConfiguredDepModuleRefString[moduleFullNameString] = configuredDepModuleRefString + } else if configuredDepModuleRefString != existingConfiguredDepModuleRefString { + // We encountered the same ModuleRef by ModuleFullName, but with a different Ref. + return nil, fmt.Errorf("found different refs for the same module within buf.yaml deps in the workspace: %s %s", configuredDepModuleRefString, existingConfiguredDepModuleRefString) + } + } + bucketIDToModuleConfig[moduleDirPath] = moduleConfig + // We only want to target modules that are inside the bucketTargeting.InputPath(). + // Example: bufWorkYAMLDirPath is "foo", bucketTargeting.InputPath() is "foo/bar", + // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" + // and "foo/bar/bat". + // + // This is new behavior - before, we required that you input an exact match for the module directory path, + // but now, we will take all the modules underneath this workspace. 
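The containment check described above treats a module as tentatively targeted when the input path equals the module directory or is an ancestor of it. A small illustration (hypothetical paths, not from this patch):

	// true: the module at "foo/bar/baz" sits under the input path "foo/bar".
	_ = normalpath.EqualsOrContainsPath("foo/bar", "foo/bar/baz", normalpath.Relative)
	// false: the module at "other" is outside the input path and stays untargeted here.
	_ = normalpath.EqualsOrContainsPath("foo/bar", "other", normalpath.Relative)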
+ isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputPath(), moduleDirPath, normalpath.Relative) + if isTentativelyTargetModule { + hadIsTentativelyTargetModule = true + } + mappedModuleBucket, moduleTargeting, err := getMappedModuleBucketAndModuleTargeting( + ctx, + config, + bucket, + bucketTargeting, + moduleDirPath, + moduleConfig, + isTentativelyTargetModule, + ) + if err != nil { + return nil, err + } + if moduleTargeting.isTargetModule { + hadIsTargetModule = true + } + moduleBucketsAndTargeting = append(moduleBucketsAndTargeting, &moduleBucketAndModuleTargeting{ + bucket: mappedModuleBucket, + moduleTargeting: moduleTargeting, + }) + } + if !hadIsTentativelyTargetModule { + return nil, syserror.Newf("subDirPath %q did not result in any target modules from moduleDirPaths %v", bucketTargeting.InputPath(), moduleDirPaths) + } + if !hadIsTargetModule { + // It would be nice to have a better error message than this in the long term. + return nil, bufmodule.ErrNoTargetProtoFiles + } + return &workspaceTargeting{ + v1: &v1Targeting{ + bucketIDToModuleConfig: bucketIDToModuleConfig, + moduleBucketsAndTargeting: moduleBucketsAndTargeting, + allConfiguredDepModuleRefs: allConfiguredDepModuleRefs, + }, + }, nil +} + +func getMappedModuleBucketAndModuleTargeting( + ctx context.Context, + config *workspaceBucketConfig, + bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, + moduleDirPath string, + moduleConfig bufconfig.ModuleConfig, + isTargetModule bool, +) (storage.ReadBucket, *moduleTargeting, error) { + moduleBucket := storage.MapReadBucket( + bucket, + storage.MapOnPrefix(moduleDirPath), + ) + rootToExcludes := moduleConfig.RootToExcludes() + var rootBuckets []storage.ReadBucket + for root, excludes := range rootToExcludes { + // Roots only applies to .proto files. + mappers := []storage.Mapper{ + // need to do match extension here + // https://github.com/bufbuild/buf/issues/113 + storage.MatchPathExt(".proto"), + storage.MapOnPrefix(root), + } + if len(excludes) != 0 { + var notOrMatchers []storage.Matcher + for _, exclude := range excludes { + notOrMatchers = append( + notOrMatchers, + storage.MatchPathContained(exclude), + ) + } + mappers = append( + mappers, + storage.MatchNot( + storage.MatchOr( + notOrMatchers..., + ), + ), + ) + } + rootBuckets = append( + rootBuckets, + storage.MapReadBucket( + moduleBucket, + mappers..., + ), + ) + } + rootBuckets = append( + rootBuckets, + bufmodule.GetDocStorageReadBucket(ctx, moduleBucket), + bufmodule.GetLicenseStorageReadBucket(moduleBucket), + ) + mappedModuleBucket := storage.MultiReadBucket(rootBuckets...) + moduleTargeting, err := newModuleTargeting( + moduleDirPath, + slicesext.MapKeysToSlice(rootToExcludes), + bucketTargeting, + config, + isTargetModule, + ) + if err != nil { + return nil, nil, err + } + return mappedModuleBucket, moduleTargeting, nil +} + +func getModuleConfigAndConfiguredDepModuleRefsV1Beta1OrV1( + ctx context.Context, + bucket storage.ReadBucket, + moduleDirPath string, + overrideBufYAMLFile bufconfig.BufYAMLFile, +) (bufconfig.ModuleConfig, []bufmodule.ModuleRef, error) { + var bufYAMLFile bufconfig.BufYAMLFile + var err error + if overrideBufYAMLFile != nil { + bufYAMLFile = overrideBufYAMLFile + } else { + bufYAMLFile, err = bufconfig.GetBufYAMLFileForPrefix(ctx, bucket, moduleDirPath) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // If we do not have a buf.yaml, we use the default config. + // This is a v1 config. 
+ return bufconfig.DefaultModuleConfigV1, nil, nil + } + return nil, nil, err + } + } + // Just a sanity check. This should have already been validated, but let's make sure. + if bufYAMLFile.FileVersion() != bufconfig.FileVersionV1Beta1 && bufYAMLFile.FileVersion() != bufconfig.FileVersionV1 { + return nil, nil, syserror.Newf("buf.yaml at %s did not have version v1beta1 or v1", moduleDirPath) + } + moduleConfigs := bufYAMLFile.ModuleConfigs() + if len(moduleConfigs) != 1 { + // This is a system error. This should never happen. + return nil, nil, syserror.Newf("received %d ModuleConfigs from a v1beta1 or v1 BufYAMLFile", len(moduleConfigs)) + } + return moduleConfigs[0], bufYAMLFile.ConfiguredDepModuleRefs(), nil } diff --git a/private/buf/bufworkspace/workspace_test.go b/private/buf/bufworkspace/workspace_test.go index 8c899de677..949631e818 100644 --- a/private/buf/bufworkspace/workspace_test.go +++ b/private/buf/bufworkspace/workspace_test.go @@ -20,6 +20,7 @@ import ( "io/fs" "testing" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduletesting" "github.com/bufbuild/buf/private/pkg/dag/dagtest" @@ -28,6 +29,7 @@ import ( "github.com/bufbuild/buf/private/pkg/tracing" "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zaptest" ) func TestBasicV1(t *testing.T) { @@ -60,12 +62,24 @@ func testBasic(t *testing.T, subDirPath string) { bucket, err := storageosProvider.NewReadWriteBucket(normalpath.Join("testdata/basic", subDirPath)) require.NoError(t, err) + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + zaptest.NewLogger(t), + bucket, + "finance/portfolio/proto", + nil, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NotNil(t, bucketTargeting.ControllingWorkspace()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "finance/portfolio/proto", bucketTargeting.InputPath()) + require.NoError(t, err) + workspace, err := workspaceProvider.GetWorkspaceForBucket( ctx, bucket, - WithTargetSubDirPath( - "finance/portfolio/proto", - ), + bucketTargeting, ) require.NoError(t, err) module := workspace.GetModuleForOpaqueID("buf.testing/acme/bond") @@ -127,23 +141,29 @@ func testBasic(t *testing.T, subDirPath string) { _, err = module.StatFileInfo(ctx, "LICENSE") require.NoError(t, err) - //malformedDeps, err := MalformedDepsForWorkspace(workspace) - //require.NoError(t, err) - //require.Equal(t, 1, len(malformedDeps)) - //malformedDep := malformedDeps[0] - //require.Equal(t, "buf.testing/acme/extension", malformedDep.ModuleFullName().String()) - //require.Equal(t, MalformedDepTypeUndeclared, malformedDep.Type()) + bucketTargeting, err = buftarget.NewBucketTargeting( + ctx, + zaptest.NewLogger(t), + bucket, + "common/money/proto", + []string{"common/money/proto/acme/money/v1/currency_code.proto"}, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NoError(t, err) + require.NotNil(t, bucketTargeting.ControllingWorkspace()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "common/money/proto", bucketTargeting.InputPath()) + require.Equal( + t, + []string{"common/money/proto/acme/money/v1/currency_code.proto"}, + bucketTargeting.TargetPaths(), + ) workspace, err = workspaceProvider.GetWorkspaceForBucket( ctx, bucket, - WithTargetSubDirPath( - "common/money/proto", - ), - WithTargetPaths( - []string{"common/money/proto/acme/money/v1/currency_code.proto"}, 
- nil, - ), + bucketTargeting, ) require.NoError(t, err) module = workspace.GetModuleForOpaqueID("buf.testing/acme/money") @@ -177,10 +197,24 @@ func TestUnusedDep(t *testing.T) { storageosProvider := storageos.NewProvider() bucket, err := storageosProvider.NewReadWriteBucket("testdata/basic/workspace_unused_dep") require.NoError(t, err) + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + zaptest.NewLogger(t), + bucket, + ".", + nil, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NoError(t, err) + require.NotNil(t, bucketTargeting.ControllingWorkspace()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, ".", bucketTargeting.InputPath()) workspace, err := workspaceProvider.GetWorkspaceForBucket( ctx, bucket, + bucketTargeting, ) require.NoError(t, err) diff --git a/private/buf/cmd/buf/buf_test.go b/private/buf/cmd/buf/buf_test.go index 700e1cfb16..59b3251f83 100644 --- a/private/buf/cmd/buf/buf_test.go +++ b/private/buf/cmd/buf/buf_test.go @@ -241,8 +241,6 @@ func TestFail6(t *testing.T) { func TestFail7(t *testing.T) { t.Parallel() - // TODO failing test - t.Skip() testRunStdout( t, nil, @@ -336,8 +334,6 @@ func TestFail10(t *testing.T) { func TestFail11(t *testing.T) { t.Parallel() - // TODO failing test - t.Skip() testRunStdout( t, nil, @@ -1299,8 +1295,6 @@ func TestExportExcludeImports(t *testing.T) { func TestExportPaths(t *testing.T) { t.Parallel() - // TODO failing test - t.Skip() tempDir := t.TempDir() testRunStdout( t, @@ -1326,8 +1320,6 @@ func TestExportPaths(t *testing.T) { func TestExportPathsAndExcludes(t *testing.T) { t.Parallel() - // TODO failing test - t.Skip() tempDir := t.TempDir() testRunStdout( t, @@ -1489,7 +1481,7 @@ func TestBuildWithPaths(t *testing.T) { ``, // This is new post-refactor. Before, we gave precedence to --path. While a change, // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. - filepath.FromSlash(`Failure: excluded path "a/v3" contains targeted path "a/v3/foo", which means all paths in "a/v3/foo" will be excluded`), + filepath.FromSlash(`Failure: excluded path "testdata/paths/a/v3" contains targeted path "testdata/paths/a/v3/foo", which means all paths in "testdata/paths/a/v3/foo" will be excluded`), "build", filepath.Join("testdata", "paths"), "--path", @@ -1521,7 +1513,7 @@ func TestLintWithPaths(t *testing.T) { "", // This is new post-refactor. Before, we gave precedence to --path. While a change, // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
- filepath.FromSlash(`Failure: excluded path "a/v3" contains targeted path "a/v3/foo", which means all paths in "a/v3/foo" will be excluded`), + filepath.FromSlash(`Failure: excluded path "testdata/paths/a/v3" contains targeted path "testdata/paths/a/v3/foo", which means all paths in "testdata/paths/a/v3/foo" will be excluded`), "lint", filepath.Join("testdata", "paths"), "--path", diff --git a/private/buf/cmd/buf/command/generate/generate_unix_test.go b/private/buf/cmd/buf/command/generate/generate_unix_test.go index 23340739fd..dbb42d7ee4 100644 --- a/private/buf/cmd/buf/command/generate/generate_unix_test.go +++ b/private/buf/cmd/buf/command/generate/generate_unix_test.go @@ -26,8 +26,6 @@ import ( ) func TestProtoFileRef(t *testing.T) { - // TODO: failing test, un-skip this once --path and --exclude-path are updated - t.Skip() t.Parallel() tempDirPath := t.TempDir() testRunSuccess( @@ -79,8 +77,6 @@ func TestOutputWithExclude(t *testing.T) { } func TestOutputWithPathWithinExclude(t *testing.T) { - // TODO: failing test, un-skip this once --path and --exclude-path are updated - t.Skip() t.Parallel() tempDirPath := t.TempDir() testRunSuccess( @@ -128,8 +124,6 @@ func TestOutputWithExcludeWithinPath(t *testing.T) { } func TestOutputWithNestedExcludeAndTargetPaths(t *testing.T) { - // TODO: failing test, un-skip this once --path and --exclude-path are updated - t.Skip() t.Parallel() tempDirPath := t.TempDir() testRunSuccess( @@ -166,8 +160,6 @@ func TestOutputWithNestedExcludeAndTargetPaths(t *testing.T) { } func TestWorkspaceGenerateWithExcludeAndTargetPaths(t *testing.T) { - // TODO: failing test, un-skip this once --path and --exclude-path are updated - t.Skip() t.Parallel() tempDirPath := t.TempDir() testRunSuccess( diff --git a/private/buf/cmd/buf/workspace_test.go b/private/buf/cmd/buf/workspace_test.go index 56cb4875c8..b50273f017 100644 --- a/private/buf/cmd/buf/workspace_test.go +++ b/private/buf/cmd/buf/workspace_test.go @@ -1383,7 +1383,7 @@ func TestWorkspaceWithInvalidArchiveAbsolutePathFail(t *testing.T) { 1, ``, filepath.FromSlash(fmt.Sprintf( - `Failure: %s/proto/rpc.proto: absolute paths cannot be used for this input type`, + `Failure: %s/proto/rpc.proto: expected to be relative`, wd, )), "lint", @@ -1402,7 +1402,7 @@ func TestWorkspaceWithInvalidArchiveAbsolutePathFail(t *testing.T) { 1, ``, filepath.FromSlash(fmt.Sprintf( - `Failure: %s/proto/rpc.proto: absolute paths cannot be used for this input type`, + `Failure: %s/proto/rpc.proto: expected to be relative`, wd, )), "lint", diff --git a/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go b/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go index 77f676298c..66dfe0071d 100644 --- a/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go +++ b/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go @@ -172,8 +172,6 @@ func TestWorkspaceSubDirectory(t *testing.T) { func TestWorkspaceOverlapSubDirectory(t *testing.T) { // Specify an overlapping input in a sub-directory. 
t.Parallel() - // TODO failing test - t.Skip() testRunStdoutStderr( t, nil, diff --git a/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go b/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go index 7c795db4d5..49ca07a19a 100644 --- a/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go +++ b/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/buf/bufworkspace" "github.com/bufbuild/buf/private/bufpkg/bufanalysis" "github.com/bufbuild/buf/private/bufpkg/bufanalysis/bufanalysistesting" @@ -32,6 +33,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zaptest" ) func TestRunBreakingEnumNoDelete(t *testing.T) { @@ -753,6 +755,7 @@ func testBreaking( ) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() + logger := zaptest.NewLogger(t) previousDirPath := filepath.Join("testdata_previous", relDirPath) dirPath := filepath.Join("testdata", relDirPath) @@ -763,11 +766,31 @@ func testBreaking( storageos.ReadWriteBucketWithSymlinksIfSupported(), ) require.NoError(t, err) + previousBucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + logger, + previousReadWriteBucket, + previousDirPath, + nil, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NoError(t, err) readWriteBucket, err := storageosProvider.NewReadWriteBucket( dirPath, storageos.ReadWriteBucketWithSymlinksIfSupported(), ) require.NoError(t, err) + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + logger, + readWriteBucket, + dirPath, + nil, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NoError(t, err) workspaceProvider := bufworkspace.NewWorkspaceProvider( zap.NewNop(), @@ -779,11 +802,13 @@ func testBreaking( previousWorkspace, err := workspaceProvider.GetWorkspaceForBucket( ctx, previousReadWriteBucket, + previousBucketTargeting, ) require.NoError(t, err) workspace, err := workspaceProvider.GetWorkspaceForBucket( ctx, readWriteBucket, + bucketTargeting, ) require.NoError(t, err) diff --git a/private/bufpkg/bufcheck/buflint/buflint_test.go b/private/bufpkg/bufcheck/buflint/buflint_test.go index 8a48f4fc05..4931415ff8 100644 --- a/private/bufpkg/bufcheck/buflint/buflint_test.go +++ b/private/bufpkg/bufcheck/buflint/buflint_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/buf/bufworkspace" "github.com/bufbuild/buf/private/bufpkg/bufanalysis" "github.com/bufbuild/buf/private/bufpkg/bufanalysis/bufanalysistesting" @@ -32,6 +33,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zaptest" ) // Hint on how to get these: @@ -1167,6 +1169,16 @@ func testLintWithOptions( storageos.ReadWriteBucketWithSymlinksIfSupported(), ) require.NoError(t, err) + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + zaptest.NewLogger(t), + readWriteBucket, + dirPath, + nil, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NoError(t, err) workspace, err := bufworkspace.NewWorkspaceProvider( zap.NewNop(), tracing.NopTracer, @@ -1176,6 +1188,7 @@ func testLintWithOptions( ).GetWorkspaceForBucket( ctx, readWriteBucket, + bucketTargeting, ) require.NoError(t, err) diff --git a/private/bufpkg/bufconfig/terminate.go b/private/bufpkg/bufconfig/terminate.go deleted file mode 100644 
index 014f902de5..0000000000 --- a/private/bufpkg/bufconfig/terminate.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2020-2024 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bufconfig - -import ( - "context" - "errors" - "fmt" - "io/fs" - - "github.com/bufbuild/buf/private/pkg/normalpath" - "github.com/bufbuild/buf/private/pkg/slicesext" - "github.com/bufbuild/buf/private/pkg/storage" -) - -// FindControllingWorkspaceResult is a result from FindControllingWorkspace. -type FindControllingWorkspaceResult interface { - // If true, a controlling workspace was found. - Found() bool - // If non-empty, this indicates that the found workspace was a buf.work.yaml, - // and the directory paths were these paths. - // - // These paths include the input prefix, and are the full paths to the directories - // within the input bucket. - // - // If empty, this indicates that the found workspace was a buf.yaml. - BufWorkYAMLDirPaths() []string - - isFindControllingWorkspaceResult() -} - -// FindControllingWorkspace searches for a workspace file at prefix that controls originalSubDirPath. -// -// # A workspace file is either a buf.work.yaml file or a v2 buf.yaml file -// -// The workspace file controls originalSubDirPath if either: -// 1. prefix == originalSubDirPath, that is we're just directly targeting originalSubDirPath. -// 3. The workspace file refers to the originalSubDirPath via "directories" in buf.work.yaml or "directory" in buf.yaml. -// -// This is used by both buffetch/internal.Reader via the Prefix functions, and NewWorkspaceForBucket, -// which do their own independent searches and do not depend on each other. -func FindControllingWorkspace( - ctx context.Context, - bucket storage.ReadBucket, - prefix string, - originalSubDirPath string, -) (FindControllingWorkspaceResult, error) { - return findControllingWorkspace(ctx, bucket, prefix, originalSubDirPath, true) -} - -// TerminateForNonProtoFileRef returns true if the bucket contains a workspace file -// that controls originalSubDirPath at the prefix. -// -// See the commentary on FindControllingWorkspace. -// -// This is used by buffetch when searching for the root of the workspace. -// See buffetch/internal.WithGetBucketTerminateFunc for more information. -func TerminateAtControllingWorkspace( - ctx context.Context, - bucket storage.ReadBucket, - prefix string, - originalSubDirPath string, -) (bool, error) { - findControllingWorkspaceResult, err := findControllingWorkspace(ctx, bucket, prefix, originalSubDirPath, true) - if err != nil { - return false, err - } - return findControllingWorkspaceResult.Found(), nil -} - -// TerminateAtEnclosingModuleOrWorkspaceForProtoFileRef returns true if the bucket contains -// either a module file or workspace file at the prefix. -// -// A module file is either a v1 or v1beta1 buf.yaml or buf.mod file, or a v2 buf.yaml file that -// has a module with directory ".". This is configuration for a module rooted at this directory. 
-// -// A workspace file is either a buf.work.yaml file or a v2 buf.yaml file. -// -// As opposed to TerminateForNonProtoFileRef, this does not require the prefix to point to the -// originalSubDirPath - ProtoFileRefs assume that if you have a workspace file, it controls the ProtoFileRef. -// -// This is used by buffetch when searching for the root of the module when dealing with ProtoFileRefs. -// See buffetch/internal.WithGetBucketProtoFileTerminateFunc for more information. -func TerminateAtEnclosingModuleOrWorkspaceForProtoFileRef( - ctx context.Context, - bucket storage.ReadBucket, - prefix string, - originalSubDirPath string, -) (bool, error) { - findControllingWorkspaceResult, err := findControllingWorkspace(ctx, bucket, prefix, originalSubDirPath, false) - if err != nil { - return false, err - } - if findControllingWorkspaceResult.Found() { - return true, nil - } - - bufYAMLFile, err := GetBufYAMLFileForPrefix(ctx, bucket, prefix) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - return false, nil - } - return false, err - } - switch fileVersion := bufYAMLFile.FileVersion(); fileVersion { - case FileVersionV1, FileVersionV1Beta1: - // If we have a v1 or v1beta1 buf.yaml, we automatically know this is a module file. - return true, nil - case FileVersionV2: - // If we have a v2, this is only a "module file" if it contains a module with directory ".". - // Otherwise, we don't want to stop here. Remember, this only wants to stop at module files, - // not workspace files. - for _, moduleConfig := range bufYAMLFile.ModuleConfigs() { - if moduleConfig.DirPath() == "." { - return true, nil - } - } - return false, nil - default: - return false, fmt.Errorf("unknown FileVersion: %v", fileVersion) - } -} - -// *** PRIVATE *** - -type findControllingWorkspaceResult struct { - found bool - bufWorkYAMLDirPaths []string -} - -func newFindControllingWorkspaceResult( - found bool, - bufWorkYAMLDirPaths []string, -) *findControllingWorkspaceResult { - return &findControllingWorkspaceResult{ - found: found, - bufWorkYAMLDirPaths: bufWorkYAMLDirPaths, - } -} - -func (f *findControllingWorkspaceResult) Found() bool { - return f.found -} - -func (f *findControllingWorkspaceResult) BufWorkYAMLDirPaths() []string { - return f.bufWorkYAMLDirPaths -} - -func (*findControllingWorkspaceResult) isFindControllingWorkspaceResult() {} - -// findControllingWorkspace adds the property that the prefix may not be required to point to originalSubDirPath. -// -// We don't require the workspace file to point to originalSubDirPath when finding the enclosing module or -// workspace for a ProtoFileRef. -func findControllingWorkspace( - ctx context.Context, - bucket storage.ReadBucket, - prefix string, - originalSubDirPath string, - requirePrefixWorkspaceToPointToOriginalSubDirPath bool, -) (FindControllingWorkspaceResult, error) { - bufWorkYAMLFile, err := GetBufWorkYAMLFileForPrefix(ctx, bucket, prefix) - if err != nil && !errors.Is(err, fs.ErrNotExist) { - return nil, err - } - bufWorkYAMLExists := err == nil - bufYAMLFile, err := GetBufYAMLFileForPrefix(ctx, bucket, prefix) - if err != nil && !errors.Is(err, fs.ErrNotExist) { - return nil, err - } - bufYAMLExists := err == nil - if bufWorkYAMLExists && bufYAMLExists { - // This isn't actually the external directory path, but we do the best we can here for now. - return nil, fmt.Errorf("cannot have a buf.work.yaml and buf.yaml in the same directory %q", prefix) - } - - // Find the relative path of our original target subDirPath vs where we currently are. 
- // We only stop the loop if a v2 buf.yaml or a buf.work.yaml lists this directory, - // or if the original target subDirPath points ot the workspace file itself. - // - // Example: we inputted foo/bar/baz, we're currently at foo. We want to make sure - // that buf.work.yaml lists bar/baz as a directory. If so, this buf.work.yaml - // relates to our current directory. - // - // Example: we inputted foo/bar/baz, we're at foo/bar/baz. Great. - relDirPath, err := normalpath.Rel(prefix, originalSubDirPath) - if err != nil { - return nil, err - } - if bufYAMLExists && bufYAMLFile.FileVersion() == FileVersionV2 { - if !requirePrefixWorkspaceToPointToOriginalSubDirPath { - // We don't require the workspace to point to the prefix (likely because we're - // finding the controlling workspace for a ProtoFileRef), we're good to go. - return newFindControllingWorkspaceResult(true, nil), nil - } - if prefix == originalSubDirPath { - // We've referred to our workspace file directly, we're good to go. - return newFindControllingWorkspaceResult(true, nil), nil - } - dirPathMap := make(map[string]struct{}) - for _, moduleConfig := range bufYAMLFile.ModuleConfigs() { - dirPathMap[moduleConfig.DirPath()] = struct{}{} - } - if _, ok := dirPathMap[relDirPath]; ok { - // This workspace file refers to curDirPath, we're good to go. - return newFindControllingWorkspaceResult(true, nil), nil - } - } - if bufWorkYAMLExists { - _, refersToCurDirPath := slicesext.ToStructMap(bufWorkYAMLFile.DirPaths())[relDirPath] - if prefix == originalSubDirPath || refersToCurDirPath || !requirePrefixWorkspaceToPointToOriginalSubDirPath { - // We don't actually need to parse the buf.work.yaml again - we have all the information - // we need. Just figure out the actual paths within the bucket of the modules, and go - // right to newWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1. 
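The deleted comments above walk through the foo/bar/baz example for deciding whether a workspace file at a prefix controls the original target directory. A standalone sketch of that decision and of the subsequent mapping of buf.work.yaml directories back onto the bucket, assuming cleaned, slash-separated relative paths (illustrative only; the real code goes through normalpath and the bufconfig file types):

package main

import (
    "fmt"
    "path"
    "strings"
)

// relPath returns target relative to base for cleaned, slash-separated
// relative paths, or ok=false if target is not underneath base.
func relPath(base, target string) (rel string, ok bool) {
    base, target = path.Clean(base), path.Clean(target)
    switch {
    case base == ".":
        return target, true
    case target == base:
        return ".", true
    case strings.HasPrefix(target, base+"/"):
        return strings.TrimPrefix(target, base+"/"), true
    default:
        return "", false
    }
}

// controls reports whether a workspace file at prefix that lists dirPaths
// controls originalSubDirPath, and if so returns the module directories
// joined back onto the prefix.
func controls(prefix, originalSubDirPath string, dirPaths []string) (bool, []string) {
    relDirPath, ok := relPath(prefix, originalSubDirPath)
    if !ok {
        return false, nil
    }
    if prefix != originalSubDirPath {
        found := false
        for _, dirPath := range dirPaths {
            if dirPath == relDirPath {
                found = true
                break
            }
        }
        if !found {
            return false, nil
        }
    }
    moduleDirPaths := make([]string, len(dirPaths))
    for i, dirPath := range dirPaths {
        moduleDirPaths[i] = path.Join(prefix, dirPath)
    }
    return true, moduleDirPaths
}

func main() {
    // We inputted foo/bar/baz and are currently at foo; buf.work.yaml lists bar/baz.
    ok, moduleDirPaths := controls("foo", "foo/bar/baz", []string{"bar/baz", "other"})
    fmt.Println(ok, moduleDirPaths) // true [foo/bar/baz foo/other]
}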
- moduleDirPaths := make([]string, len(bufWorkYAMLFile.DirPaths())) - for i, dirPath := range bufWorkYAMLFile.DirPaths() { - moduleDirPaths[i] = normalpath.Join(prefix, dirPath) - } - return newFindControllingWorkspaceResult(true, moduleDirPaths), nil - } - } - return newFindControllingWorkspaceResult(false, nil), nil -} diff --git a/private/bufpkg/bufimage/build_image.go b/private/bufpkg/bufimage/build_image.go index 24b4a035c1..8fa5b6b4e8 100644 --- a/private/bufpkg/bufimage/build_image.go +++ b/private/bufpkg/bufimage/build_image.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "strings" "github.com/bufbuild/buf/private/bufpkg/bufanalysis" "github.com/bufbuild/buf/private/bufpkg/bufmodule" @@ -58,6 +59,7 @@ func buildImage( return nil, bufmodule.ErrNoTargetProtoFiles } paths := bufmodule.FileInfoPaths(targetFileInfos) + fmt.Println(strings.Join(paths, "\n")) buildResult := getBuildResult( ctx, diff --git a/private/bufpkg/bufmodule/module_dep.go b/private/bufpkg/bufmodule/module_dep.go index 6b55351278..9af13b475c 100644 --- a/private/bufpkg/bufmodule/module_dep.go +++ b/private/bufpkg/bufmodule/module_dep.go @@ -97,7 +97,6 @@ func getModuleDeps( if err := protoFileTracker.validate(); err != nil { return nil, err } - moduleDeps := make([]ModuleDep, 0, len(depOpaqueIDToModuleDep)) for _, moduleDep := range depOpaqueIDToModuleDep { moduleDeps = append(moduleDeps, moduleDep) diff --git a/private/bufpkg/bufmodule/module_set_builder.go b/private/bufpkg/bufmodule/module_set_builder.go index 9a55ba4435..bbc78ac248 100644 --- a/private/bufpkg/bufmodule/module_set_builder.go +++ b/private/bufpkg/bufmodule/module_set_builder.go @@ -409,6 +409,7 @@ func (b *moduleSetBuilder) Build() (_ ModuleSet, retErr error) { addedModules, func(addedModule *addedModule) (Module, error) { // This context ends up getting stored on the Module, is this a problem re: tracing? + // doria: yes. 
return addedModule.ToModule(ctx, b.moduleDataProvider) }, ) diff --git a/private/bufpkg/bufmodule/proto_file_tracker.go b/private/bufpkg/bufmodule/proto_file_tracker.go index 46a1a9fc47..4657212655 100644 --- a/private/bufpkg/bufmodule/proto_file_tracker.go +++ b/private/bufpkg/bufmodule/proto_file_tracker.go @@ -71,6 +71,7 @@ func (t *protoFileTracker) validate() error { var noProtoFilesErrors []*NoProtoFilesError for opaqueID, protoFileExists := range t.opaqueIDToProtoFileExists { if !protoFileExists { + panic("#########") noProtoFilesErrors = append( noProtoFilesErrors, &NoProtoFilesError{ From 82e1a76b7e09461a604f49230b4037bf8a0d6150 Mon Sep 17 00:00:00 2001 From: bufdev Date: Wed, 28 Feb 2024 14:08:28 -0500 Subject: [PATCH 04/29] fix --- private/buf/buftarget/terminate.go | 15 +++++++++------ private/buf/bufworkspace/module_targeting.go | 3 +-- private/buf/bufworkspace/workspace_targeting.go | 5 ----- private/buf/cmd/buf/workspace_unix_test.go | 4 ++-- private/bufpkg/bufimage/build_image.go | 2 -- private/bufpkg/bufmodule/module_read_bucket.go | 9 ++++++++- private/bufpkg/bufmodule/module_set_builder.go | 2 -- private/bufpkg/bufmodule/proto_file_tracker.go | 1 - 8 files changed, 20 insertions(+), 21 deletions(-) diff --git a/private/buf/buftarget/terminate.go b/private/buf/buftarget/terminate.go index 1893e3e2f5..2bc8d8fc64 100644 --- a/private/buf/buftarget/terminate.go +++ b/private/buf/buftarget/terminate.go @@ -22,7 +22,6 @@ import ( "github.com/bufbuild/buf/private/bufpkg/bufconfig" "github.com/bufbuild/buf/private/pkg/normalpath" - "github.com/bufbuild/buf/private/pkg/slicesext" "github.com/bufbuild/buf/private/pkg/storage" ) @@ -75,15 +74,19 @@ func terminateAtControllingWorkspace( return newControllingWorkspace(prefix, nil, bufYAMLFile), nil } if bufWorkYAMLExists { - // For v1 workspaces, we ensure that the module paths lists actually contain the the - // original input paths. If not, then we do not use this workspace. + // For v1 workspaces, we ensure that the module path list actually contains the original + // input paths. + if prefix == originalInputPath { + return newControllingWorkspace(prefix, bufWorkYAMLFile, nil), nil + } relDirPath, err := normalpath.Rel(prefix, originalInputPath) if err != nil { return nil, err } - _, refersToCurDirPath := slicesext.ToStructMap(bufWorkYAMLFile.DirPaths())[relDirPath] - if prefix == originalInputPath || refersToCurDirPath { - return newControllingWorkspace(prefix, bufWorkYAMLFile, nil), nil + for _, dirPath := range bufWorkYAMLFile.DirPaths() { + if normalpath.EqualsOrContainsPath(dirPath, relDirPath, normalpath.Relative) { + return newControllingWorkspace(prefix, bufWorkYAMLFile, nil), nil + } } } return nil, nil diff --git a/private/buf/bufworkspace/module_targeting.go b/private/buf/bufworkspace/module_targeting.go index 300b9f38c4..7fea813d6a 100644 --- a/private/buf/bufworkspace/module_targeting.go +++ b/private/buf/bufworkspace/module_targeting.go @@ -57,8 +57,7 @@ func newModuleTargeting( // If we have no target paths, then we always match the value of isTargetModule. // Otherwise, we need to see that at least one path matches the moduleDirPath for us // to consider this module a target. 
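The terminate fix above loosens the v1 workspace check: previously the input had to match a listed directory exactly (a map lookup over DirPaths), now it only needs to be equal to or nested inside one of them. A rough standalone illustration of the difference, where equalsOrContains only approximates normalpath.EqualsOrContainsPath for cleaned, slash-separated relative paths:

package main

import (
    "fmt"
    "strings"
)

func equalsOrContains(dir, p string) bool {
    return dir == p || strings.HasPrefix(p, dir+"/")
}

func main() {
    dirPaths := []string{"proto", "vendor"} // directories listed in buf.work.yaml
    relDirPath := "proto/foo"               // input path relative to the workspace prefix

    exactMatch := false
    equalsOrContainsMatch := false
    for _, dirPath := range dirPaths {
        if dirPath == relDirPath {
            exactMatch = true
        }
        if equalsOrContains(dirPath, relDirPath) {
            equalsOrContainsMatch = true
        }
    }
    // Only the new check accepts an input nested inside a listed directory.
    fmt.Println(exactMatch, equalsOrContainsMatch) // false true
}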
- // TODO(doria): deal with proto file ref appropriately - isTargetModule := len(bucketTargeting.TargetPaths()) == 0 && config.protoFileTargetPath == "" + isTargetModule := len(bucketTargeting.TargetPaths()) == 0 var moduleTargetPaths []string var moduleTargetExcludePaths []string // We use the bucketTargeting.TargetPaths() instead of the workspace config target paths diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index c5dfa8638d..776d4e4fe8 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -164,11 +164,6 @@ func v2WorkspaceTargeting( bucketTargeting buftarget.BucketTargeting, bufYAMLFile bufconfig.BufYAMLFile, ) (*workspaceTargeting, error) { - workspaceDirPath := bucketTargeting.InputPath() - if controllingWorkspace := bucketTargeting.ControllingWorkspace(); controllingWorkspace != nil { - workspaceDirPath = controllingWorkspace.Path() - } - bucket = storage.MapReadBucket(bucket, storage.MapOnPrefix(workspaceDirPath)) // We keep track of if any module was tentatively targeted, and then actually targeted via // the paths flags. We use this pre-building of the ModuleSet to see if the --path and // --exclude-path flags resulted in no targeted modules. This condition is represented diff --git a/private/buf/cmd/buf/workspace_unix_test.go b/private/buf/cmd/buf/workspace_unix_test.go index fc9776d697..8cdfa31200 100644 --- a/private/buf/cmd/buf/workspace_unix_test.go +++ b/private/buf/cmd/buf/workspace_unix_test.go @@ -175,7 +175,7 @@ func TestWorkspaceGit(t *testing.T) { nil, bufctl.ExitCodeFileAnnotation, filepath.FromSlash(`private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Files with package "example" must be within a directory "example" relative to root but were in directory ".". - private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Package name "example" should be suffixed with a correctly formed version, such as "example.v1".`), + private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Package name "example" should be suffixed with a correctly formed version, such as "example.v1".`), "lint", "../../../../.git#ref=HEAD,subdir=private/buf/cmd/buf/testdata/workspace/success/dir/proto", ) @@ -184,7 +184,7 @@ func TestWorkspaceGit(t *testing.T) { nil, bufctl.ExitCodeFileAnnotation, filepath.FromSlash(`private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Files with package "example" must be within a directory "example" relative to root but were in directory ".". 
- private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Package name "example" should be suffixed with a correctly formed version, such as "example.v1".`), + private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Package name "example" should be suffixed with a correctly formed version, such as "example.v1".`), "lint", "../../../../.git#ref=HEAD,subdir=private/buf/cmd/buf/testdata/workspace/success/dir/proto", "--path", diff --git a/private/bufpkg/bufimage/build_image.go b/private/bufpkg/bufimage/build_image.go index 8fa5b6b4e8..24b4a035c1 100644 --- a/private/bufpkg/bufimage/build_image.go +++ b/private/bufpkg/bufimage/build_image.go @@ -18,7 +18,6 @@ import ( "context" "errors" "fmt" - "strings" "github.com/bufbuild/buf/private/bufpkg/bufanalysis" "github.com/bufbuild/buf/private/bufpkg/bufmodule" @@ -59,7 +58,6 @@ func buildImage( return nil, bufmodule.ErrNoTargetProtoFiles } paths := bufmodule.FileInfoPaths(targetFileInfos) - fmt.Println(strings.Join(paths, "\n")) buildResult := getBuildResult( ctx, diff --git a/private/bufpkg/bufmodule/module_read_bucket.go b/private/bufpkg/bufmodule/module_read_bucket.go index 6c3ce98cb1..2ac8f9e66e 100644 --- a/private/bufpkg/bufmodule/module_read_bucket.go +++ b/private/bufpkg/bufmodule/module_read_bucket.go @@ -331,7 +331,6 @@ func (b *moduleReadBucket) WalkFileInfos( // Note that we must verify that at least one file in this ModuleReadBucket is // a .proto file, per the documentation on Module. protoFileTracker := newProtoFileTracker() - protoFileTracker.trackModule(b.module) walkFileInfosOptions := newWalkFileInfosOptions() for _, option := range options { @@ -343,6 +342,10 @@ func (b *moduleReadBucket) WalkFileInfos( } if !walkFileInfosOptions.onlyTargetFiles { + // We only want to call trackModule if we are walking all the files, not just + // the target files. By not calling trackModule outside of this if statement, + // we will not produce NoProtoFilesErrors, per the documention on trackModule. + protoFileTracker.trackModule(b.module) if err := bucket.Walk( ctx, "", @@ -359,6 +362,10 @@ func (b *moduleReadBucket) WalkFileInfos( } return protoFileTracker.validate() } + // If we are walking target files, then we track the module if it is the target module + if b.module.IsTarget() { + protoFileTracker.trackModule(b.module) + } targetFileWalkFunc := func(objectInfo storage.ObjectInfo) error { fileInfo, err := b.getFileInfo(ctx, objectInfo) diff --git a/private/bufpkg/bufmodule/module_set_builder.go b/private/bufpkg/bufmodule/module_set_builder.go index bbc78ac248..5bc4f1be79 100644 --- a/private/bufpkg/bufmodule/module_set_builder.go +++ b/private/bufpkg/bufmodule/module_set_builder.go @@ -408,8 +408,6 @@ func (b *moduleSetBuilder) Build() (_ ModuleSet, retErr error) { modules, err := slicesext.MapError( addedModules, func(addedModule *addedModule) (Module, error) { - // This context ends up getting stored on the Module, is this a problem re: tracing? - // doria: yes. 
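The comments added to WalkFileInfos above are about when the proto file tracker gets primed with a module: when walking all files it is always tracked (so an empty module surfaces a NoProtoFilesError), but when walking only target files it is tracked only for the target module. A simplified standalone sketch of that tracker behavior, assuming a tracker that only complains about modules it was explicitly told to track (the real protoFileTracker in bufmodule is more involved):

package main

import (
    "errors"
    "fmt"
)

// tracker is a toy stand-in for a proto file tracker: a module only produces
// a "no .proto files" error if it was tracked and no file was ever seen for it.
type tracker struct {
    moduleSeen map[string]bool
}

func newTracker() *tracker {
    return &tracker{moduleSeen: map[string]bool{}}
}

func (t *tracker) trackModule(moduleID string) {
    if _, ok := t.moduleSeen[moduleID]; !ok {
        t.moduleSeen[moduleID] = false
    }
}

func (t *tracker) trackFile(moduleID string) {
    t.moduleSeen[moduleID] = true
}

func (t *tracker) validate() error {
    for moduleID, seen := range t.moduleSeen {
        if !seen {
            return errors.New("module " + moduleID + " had no .proto files")
        }
    }
    return nil
}

func main() {
    // Walking only target files of a non-target module: trackModule is never
    // called, so an empty walk passes validation.
    t := newTracker()
    fmt.Println(t.validate()) // <nil>

    // Walking all files: the module is tracked up front, so an empty module fails.
    t = newTracker()
    t.trackModule("buf.build/foo/bar")
    fmt.Println(t.validate()) // module buf.build/foo/bar had no .proto files

    // Walking all files of a module that does contain a .proto file passes.
    t = newTracker()
    t.trackModule("buf.build/foo/bar")
    t.trackFile("buf.build/foo/bar")
    fmt.Println(t.validate()) // <nil>
}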
return addedModule.ToModule(ctx, b.moduleDataProvider) }, ) diff --git a/private/bufpkg/bufmodule/proto_file_tracker.go b/private/bufpkg/bufmodule/proto_file_tracker.go index 4657212655..46a1a9fc47 100644 --- a/private/bufpkg/bufmodule/proto_file_tracker.go +++ b/private/bufpkg/bufmodule/proto_file_tracker.go @@ -71,7 +71,6 @@ func (t *protoFileTracker) validate() error { var noProtoFilesErrors []*NoProtoFilesError for opaqueID, protoFileExists := range t.opaqueIDToProtoFileExists { if !protoFileExists { - panic("#########") noProtoFilesErrors = append( noProtoFilesErrors, &NoProtoFilesError{ From 8e3586926b244ba3898d316d1695c193c6d1d400 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Wed, 28 Feb 2024 16:30:11 -0500 Subject: [PATCH 05/29] Clean up ProtoFileRef handling --- private/buf/buffetch/internal/reader_test.go | 28 ++--- private/buf/bufworkspace/module_targeting.go | 126 ++++++++++--------- 2 files changed, 82 insertions(+), 72 deletions(-) diff --git a/private/buf/buffetch/internal/reader_test.go b/private/buf/buffetch/internal/reader_test.go index 06f91b8b08..c9ef533da4 100644 --- a/private/buf/buffetch/internal/reader_test.go +++ b/private/buf/buffetch/internal/reader_test.go @@ -250,14 +250,14 @@ func TestGetReadBucketCloserForOSProtoFileNoWorkspaceTerminateFileName(t *testin zap.NewNop(), storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five/proto/foo.proto", - nil, // no target paths - nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.yaml"), ) require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "proto", readBucketCloser.SubDirPath()) require.Equal(t, "proto", bucketTargeting.InputPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -272,14 +272,14 @@ func TestGetReadBucketCloserForOSProtoFileTerminateFileName(t *testing.T) { zap.NewNop(), storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five/proto/foo.proto", - nil, // no target paths - nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -308,14 +308,14 @@ func TestGetReadBucketCloserForOSProtoFileParentPwd(t *testing.T) { zap.NewNop(), storageos.NewProvider(), "five/proto/foo.proto", - nil, // no target paths - nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := 
readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -346,14 +346,14 @@ func TestGetReadBucketCloserForOSProtoFileAbsPwd(t *testing.T) { zap.NewNop(), storageos.NewProvider(), normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five/proto/foo.proto"), - nil, // no target paths - nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five/buf.yaml"), filepath.ToSlash(fileInfo.ExternalPath())) @@ -371,14 +371,14 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLTerminateFileName(t *testing. zap.NewNop(), storageos.NewProvider(), "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto", - nil, // no target paths - nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") require.NoError(t, err) require.Equal(t, "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto", filepath.ToSlash(fileInfo.ExternalPath())) @@ -404,14 +404,14 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLParentPwd(t *testing.T) { zap.NewNop(), storageos.NewProvider(), "five/proto/foo.proto", - nil, // no target paths - nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") require.NoError(t, err) require.Equal(t, "five/proto/foo.proto", filepath.ToSlash(fileInfo.ExternalPath())) @@ -443,14 +443,14 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLAbsPwd(t *testing.T) { zap.NewNop(), storageos.NewProvider(), normalpath.Join(absDirPath, "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto"), - nil, // no target paths - nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) require.Equal(t, "four/five", bucketTargeting.InputPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") require.NoError(t, err) 
require.Equal(t, normalpath.Join(absDirPath, "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto"), fileInfo.ExternalPath()) diff --git a/private/buf/bufworkspace/module_targeting.go b/private/buf/bufworkspace/module_targeting.go index 7fea813d6a..4a0c642246 100644 --- a/private/buf/bufworkspace/module_targeting.go +++ b/private/buf/bufworkspace/module_targeting.go @@ -21,12 +21,12 @@ import ( "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/slicesext" "github.com/bufbuild/buf/private/pkg/stringutil" + "github.com/bufbuild/buf/private/pkg/syserror" ) type moduleTargeting struct { // Whether this module is really a target module. // - // TODO(doria): protofile ref // False if this was not specified as a target module by the caller. // Also false if there were bucketTargeting.TargetPaths() or bucketTargeting.protoFileTargetPath, but // these paths did not match anything in the module. @@ -57,68 +57,22 @@ func newModuleTargeting( // If we have no target paths, then we always match the value of isTargetModule. // Otherwise, we need to see that at least one path matches the moduleDirPath for us // to consider this module a target. - isTargetModule := len(bucketTargeting.TargetPaths()) == 0 + isTargetModule := len(bucketTargeting.TargetPaths()) == 0 && config.protoFileTargetPath == "" var moduleTargetPaths []string var moduleTargetExcludePaths []string - // We use the bucketTargeting.TargetPaths() instead of the workspace config target paths - // since those are stripped of the path to the module. - for _, targetPath := range bucketTargeting.TargetPaths() { - if targetPath == moduleDirPath { - // We're just going to be realists in our error messages here. - // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, - // if we were really clever, we'd go back and just add this as a module path. - return nil, fmt.Errorf("module %q was specified with --path - specify this module path directly as an input", targetPath) + var moduleProtoFileTargetPath string + var includePackageFiles bool + if config.protoFileTargetPath != "" { + var err error + // We are currently returning the mapped proto file path as a target path in bucketTargeting. + // At the user level, we do not allow setting target paths or exclude paths for proto file + // references, so we do this check here. + if len(bucketTargeting.TargetPaths()) != 1 { + return nil, syserror.New("ProtoFileTargetPath not properly returned with bucketTargeting") } - if normalpath.ContainsPath(moduleDirPath, targetPath, normalpath.Relative) { + if normalpath.ContainsPath(moduleDirPath, bucketTargeting.TargetPaths()[0], normalpath.Relative) { isTargetModule = true - moduleTargetPath, err := normalpath.Rel(moduleDirPath, targetPath) - if err != nil { - return nil, err - } - moduleTargetPaths = append(moduleTargetPaths, moduleTargetPath) - } - } - // We use the bucketTargeting.TargetExcludePaths() instead of the workspace config target - // exclude paths since those are stripped of the path to the module. - for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { - if targetExcludePath == moduleDirPath { - // We're just going to be realists in our error messages here. - // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, - // if we were really clever, we'd go back and just remove this as a module path if it was specified. - // This really should be allowed - how else do you exclude from a workspace? 
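For ProtoFileRefs, the rewritten targeting above expects bucket targeting to surface exactly one target path (the proto file itself), checks that the module directory contains it, and then re-roots it on the module. A standalone sketch of that flow, assuming cleaned, slash-separated relative paths (illustrative; the real code uses normalpath, syserror, and applyRootsToTargetPath):

package main

import (
    "errors"
    "fmt"
    "strings"
)

func containsPath(dir, p string) bool {
    return dir == p || strings.HasPrefix(p, dir+"/")
}

// protoFileTargetForModule decides whether moduleDirPath is targeted by a
// single proto file target path, and returns that path relative to the module.
func protoFileTargetForModule(moduleDirPath string, targetPaths []string) (bool, string, error) {
    if len(targetPaths) != 1 {
        return false, "", errors.New("expected exactly one target path for a proto file reference")
    }
    protoFileTargetPath := targetPaths[0]
    if !containsPath(moduleDirPath, protoFileTargetPath) {
        return false, "", nil
    }
    return true, strings.TrimPrefix(protoFileTargetPath, moduleDirPath+"/"), nil
}

func main() {
    isTarget, moduleRelPath, err := protoFileTargetForModule(
        "four/five/proto",
        []string{"four/five/proto/foo.proto"},
    )
    fmt.Println(isTarget, moduleRelPath, err) // true foo.proto <nil>
}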
- return nil, fmt.Errorf("module %q was specified with --exclude-path - this flag cannot be used to specify module directories", targetExcludePath) - } - if normalpath.ContainsPath(moduleDirPath, targetExcludePath, normalpath.Relative) { - moduleTargetExcludePath, err := normalpath.Rel(moduleDirPath, targetExcludePath) - if err != nil { - return nil, err - } - moduleTargetExcludePaths = append(moduleTargetExcludePaths, moduleTargetExcludePath) } - } - moduleTargetPaths, err := slicesext.MapError( - moduleTargetPaths, - func(moduleTargetPath string) (string, error) { - return applyRootsToTargetPath(roots, moduleTargetPath, normalpath.Relative) - }, - ) - if err != nil { - return nil, err - } - moduleTargetExcludePaths, err = slicesext.MapError( - moduleTargetExcludePaths, - func(moduleTargetExcludePath string) (string, error) { - return applyRootsToTargetPath(roots, moduleTargetExcludePath, normalpath.Relative) - }, - ) - if err != nil { - return nil, err - } - var moduleProtoFileTargetPath string - var includePackageFiles bool - if config.protoFileTargetPath != "" && - normalpath.ContainsPath(moduleDirPath, config.protoFileTargetPath, normalpath.Relative) { - isTargetModule = true moduleProtoFileTargetPath, err = normalpath.Rel(moduleDirPath, config.protoFileTargetPath) if err != nil { return nil, err @@ -128,6 +82,62 @@ func newModuleTargeting( return nil, err } includePackageFiles = config.includePackageFiles + } else { + var err error + // We use the bucketTargeting.TargetPaths() instead of the workspace config target paths + // since those are stripped of the path to the module. + for _, targetPath := range bucketTargeting.TargetPaths() { + if targetPath == moduleDirPath { + // We're just going to be realists in our error messages here. + // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, + // if we were really clever, we'd go back and just add this as a module path. + return nil, fmt.Errorf("module %q was specified with --path - specify this module path directly as an input", targetPath) + } + if normalpath.ContainsPath(moduleDirPath, targetPath, normalpath.Relative) { + isTargetModule = true + moduleTargetPath, err := normalpath.Rel(moduleDirPath, targetPath) + if err != nil { + return nil, err + } + moduleTargetPaths = append(moduleTargetPaths, moduleTargetPath) + } + } + // We use the bucketTargeting.TargetExcludePaths() instead of the workspace config target + // exclude paths since those are stripped of the path to the module. + for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { + if targetExcludePath == moduleDirPath { + // We're just going to be realists in our error messages here. + // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, + // if we were really clever, we'd go back and just remove this as a module path if it was specified. + // This really should be allowed - how else do you exclude from a workspace? 
+ return nil, fmt.Errorf("module %q was specified with --exclude-path - this flag cannot be used to specify module directories", targetExcludePath) + } + if normalpath.ContainsPath(moduleDirPath, targetExcludePath, normalpath.Relative) { + moduleTargetExcludePath, err := normalpath.Rel(moduleDirPath, targetExcludePath) + if err != nil { + return nil, err + } + moduleTargetExcludePaths = append(moduleTargetExcludePaths, moduleTargetExcludePath) + } + } + moduleTargetPaths, err = slicesext.MapError( + moduleTargetPaths, + func(moduleTargetPath string) (string, error) { + return applyRootsToTargetPath(roots, moduleTargetPath, normalpath.Relative) + }, + ) + if err != nil { + return nil, err + } + moduleTargetExcludePaths, err = slicesext.MapError( + moduleTargetExcludePaths, + func(moduleTargetExcludePath string) (string, error) { + return applyRootsToTargetPath(roots, moduleTargetExcludePath, normalpath.Relative) + }, + ) + if err != nil { + return nil, err + } } return &moduleTargeting{ moduleDirPath: moduleDirPath, From 84d11c986d9619dae777e8460f47ad65ecfe9161 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Thu, 29 Feb 2024 14:52:31 -0500 Subject: [PATCH 06/29] Add better fallback logic for workspace targeting --- private/buf/buffetch/internal/internal.go | 12 - private/buf/buffetch/internal/reader.go | 68 +++--- private/buf/buftarget/bucket_targeting.go | 4 +- .../buf/buftarget/controlling_workspace.go | 1 - private/buf/buftarget/terminate.go | 48 +++- private/buf/bufworkspace/module_targeting.go | 22 +- private/buf/bufworkspace/option.go | 5 +- .../buf/bufworkspace/workspace_targeting.go | 214 +++++++++++++++++- private/buf/cmd/buf/buf_test.go | 13 +- private/buf/cmd/buf/workspace_test.go | 15 +- .../bufpkg/bufmodule/module_read_bucket.go | 1 + 11 files changed, 305 insertions(+), 98 deletions(-) diff --git a/private/buf/buffetch/internal/internal.go b/private/buf/buffetch/internal/internal.go index 938f907e97..9ef20508df 100644 --- a/private/buf/buffetch/internal/internal.go +++ b/private/buf/buffetch/internal/internal.go @@ -826,18 +826,6 @@ func WithGetReadBucketCloserTerminateFunc(terminateFunc buftarget.TerminateFunc) } } -// WithGetReadBucketCloserProtoFileTerminateFunc is like WithGetReadBucketCloserTerminateFunc, but determines -// where to stop searching for the enclosing module or workspace when given a ProtoFileRef. -// -// See bufconfig.TerminateAtEnclosingModuleOrWorkspaceForProtoFileRef, which is the only thing that uses this. -// This finds the enclosing module or workspace. -// This is only used for ProtoFileRefs. -func WithGetReadBucketCloserProtoFileTerminateFunc(protoFileTerminateFunc buftarget.TerminateFunc) GetReadBucketCloserOption { - return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { - getReadBucketCloserOptions.protoFileTerminateFunc = protoFileTerminateFunc - } -} - // WithGetReadBucketCloserTargetPaths sets the target paths for the bucket targeting information. 
func WithGetReadBucketCloserTargetPaths(targetPaths []string) GetReadBucketCloserOption { return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { diff --git a/private/buf/buffetch/internal/reader.go b/private/buf/buffetch/internal/reader.go index adef66f481..7e57e99de8 100644 --- a/private/buf/buffetch/internal/reader.go +++ b/private/buf/buffetch/internal/reader.go @@ -171,7 +171,7 @@ func (r *reader) GetReadBucketCloser( ctx, container, t, - getReadBucketCloserOptions.protoFileTerminateFunc, + getReadBucketCloserOptions.terminateFunc, ) default: return nil, nil, fmt.Errorf("unknown BucketRef type: %T", bucketRef) @@ -678,29 +678,46 @@ func getReadWriteBucketForOS( // returnSubDirPath: foo // Make bucket on: FS root + controlling workspace (since absolute) // - // If no controlling workspace is found, we use the current working directory. + // If no controlling workspace is found, for absolute paths, close the workspace at the + // input path. For relative paths, we use the current working directory. var bucketPath string var inputPath string bucketTargetPaths := make([]string, len(osRootBucketTargeting.TargetPaths())) bucketTargetExcludePaths := make([]string, len(osRootBucketTargeting.TargetExcludePaths())) - pwd, err := osext.Getwd() - if err != nil { - return nil, nil, err - } - _, pwdFSRelPath, err := fsRootAndFSRelPathForPath(pwd) - if err != nil { - return nil, nil, err - } if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { if osRootBucketTargeting.ControllingWorkspace() != nil { bucketPath = normalpath.Join(fsRoot, osRootBucketTargeting.ControllingWorkspace().Path()) + inputPath = osRootBucketTargeting.InputPath() + bucketTargetPaths = osRootBucketTargeting.TargetPaths() + bucketTargetExcludePaths = osRootBucketTargeting.TargetExcludePaths() } else { - bucketPath = normalpath.Join(fsRoot, pwdFSRelPath) + bucketPath = normalpath.Join(fsRoot, fsRootInputSubDirPath) + inputPath, err = normalpath.Rel(fsRootInputSubDirPath, fsRootInputSubDirPath) + if err != nil { + return nil, nil, err + } + for i, targetPath := range osRootBucketTargeting.TargetPaths() { + bucketTargetPaths[i], err = normalpath.Rel(fsRootInputSubDirPath, targetPath) + if err != nil { + return nil, nil, err + } + } + for i, targetExcludePath := range osRootBucketTargeting.TargetExcludePaths() { + bucketTargetExcludePaths[i], err = normalpath.Rel(fsRootInputSubDirPath, targetExcludePath) + if err != nil { + return nil, nil, err + } + } } - inputPath = osRootBucketTargeting.InputPath() - bucketTargetPaths = osRootBucketTargeting.TargetPaths() - bucketTargetExcludePaths = osRootBucketTargeting.TargetExcludePaths() } else { + pwd, err := osext.Getwd() + if err != nil { + return nil, nil, err + } + _, pwdFSRelPath, err := fsRootAndFSRelPathForPath(pwd) + if err != nil { + return nil, nil, err + } if osRootBucketTargeting.ControllingWorkspace() != nil { bucketPath, err = normalpath.Rel(pwdFSRelPath, osRootBucketTargeting.ControllingWorkspace().Path()) if err != nil { @@ -729,20 +746,6 @@ func getReadWriteBucketForOS( } } } - //for i, osRootTargetPath := range osRootBucketTargeting.TargetPaths() { - // targetPath, err := normalpath.Rel(pwdFSRelPath, osRootTargetPath) - // if err != nil { - // return nil, nil, err - // } - // bucketTargetPaths[i] = targetPath - //} - //for i, osRootTargetExcludePath := range osRootBucketTargeting.TargetExcludePaths() { - // targetExcludePath, err := normalpath.Rel(pwdFSRelPath, osRootTargetExcludePath) - // if err != nil { - // return nil, nil, err - // } - // 
bucketTargetExcludePaths[i] = targetExcludePath - //} } // Now that we've mapped the workspace against the FS root, we recreate the bucket with // at the sub dir path. @@ -860,11 +863,10 @@ func newGetFileOptions() *getFileOptions { } type getReadBucketCloserOptions struct { - terminateFunc buftarget.TerminateFunc - protoFileTerminateFunc buftarget.TerminateFunc - copyToInMemory bool - targetPaths []string - targetExcludePaths []string + terminateFunc buftarget.TerminateFunc + copyToInMemory bool + targetPaths []string + targetExcludePaths []string } func newGetReadBucketCloserOptions() *getReadBucketCloserOptions { diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index 6ad834af97..669d6045f1 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ b/private/buf/buftarget/bucket_targeting.go @@ -102,7 +102,6 @@ func newBucketTargeting( logger, bucket, inputPath, - inputPath, terminateFunc, ) if err != nil { @@ -153,7 +152,6 @@ func mapControllingWorkspaceAndPath( logger *zap.Logger, bucket storage.ReadBucket, path string, - originalInputPath string, terminateFunc TerminateFunc, ) (ControllingWorkspace, string, error) { path, err := normalpath.NormalizeAndValidate(path) @@ -175,7 +173,7 @@ func mapControllingWorkspaceAndPath( // Instead, we effectively do a do-while loop. curDirPath := path for { - controllingWorkspace, err := terminateFunc(ctx, bucket, curDirPath, originalInputPath) + controllingWorkspace, err := terminateFunc(ctx, bucket, curDirPath, path) if err != nil { return nil, "", err } diff --git a/private/buf/buftarget/controlling_workspace.go b/private/buf/buftarget/controlling_workspace.go index eba876c8e8..b73fbbce31 100644 --- a/private/buf/buftarget/controlling_workspace.go +++ b/private/buf/buftarget/controlling_workspace.go @@ -10,7 +10,6 @@ type ControllingWorkspace interface { // Returns a buf.work.yaml file that was found. This is empty if we are retruning a buf.yaml. BufWorkYAMLFile() bufconfig.BufWorkYAMLFile // Returns a buf.yaml that was found. This is empty if we are returning a buf.work.yaml. - // Can be a v1 or v2 buf.yaml. BufYAMLFile() bufconfig.BufYAMLFile } diff --git a/private/buf/buftarget/terminate.go b/private/buf/buftarget/terminate.go index 2bc8d8fc64..cc95f1a796 100644 --- a/private/buf/buftarget/terminate.go +++ b/private/buf/buftarget/terminate.go @@ -35,8 +35,6 @@ type TerminateFunc func( // TerminateAtControllingWorkspace implements a TerminateFunc and returns controlling workspace // if one is found at the given prefix. -// -// All paths must be relative. func TerminateAtControllingWorkspace( ctx context.Context, bucket storage.ReadBucket, @@ -46,6 +44,17 @@ func TerminateAtControllingWorkspace( return terminateAtControllingWorkspace(ctx, bucket, prefix, originalInputPath) } +// TerminateAtV1Module is a special terminate func that returns a controlling workspace with +// a v1 module confiugration if found at the given prefix. +func TerminateAtV1Module( + ctx context.Context, + bucket storage.ReadBucket, + prefix string, + originalInputPath string, +) (ControllingWorkspace, error) { + return terminateAtV1Module(ctx, bucket, prefix, originalInputPath) +} + // *** PRIVATE *** func terminateAtControllingWorkspace( @@ -68,10 +77,19 @@ func terminateAtControllingWorkspace( // This isn't actually the external directory path, but we do the best we can here for now. 
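The reader changes above re-express the target paths, which start out relative to the OS root, against whatever directory the bucket ends up being opened at: the controlling workspace if one was found, otherwise the input directory (for absolute inputs) or the working directory (for relative inputs). A standalone sketch of that re-rooting step (illustrative; the real code uses normalpath.Rel and osext):

package main

import (
    "fmt"
    "path/filepath"
)

// rerootPaths rewrites paths that are relative to an OS root so that they are
// relative to bucketDir, the directory the bucket will be opened at.
func rerootPaths(bucketDir string, paths []string) ([]string, error) {
    rerooted := make([]string, len(paths))
    for i, p := range paths {
        rel, err := filepath.Rel(bucketDir, p)
        if err != nil {
            return nil, err
        }
        rerooted[i] = filepath.ToSlash(rel)
    }
    return rerooted, nil
}

func main() {
    // No controlling workspace was found for an absolute input, so the bucket
    // is opened at the input directory itself and target paths become
    // relative to that directory.
    rerooted, err := rerootPaths(
        "home/user/project/proto",
        []string{"home/user/project/proto/foo/bar.proto"},
    )
    fmt.Println(rerooted, err) // [foo/bar.proto] <nil>
}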
return nil, fmt.Errorf("cannot have a buf.work.yaml and buf.yaml in the same directory %q", prefix) } + relDirPath, err := normalpath.Rel(prefix, originalInputPath) + if err != nil { + return nil, err + } if bufYAMLExists && bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 { - // We don't require the workspace to point to the prefix (likely because we're - // finding the controlling workspace for a ProtoFileRef), we're good to go. - return newControllingWorkspace(prefix, nil, bufYAMLFile), nil + if prefix == originalInputPath { + return newControllingWorkspace(prefix, nil, bufYAMLFile), nil + } + for _, moduleConfig := range bufYAMLFile.ModuleConfigs() { + if normalpath.EqualsOrContainsPath(moduleConfig.DirPath(), relDirPath, normalpath.Relative) { + return newControllingWorkspace(prefix, nil, bufYAMLFile), nil + } + } } if bufWorkYAMLExists { // For v1 workspaces, we ensure that the module path list actually contains the original @@ -79,10 +97,6 @@ func terminateAtControllingWorkspace( if prefix == originalInputPath { return newControllingWorkspace(prefix, bufWorkYAMLFile, nil), nil } - relDirPath, err := normalpath.Rel(prefix, originalInputPath) - if err != nil { - return nil, err - } for _, dirPath := range bufWorkYAMLFile.DirPaths() { if normalpath.EqualsOrContainsPath(dirPath, relDirPath, normalpath.Relative) { return newControllingWorkspace(prefix, bufWorkYAMLFile, nil), nil @@ -91,3 +105,19 @@ func terminateAtControllingWorkspace( } return nil, nil } + +func terminateAtV1Module( + ctx context.Context, + bucket storage.ReadBucket, + prefix string, + originalInputPath string, +) (ControllingWorkspace, error) { + bufYAMLFile, err := bufconfig.GetBufYAMLFileForPrefix(ctx, bucket, prefix) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return nil, err + } + if err == nil && bufYAMLFile.FileVersion() == bufconfig.FileVersionV1 { + return newControllingWorkspace(prefix, nil, bufYAMLFile), nil + } + return nil, nil +} diff --git a/private/buf/bufworkspace/module_targeting.go b/private/buf/bufworkspace/module_targeting.go index 4a0c642246..44c069208d 100644 --- a/private/buf/bufworkspace/module_targeting.go +++ b/private/buf/bufworkspace/module_targeting.go @@ -58,6 +58,7 @@ func newModuleTargeting( // Otherwise, we need to see that at least one path matches the moduleDirPath for us // to consider this module a target. 
isTargetModule := len(bucketTargeting.TargetPaths()) == 0 && config.protoFileTargetPath == "" + var moduleTargetPaths []string var moduleTargetExcludePaths []string var moduleProtoFileTargetPath string @@ -70,18 +71,19 @@ func newModuleTargeting( if len(bucketTargeting.TargetPaths()) != 1 { return nil, syserror.New("ProtoFileTargetPath not properly returned with bucketTargeting") } - if normalpath.ContainsPath(moduleDirPath, bucketTargeting.TargetPaths()[0], normalpath.Relative) { + protoFileTargetPath := bucketTargeting.TargetPaths()[0] + if normalpath.ContainsPath(moduleDirPath, protoFileTargetPath, normalpath.Relative) { isTargetModule = true + moduleProtoFileTargetPath, err = normalpath.Rel(moduleDirPath, protoFileTargetPath) + if err != nil { + return nil, err + } + moduleProtoFileTargetPath, err = applyRootsToTargetPath(roots, moduleProtoFileTargetPath, normalpath.Relative) + if err != nil { + return nil, err + } + includePackageFiles = config.includePackageFiles } - moduleProtoFileTargetPath, err = normalpath.Rel(moduleDirPath, config.protoFileTargetPath) - if err != nil { - return nil, err - } - moduleProtoFileTargetPath, err = applyRootsToTargetPath(roots, moduleProtoFileTargetPath, normalpath.Relative) - if err != nil { - return nil, err - } - includePackageFiles = config.includePackageFiles } else { var err error // We use the bucketTargeting.TargetPaths() instead of the workspace config target paths diff --git a/private/buf/bufworkspace/option.go b/private/buf/bufworkspace/option.go index 734218d024..a12b03ab32 100644 --- a/private/buf/bufworkspace/option.go +++ b/private/buf/bufworkspace/option.go @@ -230,10 +230,7 @@ func newWorkspaceBucketConfig(options []WorkspaceBucketOption) (*workspaceBucket return nil, err } if config.protoFileTargetPath != "" { - config.protoFileTargetPath, err = normalpath.NormalizeAndValidate(config.protoFileTargetPath) - if err != nil { - return nil, err - } + config.protoFileTargetPath = normalpath.Normalize(config.protoFileTargetPath) } if len(config.targetPaths) > 0 || len(config.targetExcludePaths) > 0 { if config.protoFileTargetPath != "" { diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index 776d4e4fe8..ae4bfec9ab 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -87,7 +87,7 @@ func newWorkspaceTargeting( config, bucket, bucketTargeting, - []string{bucketTargeting.InputPath()}, // we assume a v1 workspace with a single v1 module at the input path + []string{bucketTargeting.InputPath()}, overrideBufYAMLFile, ) case bufconfig.FileVersionV2: @@ -145,15 +145,13 @@ defined with a v2 buf.yaml can be updated, see the migration documentation for m "targeting workspace with no found buf.work.yaml or buf.yaml", zap.String("input path", bucketTargeting.InputPath()), ) - // We did not find any buf.work.yaml or buf.yaml, operate as if a - // default v1 buf.yaml was at bucketTargeting.InputPath(). - return v1WorkspaceTargeting( + // We did not find any buf.work.yaml or buf.yaml, we invoke fallback logic. + return fallbackWorkspaceTargeting( ctx, + logger, config, bucket, bucketTargeting, - []string{bucketTargeting.InputPath()}, - nil, ) } @@ -189,6 +187,12 @@ func v2WorkspaceTargeting( // This is new behavior - before, we required that you input an exact match for the module directory path, // but now, we will take all the modules underneath this workspace. 
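The comment above describes the new targeting behavior, and the check that follows only requires the input path to equal or contain a module directory, so pointing the input at a workspace root tentatively targets every module underneath it. A small standalone illustration, where equalsOrContains approximates normalpath.EqualsOrContainsPath for cleaned, slash-separated relative paths (with "." treated as containing everything):

package main

import (
    "fmt"
    "strings"
)

func equalsOrContains(dir, p string) bool {
    if dir == "." {
        return true
    }
    return dir == p || strings.HasPrefix(p, dir+"/")
}

func main() {
    moduleDirPaths := []string{"proto", "proto/vendor", "other/proto"}

    // Input at the workspace root tentatively targets every module.
    for _, moduleDirPath := range moduleDirPaths {
        fmt.Println(moduleDirPath, equalsOrContains(".", moduleDirPath)) // all true
    }

    // Input at "proto" only targets the modules at or underneath it.
    for _, moduleDirPath := range moduleDirPaths {
        fmt.Println(moduleDirPath, equalsOrContains("proto", moduleDirPath)) // true, true, false
    }
}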
isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputPath(), moduleDirPath, normalpath.Relative) + // We ignore this check for proto file refs, since the input is considered the directory + // of the proto file reference, which is unlikely to contain a module in its entirety. + // In the future, it would be nice to handle this more elegently. + if config.protoFileTargetPath != "" { + isTentativelyTargetModule = true + } if isTentativelyTargetModule { hadIsTentativelyTargetModule = true } @@ -213,7 +217,10 @@ func v2WorkspaceTargeting( }) } if !hadIsTentativelyTargetModule { - return nil, fmt.Errorf("input path %q did not contain modules found in workspace %v", bucketTargeting.InputPath(), moduleDirPaths) + // Check if the input is overlapping within a module dir path. If so, return a nicer + // error. In the future, we want to remove special treatment for input path, and it + // should be treated just like any target path. + return nil, checkForOverlap(bucketTargeting.InputPath(), moduleDirPaths) } if !hadIsTargetModule { // It would be nice to have a better error message than this in the long term. @@ -283,6 +290,12 @@ func v1WorkspaceTargeting( // This is new behavior - before, we required that you input an exact match for the module directory path, // but now, we will take all the modules underneath this workspace. isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputPath(), moduleDirPath, normalpath.Relative) + // We ignore this check for proto file refs, since the input is considered the directory + // of the proto file reference, which is unlikely to contain a module in its entirety. + // In the future, it would be nice to handle this more elegently. + if config.protoFileTargetPath != "" { + isTentativelyTargetModule = true + } if isTentativelyTargetModule { hadIsTentativelyTargetModule = true } @@ -307,7 +320,10 @@ func v1WorkspaceTargeting( }) } if !hadIsTentativelyTargetModule { - return nil, syserror.Newf("subDirPath %q did not result in any target modules from moduleDirPaths %v", bucketTargeting.InputPath(), moduleDirPaths) + // Check if the input is overlapping within a module dir path. If so, return a nicer + // error. In the future, we want to remove special treatment for input path, and it + // should be treated just like any target path. + return nil, checkForOverlap(bucketTargeting.InputPath(), moduleDirPaths) } if !hadIsTargetModule { // It would be nice to have a better error message than this in the long term. @@ -322,6 +338,112 @@ func v1WorkspaceTargeting( }, nil } +// fallbackWorkspaceTargeting is the fallback logic when there is no config override or +// controlling workspace discovered through bucket targeting. +// +// 1. We check if there are target paths. If there are no target paths, then we return +// a v1 workspace with the only module directory path as the input, since we have no other +// information to go on. +// +// 2. If there are target paths, then we check from each target path, walking up, if there +// is a workspace or v1 module. +// +// a. If we find nothing, then same fallback as 1. +// b. If we find a v1 or v2 workspace, we ensure that all them resolve to the same workspace. +// c. If we find no workspace, then we can check for v1 module configs. 
+func fallbackWorkspaceTargeting( + ctx context.Context, + logger *zap.Logger, + config *workspaceBucketConfig, + bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, +) (*workspaceTargeting, error) { + // No target paths, return a v1 workspace with a single module dir path at the input + if len(bucketTargeting.TargetPaths()) == 0 { + return v1WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + []string{bucketTargeting.InputPath()}, + nil, + ) + } + var v1BufWorkYAML bufconfig.BufWorkYAMLFile + var v2BufYAMLFile bufconfig.BufYAMLFile + var controllingWorkspacePath string + var v1ModulePaths []string + for _, targetPath := range bucketTargeting.TargetPaths() { + controllingWorkspaceOrModule, err := checkForControllingWorkspaceOrV1Module( + ctx, + logger, + bucket, + targetPath, + ) + if err != nil { + return nil, err + } + if controllingWorkspaceOrModule != nil { + // v1 workspace found + if bufWorkYAMLFile := controllingWorkspaceOrModule.BufWorkYAMLFile(); bufWorkYAMLFile != nil { + if controllingWorkspacePath != "" && controllingWorkspaceOrModule.Path() != controllingWorkspacePath { + return nil, fmt.Errorf("different controlling workspaces found: %q, %q", controllingWorkspacePath, controllingWorkspaceOrModule.Path()) + } + controllingWorkspacePath = controllingWorkspaceOrModule.Path() + v1BufWorkYAML = bufWorkYAMLFile + continue + } + // v2 workspace or v1 module found + if bufYAMLFile := controllingWorkspaceOrModule.BufYAMLFile(); bufYAMLFile != nil { + if bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 { + if controllingWorkspacePath != "" && controllingWorkspaceOrModule.Path() != controllingWorkspacePath { + return nil, fmt.Errorf("different controlling workspaces found: %q, %q", controllingWorkspacePath, controllingWorkspaceOrModule.Path()) + } + controllingWorkspacePath = controllingWorkspaceOrModule.Path() + v2BufYAMLFile = bufYAMLFile + continue + } + if bufYAMLFile.FileVersion() == bufconfig.FileVersionV1 { + v1ModulePaths = append(v1ModulePaths, controllingWorkspaceOrModule.Path()) + } + } + } + } + // We should not have found a controlling workspace and also have a list of v1 modules we + // need to build. + // In the future, we may be able to support multi-building, but for now, we error. We have + // the option to prioritise the workspace here and drop the v1ModulePaths, but that might + // be confusing to users. + if controllingWorkspacePath != "" && len(v1ModulePaths) > 0 { + return nil, fmt.Errorf("found a workspace %q that does not contain all found modules: %v", controllingWorkspacePath, v1ModulePaths) + } + if v2BufYAMLFile != nil { + return v2WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + v2BufYAMLFile, + ) + } + if v1BufWorkYAML != nil { + v1ModulePaths = v1BufWorkYAML.DirPaths() + } + // If we still have no v1 module paths, then we go to the final fallback and set a v1 + // module at the input path. 
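Condensing the fallback function above: each target path is walked up looking for a controlling workspace or a v1 module, conflicting discoveries are rejected, and the result decides what gets built. A rough standalone sketch of that decision, where a discovery is summarized by its path and kind (this mirrors the control flow above, omits the buf.work.yaml directory expansion, and does not use the real types):

package main

import (
    "errors"
    "fmt"
)

type kind int

const (
    kindV1Workspace kind = iota
    kindV2Workspace
    kindV1Module
)

type discovery struct {
    path string
    kind kind
}

// resolveFallback decides what to build when no controlling workspace was
// found for the input itself: a workspace discovered from the target paths,
// a set of v1 modules, or a default v1 module at the input path.
func resolveFallback(inputPath string, discoveries []discovery) (string, []string, error) {
    var workspacePath string
    var v1ModulePaths []string
    for _, d := range discoveries {
        switch d.kind {
        case kindV1Workspace, kindV2Workspace:
            if workspacePath != "" && workspacePath != d.path {
                return "", nil, errors.New("different controlling workspaces found: " + workspacePath + ", " + d.path)
            }
            workspacePath = d.path
        case kindV1Module:
            v1ModulePaths = append(v1ModulePaths, d.path)
        }
    }
    if workspacePath != "" && len(v1ModulePaths) > 0 {
        return "", nil, fmt.Errorf("found a workspace %q that does not contain all found modules: %v", workspacePath, v1ModulePaths)
    }
    if workspacePath == "" && len(v1ModulePaths) == 0 {
        // Final fallback: treat the input path itself as a single v1 module.
        v1ModulePaths = []string{inputPath}
    }
    return workspacePath, v1ModulePaths, nil
}

func main() {
    workspacePath, v1ModulePaths, err := resolveFallback(
        "proto",
        []discovery{
            {path: "proto/a", kind: kindV1Module},
            {path: "proto/b", kind: kindV1Module},
        },
    )
    fmt.Printf("%q %v %v\n", workspacePath, v1ModulePaths, err) // "" [proto/a proto/b] <nil>
}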
+ if len(v1ModulePaths) == 0 { + v1ModulePaths = append(v1ModulePaths, bucketTargeting.InputPath()) + } + return v1WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + v1ModulePaths, + nil, + ) +} + func getMappedModuleBucketAndModuleTargeting( ctx context.Context, config *workspaceBucketConfig, @@ -421,3 +543,79 @@ func getModuleConfigAndConfiguredDepModuleRefsV1Beta1OrV1( } return moduleConfigs[0], bufYAMLFile.ConfiguredDepModuleRefs(), nil } + +// checkForControllingWorkspaceOrV1Module take a bucket and path, and walks up the bucket +// from the base of the path, checking for a controlling workspace or v1 module. +func checkForControllingWorkspaceOrV1Module( + ctx context.Context, + logger *zap.Logger, + bucket storage.ReadBucket, + path string, +) (buftarget.ControllingWorkspace, error) { + path, err := normalpath.NormalizeAndValidate(path) + if err != nil { + return nil, err + } + // Keep track of any v1 module found along the way. If we find a v1 or v2 workspace, we + // return that over the v1 module, but we return this as the fallback. + var fallbackV1Module buftarget.ControllingWorkspace + // Similar to the mapping loop in buftarget for buftarget.BucketTargeting, we can't do + // this in a traditional loop like this: + // + // for curDirPath := path; curDirPath != "."; curDirPath = normalpath.Dir(curDirPath) { + // + // If we do that, then we don't run terminateFunc for ".", which we want to so that we get + // the correct value for the terminate bool. + // + // Instead, we effectively do a do-while loop. + curDirPath := path + for { + // First, check for a v1 or v2 controlling workspace + controllingWorkspace, err := buftarget.TerminateAtControllingWorkspace( + ctx, + bucket, + curDirPath, + path, + ) + if err != nil { + return nil, err + } + if controllingWorkspace != nil { + logger.Debug( + "buffetch termination found", + zap.String("curDirPath", curDirPath), + zap.String("path", path), + ) + return controllingWorkspace, nil + } + // Then check for a v1 module + v1Module, err := buftarget.TerminateAtV1Module(ctx, bucket, curDirPath, path) + if err != nil { + return nil, err + } + if v1Module != nil { + if fallbackV1Module != nil { + return nil, fmt.Errorf("nested modules %q and %q are not allowed", fallbackV1Module.Path(), v1Module.Path()) + } + fallbackV1Module = v1Module + } + if curDirPath == "." { + break + } + curDirPath = normalpath.Dir(curDirPath) + } + logger.Debug( + "buffetch no termination found", + zap.String("path", path), + ) + return fallbackV1Module, nil +} + +func checkForOverlap(inputPath string, moduleDirPaths []string) error { + for _, moduleDirPath := range moduleDirPaths { + if normalpath.ContainsPath(moduleDirPath, inputPath, normalpath.Relative) { + return fmt.Errorf("failed to build input %q because it is contained by directory %q", inputPath, moduleDirPath) + } + } + return fmt.Errorf("input path %q did not contain modules found in workspace %v", inputPath, moduleDirPaths) +} diff --git a/private/buf/cmd/buf/buf_test.go b/private/buf/cmd/buf/buf_test.go index 59b3251f83..cadee5908d 100644 --- a/private/buf/cmd/buf/buf_test.go +++ b/private/buf/cmd/buf/buf_test.go @@ -255,9 +255,10 @@ func TestFail7(t *testing.T) { t, nil, bufctl.ExitCodeFileAnnotation, - filepath.FromSlash(`testdata/fail/buf/buf.proto:3:1:Files with package "other" must be within a directory "other" relative to root but were in directory "fail/buf". 
-testdata/fail/buf/buf.proto:6:9:Field name "oneTwo" should be lower_snake_case, such as "one_two".`), - "", // stderr should be empty + "", // stdout is empty + // This is new behavior we introduced. When setting a config override, we no longer do + // a search for the controlling workspace. See bufctl/option.go for additional details. + filepath.FromSlash(`Failure: testdata/export/other/proto/unimported.proto: import "another.proto": file does not exist`), "lint", "--path", filepath.Join("testdata", "fail", "buf", "buf.proto"), @@ -273,7 +274,7 @@ testdata/fail/buf/buf.proto:6:9:Field name "oneTwo" should be lower_snake_case, // during the refactor. This is actually more correct - pre-refactor, the CLI was acting // as if the buf.yaml at testdata/fail/buf.yaml mattered in some way. In fact, it doesn't - // you've said that you have overridden it entirely. - filepath.FromSlash(`testdata/fail/buf/buf.proto:3:1:Files with package "other" must be within a directory "other" relative to root but were in directory "testdata/fail/buf". + filepath.FromSlash(`testdata/fail/buf/buf.proto:3:1:Files with package "other" must be within a directory "other" relative to root but were in directory ".". testdata/fail/buf/buf.proto:6:9:Field name "oneTwo" should be lower_snake_case, such as "one_two".`), "", // stderr should be empty "lint", @@ -1977,7 +1978,7 @@ func TestFormatSingleFile(t *testing.T) { "format", filepath.Join("testdata", "format", "simple"), "-o", - filepath.Join(tempDir, "simple.formatted.proto"), + filepath.Join(tempDir, "simple.formatted"), ) testRunStdout( t, @@ -1985,7 +1986,7 @@ func TestFormatSingleFile(t *testing.T) { 0, ``, "format", - filepath.Join(tempDir, "simple.formatted.proto"), + filepath.Join(tempDir, "simple.formatted"), "-d", ) } diff --git a/private/buf/cmd/buf/workspace_test.go b/private/buf/cmd/buf/workspace_test.go index b50273f017..db9c6b504f 100644 --- a/private/buf/cmd/buf/workspace_test.go +++ b/private/buf/cmd/buf/workspace_test.go @@ -1121,26 +1121,17 @@ func TestWorkspaceInputOverlapFail(t *testing.T) { // The target input cannot overlap with any of the directories defined // in the workspace. 
t.Parallel() - // TODO failing test - t.Skip() testRunStdoutStderrNoWarn( t, nil, 1, ``, - filepath.FromSlash(`Failure: failed to build input "proto/buf" because it is contained by directory "proto" listed in testdata/workspace/fail/overlap/buf.work.yaml`), + filepath.FromSlash(`Failure: failed to build input "proto/buf" because it is contained by directory "proto"`), "build", filepath.Join("testdata", "workspace", "fail", "overlap", "proto", "buf"), ) - testRunStdoutStderrNoWarn( - t, - nil, - 1, - ``, - filepath.FromSlash(`Failure: failed to build input "other" because it contains directory "other/proto" listed in testdata/workspace/success/dir/buf.work.yaml`), - "build", - filepath.Join("testdata", "workspace", "success", "dir", "other"), - ) + // This works because of our fallback logic, so we build the workspace at testdata/workspace/success/dir + testRunStdout(t, nil, 0, ``, "build", filepath.Join("testdata", "workspace", "success", "dir", "other")) } func TestWorkspaceNoVersionFail(t *testing.T) { diff --git a/private/bufpkg/bufmodule/module_read_bucket.go b/private/bufpkg/bufmodule/module_read_bucket.go index 2ac8f9e66e..aab927640d 100644 --- a/private/bufpkg/bufmodule/module_read_bucket.go +++ b/private/bufpkg/bufmodule/module_read_bucket.go @@ -362,6 +362,7 @@ func (b *moduleReadBucket) WalkFileInfos( } return protoFileTracker.validate() } + // If we are walking target files, then we track the module if it is the target module if b.module.IsTarget() { protoFileTracker.trackModule(b.module) From 5cd61f8d354271958b2cead5c6c61fe3ba8111f5 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Fri, 1 Mar 2024 12:42:32 -0500 Subject: [PATCH 07/29] Fix all private/buf/cmd/buf integration tests --- .../buffetch/internal/read_bucket_closer.go | 9 +++----- .../buffetch/internal/read_write_bucket.go | 9 +++----- private/buf/buffetch/internal/reader.go | 10 ++------- private/buf/buftarget/bucket_targeting.go | 15 +++---------- private/buf/bufworkspace/module_targeting.go | 2 +- private/buf/bufworkspace/option.go | 5 +---- .../buf/bufworkspace/workspace_targeting.go | 5 +---- private/bufpkg/bufmodule/module.go | 21 ++++++------------- private/pkg/storage/storageos/bucket.go | 10 ++------- .../pkg/storage/storageutil/storageutil.go | 5 +---- 10 files changed, 23 insertions(+), 68 deletions(-) diff --git a/private/buf/buffetch/internal/read_bucket_closer.go b/private/buf/buffetch/internal/read_bucket_closer.go index 28c3a96ea3..43be5d446a 100644 --- a/private/buf/buffetch/internal/read_bucket_closer.go +++ b/private/buf/buffetch/internal/read_bucket_closer.go @@ -35,15 +35,12 @@ func newReadBucketCloser( storageReadBucketCloser storage.ReadBucketCloser, bucketPath string, bucketTargeting buftarget.BucketTargeting, -) (*readBucketCloser, error) { - normalizedSubDirPath, err := normalpath.NormalizeAndValidate(bucketTargeting.InputPath()) - if err != nil { - return nil, err - } +) *readBucketCloser { + normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputPath()) return &readBucketCloser{ ReadBucketCloser: storageReadBucketCloser, subDirPath: normalizedSubDirPath, - }, nil + } } func newReadBucketCloserForReadWriteBucket( diff --git a/private/buf/buffetch/internal/read_write_bucket.go b/private/buf/buffetch/internal/read_write_bucket.go index 5dbade8311..75bd1adab7 100644 --- a/private/buf/buffetch/internal/read_write_bucket.go +++ b/private/buf/buffetch/internal/read_write_bucket.go @@ -32,15 +32,12 @@ func newReadWriteBucket( storageReadWriteBucket storage.ReadWriteBucket, bucketPath 
string, bucketTargeting buftarget.BucketTargeting, -) (*readWriteBucket, error) { - normalizedSubDirPath, err := normalpath.NormalizeAndValidate(bucketTargeting.InputPath()) - if err != nil { - return nil, err - } +) *readWriteBucket { + normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputPath()) return &readWriteBucket{ ReadWriteBucket: storageReadWriteBucket, subDirPath: normalizedSubDirPath, - }, nil + } } func (r *readWriteBucket) SubDirPath() string { diff --git a/private/buf/buffetch/internal/reader.go b/private/buf/buffetch/internal/reader.go index 7e57e99de8..0caeea4a37 100644 --- a/private/buf/buffetch/internal/reader.go +++ b/private/buf/buffetch/internal/reader.go @@ -602,10 +602,7 @@ func getReadBucketCloserForBucket( zap.String("bucketPath", bucketPath), zap.Strings("targetPaths", bucketTargeting.TargetPaths()), ) - readBucketCloser, err := newReadBucketCloser(inputBucket, bucketPath, bucketTargeting) - if err != nil { - return nil, nil, err - } + readBucketCloser := newReadBucketCloser(inputBucket, bucketPath, bucketTargeting) return readBucketCloser, bucketTargeting, nil } @@ -771,10 +768,7 @@ func getReadWriteBucketForOS( if err != nil { return nil, nil, err } - readWriteBucket, err := newReadWriteBucket(bucket, bucketPath, bucketTargeting) - if err != nil { - return nil, nil, err - } + readWriteBucket := newReadWriteBucket(bucket, bucketPath, bucketTargeting) return readWriteBucket, bucketTargeting, nil } diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index 669d6045f1..6c23b128ec 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ b/private/buf/buftarget/bucket_targeting.go @@ -115,10 +115,7 @@ func newBucketTargeting( // If a controlling workspace was found, we map the paths to the controlling workspace // because we'll be working with a workspace bucket. for i, targetPath := range targetPaths { - targetPath, err := normalpath.NormalizeAndValidate(targetPath) - if err != nil { - return nil, err - } + targetPath := normalpath.Normalize(targetPath) mappedTargetPath, err := normalpath.Rel(controllingWorkspace.Path(), targetPath) if err != nil { return nil, err @@ -126,10 +123,7 @@ func newBucketTargeting( mappedTargetPaths[i] = mappedTargetPath } for i, targetExcludePath := range targetExcludePaths { - targetExcludePath, err := normalpath.NormalizeAndValidate(targetExcludePath) - if err != nil { - return nil, err - } + targetExcludePath := normalpath.Normalize(targetExcludePath) mappedTargetExcludePath, err := normalpath.Rel(controllingWorkspace.Path(), targetExcludePath) if err != nil { return nil, err @@ -154,10 +148,7 @@ func mapControllingWorkspaceAndPath( path string, terminateFunc TerminateFunc, ) (ControllingWorkspace, string, error) { - path, err := normalpath.NormalizeAndValidate(path) - if err != nil { - return nil, "", err - } + path = normalpath.Normalize(path) // If no terminateFunc is passed, we can simply assume that we are mapping the bucket at // the path. 
if terminateFunc == nil { diff --git a/private/buf/bufworkspace/module_targeting.go b/private/buf/bufworkspace/module_targeting.go index 44c069208d..cbcd591a7a 100644 --- a/private/buf/bufworkspace/module_targeting.go +++ b/private/buf/bufworkspace/module_targeting.go @@ -173,7 +173,7 @@ func applyRootsToTargetPath(roots []string, path string, pathType normalpath.Pat return "", err } // just in case - return normalpath.NormalizeAndValidate(targetPath) + return normalpath.Normalize(targetPath), nil default: // this should never happen return "", fmt.Errorf("%q is contained in multiple roots %s", path, stringutil.SliceToHumanStringQuoted(roots)) diff --git a/private/buf/bufworkspace/option.go b/private/buf/bufworkspace/option.go index a12b03ab32..8412a1bf10 100644 --- a/private/buf/bufworkspace/option.go +++ b/private/buf/bufworkspace/option.go @@ -211,10 +211,7 @@ func newWorkspaceBucketConfig(options []WorkspaceBucketOption) (*workspaceBucket option.applyToWorkspaceBucketConfig(config) } var err error - config.targetSubDirPath, err = normalpath.NormalizeAndValidate(config.targetSubDirPath) - if err != nil { - return nil, err - } + config.targetSubDirPath = normalpath.Normalize(config.targetSubDirPath) config.targetPaths, err = slicesext.MapError( config.targetPaths, normalpath.NormalizeAndValidate, diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index ae4bfec9ab..94adf3498e 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -552,10 +552,7 @@ func checkForControllingWorkspaceOrV1Module( bucket storage.ReadBucket, path string, ) (buftarget.ControllingWorkspace, error) { - path, err := normalpath.NormalizeAndValidate(path) - if err != nil { - return nil, err - } + path = normalpath.Normalize(path) // Keep track of any v1 module found along the way. If we find a v1 or v2 workspace, we // return that over the v1 module, but we return this as the fallback. 
var fallbackV1Module buftarget.ControllingWorkspace diff --git a/private/bufpkg/bufmodule/module.go b/private/bufpkg/bufmodule/module.go index 4da64d1a44..0ebdac339d 100644 --- a/private/bufpkg/bufmodule/module.go +++ b/private/bufpkg/bufmodule/module.go @@ -284,24 +284,15 @@ func newModule( return nil, syserror.New("moduleFullName not present and commitID present when constructing a remote Module") } - normalizeAndValidateIfNotEmpty := func(path string) (string, error) { + normalizeIfNotEmpty := func(path string) string { if path == "" { - return path, nil + return path } - return normalpath.NormalizeAndValidate(path) - } - targetPaths, err := slicesext.MapError(targetPaths, normalizeAndValidateIfNotEmpty) - if err != nil { - return nil, syserror.Wrap(err) - } - targetExcludePaths, err = slicesext.MapError(targetExcludePaths, normalizeAndValidateIfNotEmpty) - if err != nil { - return nil, syserror.Wrap(err) - } - protoFileTargetPath, err = normalizeAndValidateIfNotEmpty(protoFileTargetPath) - if err != nil { - return nil, syserror.Wrap(err) + return normalpath.Normalize(path) } + targetPaths = slicesext.Map(targetPaths, normalizeIfNotEmpty) + targetExcludePaths = slicesext.Map(targetExcludePaths, normalizeIfNotEmpty) + protoFileTargetPath = normalizeIfNotEmpty(protoFileTargetPath) module := &module{ ctx: ctx, diff --git a/private/pkg/storage/storageos/bucket.go b/private/pkg/storage/storageos/bucket.go index 19d712b4db..7dd992c261 100644 --- a/private/pkg/storage/storageos/bucket.go +++ b/private/pkg/storage/storageos/bucket.go @@ -140,10 +140,7 @@ func (b *bucket) Walk( return err } // just in case - path, err = normalpath.NormalizeAndValidate(path) - if err != nil { - return err - } + path = normalpath.Normalize(path) if err := f( storageutil.NewObjectInfo( path, @@ -313,10 +310,7 @@ func (b *bucket) validateExternalPath(path string, externalPath string) error { } func (b *bucket) getExternalPrefix(prefix string) (string, error) { - prefix, err := storageutil.ValidatePrefix(prefix) - if err != nil { - return "", err - } + prefix = normalpath.Normalize(prefix) realClean, err := filepathext.RealClean(normalpath.Join(b.rootPath, prefix)) if err != nil { return "", err diff --git a/private/pkg/storage/storageutil/storageutil.go b/private/pkg/storage/storageutil/storageutil.go index 8346504e56..d8d8b8ea8d 100644 --- a/private/pkg/storage/storageutil/storageutil.go +++ b/private/pkg/storage/storageutil/storageutil.go @@ -63,10 +63,7 @@ func (o ObjectInfo) LocalPath() string { // ValidatePath validates a path. func ValidatePath(path string) (string, error) { - path, err := normalpath.NormalizeAndValidate(path) - if err != nil { - return "", err - } + path = normalpath.Normalize(path) if path == "." 
{
 		return "", errors.New("cannot use root")
 	}

From 95d6877ee9f7956cd74745f3c3c8ea619041aaa7 Mon Sep 17 00:00:00 2001
From: Doria Keung
Date: Fri, 1 Mar 2024 16:44:10 -0500
Subject: [PATCH 08/29] Fix issue with input paths outside of the current working dir context

---
 private/buf/buffetch/internal/reader.go       | 151 +++++++++++-------
 private/buf/buftarget/bucket_targeting.go     |  22 ++-
 .../pkg/storage/storageutil/storageutil.go    |   5 +-
 3 files changed, 111 insertions(+), 67 deletions(-)

diff --git a/private/buf/buffetch/internal/reader.go b/private/buf/buffetch/internal/reader.go
index 0caeea4a37..cbe38ac639 100644
--- a/private/buf/buffetch/internal/reader.go
+++ b/private/buf/buffetch/internal/reader.go
@@ -655,100 +655,115 @@ func getReadWriteBucketForOS(
 	if err != nil {
 		return nil, nil, attemptToFixOSRootBucketPathErrors(fsRoot, err)
 	}
-	// We now know where the workspace is relative to the FS root.
-	// If the input path is provided as an absolute path, we create a new bucket for the
-	// controlling workspace using an absolute path.
-	// Otherwise we current working directory (pwd) and create the bucket using a relative
-	// path to that.
+	// osRootBucketTargeting returns the information on whether or not a controlling
+	// workspace was found based on the input path.
 	//
-	// Examples:
+	// *** CONTROLLING WORKSPACE FOUND ***
 	//
-	// inputDirPath: path/to/foo
-	// terminateFileLocation: path/to
-	// returnMapPath: path/to
-	// returnSubDirPath: foo
-	// Make bucket on: Rel(pwd, controlling workspace)
+	// In the case where a controlling workspace is found, we want to create a new bucket
+	// for the controlling workspace.
+	// If the input path is an absolute path, we want to use an absolute path:
 	//
-	// inputDirPath: /users/alice/path/to/foo
-	// terminateFileLocation: /users/alice/path/to
-	// returnMapPath: /users/alice/path/to
-	// returnSubDirPath: foo
-	// Make bucket on: FS root + controlling workspace (since absolute)
+	// bucketPath := fsRoot + controllingWorkspace.Path()
 	//
-	// If no controlling workspace is found, for absolute paths, close the workspace at the
-	// input path. For relative paths, we use the current working directory.
+	// If the input path is a relative path, we want to use a relative path between the
+	// current working directory (pwd) and controlling workspace.
+	//
+	// bucketPath := Rel(Rel(fsRoot, pwd), controllingWorkspace.Path())
+	//
+	// We do not need to remap the input path, target paths, and target exclude paths
+	// returned by buftarget.BucketTargeting, because they are already relative to the
+	// controlling workspace.
+	//
+	// *** CONTROLLING WORKSPACE NOT FOUND ***
+	//
+	// In the case where a controlling workspace is not found, we have three outcomes for
+	// creating a new bucket.
+	// If the input path is an absolute path, we want to use an absolute path to the input
+	// path:
+	//
+	// bucketPath := Abs(inputPath)
+	//
+	// If the input path is a relative path, there are two possible outcomes: the input path,
+	// target paths, and target exclude paths are within the context of the working directory
+	// or outside of the context of the working directory.
+ // + // In the case where the input path, target paths, and target exclude paths are within + // the context of the working directory, we use pwd: + // + // bucketPath := Rel(fsRoot,pwd) + // + // In the case where the input path, target paths, and target exclude paths are outside + // the context of the working directory, we use the input path relative to the pwd: + // + // bucketPath := Rel(Rel(fsRoot,pwd), Rel(fsRoot, inputPath)) + // + // For all cases where no controlling workspace was found, we need to remap the input + // path, target paths, and target exclude paths to the root of the new bucket. var bucketPath string var inputPath string bucketTargetPaths := make([]string, len(osRootBucketTargeting.TargetPaths())) bucketTargetExcludePaths := make([]string, len(osRootBucketTargeting.TargetExcludePaths())) - if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { - if osRootBucketTargeting.ControllingWorkspace() != nil { + if controllingWorkspace := osRootBucketTargeting.ControllingWorkspace(); controllingWorkspace != nil { + if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { bucketPath = normalpath.Join(fsRoot, osRootBucketTargeting.ControllingWorkspace().Path()) - inputPath = osRootBucketTargeting.InputPath() - bucketTargetPaths = osRootBucketTargeting.TargetPaths() - bucketTargetExcludePaths = osRootBucketTargeting.TargetExcludePaths() } else { - bucketPath = normalpath.Join(fsRoot, fsRootInputSubDirPath) - inputPath, err = normalpath.Rel(fsRootInputSubDirPath, fsRootInputSubDirPath) + // Relative input path + pwdFSRelPath, err := getPWDFSRelPath() if err != nil { return nil, nil, err } - for i, targetPath := range osRootBucketTargeting.TargetPaths() { - bucketTargetPaths[i], err = normalpath.Rel(fsRootInputSubDirPath, targetPath) - if err != nil { - return nil, nil, err - } + bucketPath, err = normalpath.Rel(pwdFSRelPath, osRootBucketTargeting.ControllingWorkspace().Path()) + if err != nil { + return nil, nil, err } - for i, targetExcludePath := range osRootBucketTargeting.TargetExcludePaths() { - bucketTargetExcludePaths[i], err = normalpath.Rel(fsRootInputSubDirPath, targetExcludePath) + } + inputPath = osRootBucketTargeting.InputPath() + bucketTargetPaths = osRootBucketTargeting.TargetPaths() + bucketTargetExcludePaths = osRootBucketTargeting.TargetExcludePaths() + } else { + // No controlling workspace found + if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { + bucketPath = inputDirPath + } else { + // Relative input path + pwdFSRelPath, err := getPWDFSRelPath() + if err != nil { + return nil, nil, err + } + if filepath.IsLocal(normalpath.Unnormalize(inputDirPath)) { + // Use current working directory + bucketPath = "." + } else { + // Relative input path outside of working directory context + bucketPath, err = normalpath.Rel(pwdFSRelPath, fsRootInputSubDirPath) if err != nil { return nil, nil, err } } } - } else { - pwd, err := osext.Getwd() + // Map input path, target paths, and target exclude paths to the new bucket path. 
+ _, bucketPathFSRelPath, err := fsRootAndFSRelPathForPath(bucketPath) if err != nil { return nil, nil, err } - _, pwdFSRelPath, err := fsRootAndFSRelPathForPath(pwd) + inputPath, err = normalpath.Rel(bucketPathFSRelPath, osRootBucketTargeting.InputPath()) if err != nil { return nil, nil, err } - if osRootBucketTargeting.ControllingWorkspace() != nil { - bucketPath, err = normalpath.Rel(pwdFSRelPath, osRootBucketTargeting.ControllingWorkspace().Path()) + for i, targetPath := range osRootBucketTargeting.TargetPaths() { + bucketTargetPaths[i], err = normalpath.Rel(bucketPathFSRelPath, targetPath) if err != nil { return nil, nil, err } - inputPath = osRootBucketTargeting.InputPath() - bucketTargetPaths = osRootBucketTargeting.TargetPaths() - bucketTargetExcludePaths = osRootBucketTargeting.TargetExcludePaths() - - } else { - bucketPath = "." - inputPath, err = normalpath.Rel(pwdFSRelPath, osRootBucketTargeting.InputPath()) + } + for i, targetExcludePath := range osRootBucketTargeting.TargetExcludePaths() { + bucketTargetExcludePaths[i], err = normalpath.Rel(bucketPathFSRelPath, targetExcludePath) if err != nil { return nil, nil, err } - for i, targetPath := range osRootBucketTargeting.TargetPaths() { - bucketTargetPaths[i], err = normalpath.Rel(pwdFSRelPath, targetPath) - if err != nil { - return nil, nil, err - } - } - for i, targetExcludePath := range osRootBucketTargeting.TargetExcludePaths() { - bucketTargetExcludePaths[i], err = normalpath.Rel(pwdFSRelPath, targetExcludePath) - if err != nil { - return nil, nil, err - } - } } } - // Now that we've mapped the workspace against the FS root, we recreate the bucket with - // at the sub dir path. - // First we get the absolute path of the controlling workspace, which based on the OS root - // bucket targeting is the FS root joined with the controlling workspace path. - // And we use it to make a bucket. bucket, err := storageosProvider.NewReadWriteBucket( bucketPath, storageos.ReadWriteBucketWithSymlinksIfSupported(), @@ -800,6 +815,20 @@ func getReadBucketCloserForOSProtoFile( return newReadBucketCloserForReadWriteBucket(readWriteBucket), bucketTargeting, nil } +// getPWDFSRelPath is a helper function that gets the relative path of the current working +// directory to the FS root. +func getPWDFSRelPath() (string, error) { + pwd, err := osext.Getwd() + if err != nil { + return "", err + } + _, pwdFSRelPath, err := fsRootAndFSRelPathForPath(pwd) + if err != nil { + return "", err + } + return pwdFSRelPath, nil +} + // fsRootAndFSRelPathForPath is a helper function that takes a path and returns the FS // root and relative path to the FS root. func fsRootAndFSRelPathForPath(path string) (string, string, error) { diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index 6c23b128ec..0280eabe3e 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ b/private/buf/buftarget/bucket_targeting.go @@ -22,9 +22,8 @@ import ( "go.uber.org/zap" ) -// BucketTargeting provides the path to the controllng workspace, target paths, and target -// exclude paths mapped to the bucket. -// All paths for targeting information are normalized. +// BucketTargeting provides targeting information for the bucket based on any controlling +// workspaces that have been found. type BucketTargeting interface { // ControllingWorkpsace returns the information for the controlling workspace, if one was // found. If not found, then this will be nil. 
@@ -42,7 +41,12 @@ type BucketTargeting interface { // NewBucketTargeting returns new targeting information for the given bucket, input path, // target paths, and target exclude paths. // -// Paths must be relative. +// The inputPath, targetPaths, and targetExcludePaths are expected to be relative to the +// root of the bucket. +// +// If a controlling workspace is found, the input path, target paths, and target exclude +// paths are mapped as relative paths to the controlling workspace path. +// Otherwise, the input path, target paths, and target exclude paths are returned as-is. func NewBucketTargeting( ctx context.Context, logger *zap.Logger, @@ -52,7 +56,15 @@ func NewBucketTargeting( targetExcludePaths []string, terminateFunc TerminateFunc, ) (BucketTargeting, error) { - return newBucketTargeting(ctx, logger, bucket, inputPath, targetPaths, targetExcludePaths, terminateFunc) + return newBucketTargeting( + ctx, + logger, + bucket, + inputPath, + targetPaths, + targetExcludePaths, + terminateFunc, + ) } // *** PRIVATE *** diff --git a/private/pkg/storage/storageutil/storageutil.go b/private/pkg/storage/storageutil/storageutil.go index d8d8b8ea8d..8346504e56 100644 --- a/private/pkg/storage/storageutil/storageutil.go +++ b/private/pkg/storage/storageutil/storageutil.go @@ -63,7 +63,10 @@ func (o ObjectInfo) LocalPath() string { // ValidatePath validates a path. func ValidatePath(path string) (string, error) { - path = normalpath.Normalize(path) + path, err := normalpath.NormalizeAndValidate(path) + if err != nil { + return "", err + } if path == "." { return "", errors.New("cannot use root") } From 80461510c157b5763555d134dcb0ee20529c9722 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Mon, 4 Mar 2024 14:04:17 -0500 Subject: [PATCH 09/29] Fix the last of the tests --- .../cmd/buf/command/generate/generate_test.go | 2 +- .../command/generate/generate_unix_test.go | 63 +++++++------------ .../proto/workspacetest/workspace_test.go | 2 +- .../bufcheck/bufbreaking/bufbreaking_test.go | 4 +- .../bufpkg/bufcheck/buflint/buflint_test.go | 2 +- private/pkg/storage/storageos/bucket.go | 5 +- 6 files changed, 30 insertions(+), 48 deletions(-) diff --git a/private/buf/cmd/buf/command/generate/generate_test.go b/private/buf/cmd/buf/command/generate/generate_test.go index 34c47033e4..5d81c90e98 100644 --- a/private/buf/cmd/buf/command/generate/generate_test.go +++ b/private/buf/cmd/buf/command/generate/generate_test.go @@ -224,7 +224,7 @@ func TestOutputWithPathEqualToExclude(t *testing.T) { nil, 1, ``, - filepath.FromSlash(`Failure: cannot set the same path for both --path and --exclude-path: a/v1/a.proto`), + filepath.FromSlash(`Failure: cannot set the same path for both --path and --exclude-path: testdata/paths/a/v1/a.proto`), "--output", tempDirPath, "--template", diff --git a/private/buf/cmd/buf/command/generate/generate_unix_test.go b/private/buf/cmd/buf/command/generate/generate_unix_test.go index dbb42d7ee4..7eab96dccf 100644 --- a/private/buf/cmd/buf/command/generate/generate_unix_test.go +++ b/private/buf/cmd/buf/command/generate/generate_unix_test.go @@ -79,8 +79,14 @@ func TestOutputWithExclude(t *testing.T) { func TestOutputWithPathWithinExclude(t *testing.T) { t.Parallel() tempDirPath := t.TempDir() - testRunSuccess( + // This is new post-refactor. Before, we gave precedence to --path. While a change, + // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
+ testRunStdoutStderr( t, + nil, + 1, + "", + filepath.FromSlash(`Failure: excluded path "testdata/paths/a" contains targeted path "testdata/paths/a/v1/a.proto", which means all paths in "testdata/paths/a/v1/a.proto" will be excluded`), "--output", tempDirPath, "--template", @@ -90,11 +96,6 @@ func TestOutputWithPathWithinExclude(t *testing.T) { "--exclude-path", filepath.Join("testdata", "paths", "a"), ) - - _, err := os.Stat(filepath.Join(tempDirPath, "java", "a", "v1", "A.java")) - require.NoError(t, err) - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v2", "A.java")) - require.Contains(t, err.Error(), "no such file or directory") } func TestOutputWithExcludeWithinPath(t *testing.T) { @@ -126,8 +127,14 @@ func TestOutputWithExcludeWithinPath(t *testing.T) { func TestOutputWithNestedExcludeAndTargetPaths(t *testing.T) { t.Parallel() tempDirPath := t.TempDir() - testRunSuccess( + // This is new post-refactor. Before, we gave precedence to --path. While a change, + // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. + testRunStdoutStderr( t, + nil, + 1, + "", + filepath.FromSlash(`Failure: excluded path "testdata/paths/a/v3" contains targeted path "testdata/paths/a/v3/foo", which means all paths in "testdata/paths/a/v3/foo" will be excluded`), "--output", tempDirPath, "--template", @@ -140,30 +147,19 @@ func TestOutputWithNestedExcludeAndTargetPaths(t *testing.T) { filepath.Join("testdata", "paths", "a", "v3", "foo"), filepath.Join("testdata", "paths"), ) - _, err := os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "foo", "FooOuterClass.java")) - require.NoError(t, err) - _, err = os.Stat(filepath.Join(tempDirPath, "java", "b", "v1", "B.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v1", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v2", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "foo", "BarOuterClass.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") } func TestWorkspaceGenerateWithExcludeAndTargetPaths(t *testing.T) { t.Parallel() tempDirPath := t.TempDir() - testRunSuccess( + // This is new post-refactor. Before, we gave precedence to --path. While a change, + // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
+ testRunStdoutStderr( t, + nil, + 1, + "", + filepath.FromSlash(`Failure: excluded path "a/v3" contains targeted path "a/v3/foo", which means all paths in "a/v3/foo" will be excluded`), "--output", tempDirPath, "--template", @@ -178,21 +174,4 @@ func TestWorkspaceGenerateWithExcludeAndTargetPaths(t *testing.T) { filepath.Join("testdata", "workspace", "b", "v1", "foo.proto"), filepath.Join("testdata", "workspace"), ) - _, err := os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "foo", "FooOuterClass.java")) - require.NoError(t, err) - _, err = os.Stat(filepath.Join(tempDirPath, "java", "b", "v1", "B.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v1", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v2", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "foo", "BarOuterClass.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") } diff --git a/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go b/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go index 66dfe0071d..68095f7134 100644 --- a/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go +++ b/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go @@ -177,7 +177,7 @@ func TestWorkspaceOverlapSubDirectory(t *testing.T) { nil, 1, ``, - filepath.FromSlash(`Failure: failed to build input "other/proto/one" because it is contained by directory "other/proto" listed in ../../../buf.work.yaml`), + filepath.FromSlash(`Failure: failed to build input "other/proto/one" because it is contained by directory "other/proto"`), "build", filepath.Join("..", "one"), ) diff --git a/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go b/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go index 49ca07a19a..8433f6288b 100644 --- a/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go +++ b/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go @@ -770,7 +770,7 @@ func testBreaking( ctx, logger, previousReadWriteBucket, - previousDirPath, + ".", // the bucket is rooted at the input nil, nil, buftarget.TerminateAtControllingWorkspace, @@ -785,7 +785,7 @@ func testBreaking( ctx, logger, readWriteBucket, - dirPath, + ".", // the bucket is rooted at the input nil, nil, buftarget.TerminateAtControllingWorkspace, diff --git a/private/bufpkg/bufcheck/buflint/buflint_test.go b/private/bufpkg/bufcheck/buflint/buflint_test.go index 4931415ff8..54a661156d 100644 --- a/private/bufpkg/bufcheck/buflint/buflint_test.go +++ b/private/bufpkg/bufcheck/buflint/buflint_test.go @@ -1173,7 +1173,7 @@ func testLintWithOptions( ctx, zaptest.NewLogger(t), readWriteBucket, - dirPath, + ".", // the bucket is rooted at the input nil, nil, buftarget.TerminateAtControllingWorkspace, diff --git a/private/pkg/storage/storageos/bucket.go b/private/pkg/storage/storageos/bucket.go index 7dd992c261..d2bb7e6974 100644 --- a/private/pkg/storage/storageos/bucket.go +++ b/private/pkg/storage/storageos/bucket.go @@ -310,7 +310,10 @@ func (b *bucket) 
validateExternalPath(path string, externalPath string) error { } func (b *bucket) getExternalPrefix(prefix string) (string, error) { - prefix = normalpath.Normalize(prefix) + prefix, err := normalpath.NormalizeAndValidate(prefix) + if err != nil { + return "", err + } realClean, err := filepathext.RealClean(normalpath.Join(b.rootPath, prefix)) if err != nil { return "", err From 859e0187ec49f0f31e96ebf6a2156a7c37ca993f Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Mon, 4 Mar 2024 14:15:32 -0500 Subject: [PATCH 10/29] Fix lint --- private/buf/buffetch/internal/reader_test.go | 2 -- private/buf/buftarget/controlling_workspace.go | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/private/buf/buffetch/internal/reader_test.go b/private/buf/buffetch/internal/reader_test.go index c9ef533da4..e8d90a5130 100644 --- a/private/buf/buffetch/internal/reader_test.go +++ b/private/buf/buffetch/internal/reader_test.go @@ -29,8 +29,6 @@ import ( "go.uber.org/zap" ) -// TODO(doria): add tests with target paths and target exclude paths - func TestGetReadBucketCloserForBucketNoTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() diff --git a/private/buf/buftarget/controlling_workspace.go b/private/buf/buftarget/controlling_workspace.go index b73fbbce31..c7f42a2e97 100644 --- a/private/buf/buftarget/controlling_workspace.go +++ b/private/buf/buftarget/controlling_workspace.go @@ -1,3 +1,17 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package buftarget import ( From 36c73b1410add933030a60b0982169df2432d6ac Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Mon, 4 Mar 2024 15:03:12 -0500 Subject: [PATCH 11/29] Rename InputPath to InputDir for bucket targeting for clarity --- private/buf/bufctl/controller.go | 2 +- .../buffetch/internal/read_bucket_closer.go | 2 +- .../buffetch/internal/read_write_bucket.go | 2 +- private/buf/buffetch/internal/reader.go | 46 +++++++++---------- private/buf/buffetch/internal/reader_test.go | 17 +------ private/buf/buftarget/bucket_targeting.go | 36 +++++++-------- .../workspace_dep_manager_provider.go | 2 +- .../buf/bufworkspace/workspace_targeting.go | 46 +++++++++---------- private/buf/bufworkspace/workspace_test.go | 6 +-- 9 files changed, 72 insertions(+), 87 deletions(-) diff --git a/private/buf/bufctl/controller.go b/private/buf/bufctl/controller.go index 2f6ccbbe87..65a342da7b 100644 --- a/private/buf/bufctl/controller.go +++ b/private/buf/bufctl/controller.go @@ -846,7 +846,7 @@ func (c *controller) getWorkspaceForSourceRef( }() options := []bufworkspace.WorkspaceBucketOption{ bufworkspace.WithTargetSubDirPath( - bucketTargeting.InputPath(), + bucketTargeting.InputDir(), ), bufworkspace.WithTargetPaths( bucketTargeting.TargetPaths(), diff --git a/private/buf/buffetch/internal/read_bucket_closer.go b/private/buf/buffetch/internal/read_bucket_closer.go index 43be5d446a..79827f440e 100644 --- a/private/buf/buffetch/internal/read_bucket_closer.go +++ b/private/buf/buffetch/internal/read_bucket_closer.go @@ -36,7 +36,7 @@ func newReadBucketCloser( bucketPath string, bucketTargeting buftarget.BucketTargeting, ) *readBucketCloser { - normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputPath()) + normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputDir()) return &readBucketCloser{ ReadBucketCloser: storageReadBucketCloser, subDirPath: normalizedSubDirPath, diff --git a/private/buf/buffetch/internal/read_write_bucket.go b/private/buf/buffetch/internal/read_write_bucket.go index 75bd1adab7..cd0f700087 100644 --- a/private/buf/buffetch/internal/read_write_bucket.go +++ b/private/buf/buffetch/internal/read_write_bucket.go @@ -33,7 +33,7 @@ func newReadWriteBucket( bucketPath string, bucketTargeting buftarget.BucketTargeting, ) *readWriteBucket { - normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputPath()) + normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputDir()) return &readWriteBucket{ ReadWriteBucket: storageReadWriteBucket, subDirPath: normalizedSubDirPath, diff --git a/private/buf/buffetch/internal/reader.go b/private/buf/buffetch/internal/reader.go index cbe38ac639..7b21e54577 100644 --- a/private/buf/buffetch/internal/reader.go +++ b/private/buf/buffetch/internal/reader.go @@ -587,7 +587,7 @@ func getReadBucketCloserForBucket( if bucketTargeting.ControllingWorkspace() != nil { bucketPath = bucketTargeting.ControllingWorkspace().Path() } else { - bucketPath = bucketTargeting.InputPath() + bucketPath = bucketTargeting.InputDir() } if bucketPath != "." { inputBucket = storage.MapReadBucketCloser( @@ -656,22 +656,22 @@ func getReadWriteBucketForOS( return nil, nil, attemptToFixOSRootBucketPathErrors(fsRoot, err) } // osRootBucketTargeting returns the information on whether or not a controlling - // workspace was found based on the input path. + // workspace was found based on the inputDirPath. 
 	//
 	// *** CONTROLLING WORKSPACE FOUND ***
 	//
 	// In the case where a controlling workspace is found, we want to create a new bucket
 	// for the controlling workspace.
-	// If the input path is an absolute path, we want to use an absolute path:
+	// If the inputDirPath is an absolute path, we want to use an absolute path:
 	//
 	// bucketPath := fsRoot + controllingWorkspace.Path()
 	//
-	// If the input path is a relative path, we want to use a relative path between the
+	// If the inputDirPath is a relative path, we want to use a relative path between the
 	// current working directory (pwd) and controlling workspace.
 	//
 	// bucketPath := Rel(Rel(fsRoot, pwd), controllingWorkspace.Path())
 	//
-	// We do not need to remap the input path, target paths, and target exclude paths
+	// We do not need to remap the input dir, target paths, and target exclude paths
 	// returned by buftarget.BucketTargeting, because they are already relative to the
 	// controlling workspace.
 	//
@@ -679,36 +679,36 @@ func getReadWriteBucketForOS(
 	//
 	// In the case where a controlling workspace is not found, we have three outcomes for
 	// creating a new bucket.
-	// If the input path is an absolute path, we want to use an absolute path to the input
+	// If the inputDirPath is an absolute path, we want to use an absolute path to the input
 	// path:
 	//
-	// bucketPath := Abs(inputPath)
+	// bucketPath := Abs(inputDirPath)
 	//
-	// If the input path is a relative path, there are two possible outcomes: the input path,
-	// target paths, and target exclude paths are within the context of the working directory
-	// or outside of the context of the working directory.
+	// If the inputDirPath is a relative path, there are two possible outcomes: the input
+	// dir is within the context of the working directory or is outside of the context of
+	// the working directory.
 	//
-	// In the case where the input path, target paths, and target exclude paths are within
-	// the context of the working directory, we use pwd:
+	// In the case where the input dir is within the context of the working directory, we
+	// use pwd:
 	//
 	// bucketPath := Rel(fsRoot,pwd)
 	//
-	// In the case where the input path, target paths, and target exclude paths are outside
-	// the context of the working directory, we use the input path relative to the pwd:
+	// In the case where the input dir is outside the context of the working directory, we
+	// use the input dir relative to the pwd:
 	//
-	// bucketPath := Rel(Rel(fsRoot,pwd), Rel(fsRoot, inputPath))
+	// bucketPath := Rel(Rel(fsRoot,pwd), Rel(fsRoot, inputDirPath))
 	//
 	// For all cases where no controlling workspace was found, we need to remap the input
 	// path, target paths, and target exclude paths to the root of the new bucket.
var bucketPath string - var inputPath string + var inputDir string bucketTargetPaths := make([]string, len(osRootBucketTargeting.TargetPaths())) bucketTargetExcludePaths := make([]string, len(osRootBucketTargeting.TargetExcludePaths())) if controllingWorkspace := osRootBucketTargeting.ControllingWorkspace(); controllingWorkspace != nil { if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { bucketPath = normalpath.Join(fsRoot, osRootBucketTargeting.ControllingWorkspace().Path()) } else { - // Relative input path + // Relative input dir pwdFSRelPath, err := getPWDFSRelPath() if err != nil { return nil, nil, err @@ -718,7 +718,7 @@ func getReadWriteBucketForOS( return nil, nil, err } } - inputPath = osRootBucketTargeting.InputPath() + inputDir = osRootBucketTargeting.InputDir() bucketTargetPaths = osRootBucketTargeting.TargetPaths() bucketTargetExcludePaths = osRootBucketTargeting.TargetExcludePaths() } else { @@ -726,7 +726,7 @@ func getReadWriteBucketForOS( if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { bucketPath = inputDirPath } else { - // Relative input path + // Relative input dir pwdFSRelPath, err := getPWDFSRelPath() if err != nil { return nil, nil, err @@ -735,19 +735,19 @@ func getReadWriteBucketForOS( // Use current working directory bucketPath = "." } else { - // Relative input path outside of working directory context + // Relative input dir outside of working directory context bucketPath, err = normalpath.Rel(pwdFSRelPath, fsRootInputSubDirPath) if err != nil { return nil, nil, err } } } - // Map input path, target paths, and target exclude paths to the new bucket path. + // Map input dir, target paths, and target exclude paths to the new bucket path. _, bucketPathFSRelPath, err := fsRootAndFSRelPathForPath(bucketPath) if err != nil { return nil, nil, err } - inputPath, err = normalpath.Rel(bucketPathFSRelPath, osRootBucketTargeting.InputPath()) + inputDir, err = normalpath.Rel(bucketPathFSRelPath, osRootBucketTargeting.InputDir()) if err != nil { return nil, nil, err } @@ -775,7 +775,7 @@ func getReadWriteBucketForOS( ctx, logger, bucket, - inputPath, + inputDir, bucketTargetPaths, bucketTargetExcludePaths, terminateFunc, diff --git a/private/buf/buffetch/internal/reader_test.go b/private/buf/buffetch/internal/reader_test.go index e8d90a5130..11aa823a20 100644 --- a/private/buf/buffetch/internal/reader_test.go +++ b/private/buf/buffetch/internal/reader_test.go @@ -46,7 +46,6 @@ func TestGetReadBucketCloserForBucketNoTerminateFileName(t *testing.T) { require.NoError(t, err) require.Nil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, "three/four/five", readBucketCloser.SubDirPath()) - require.Equal(t, "three/four/five", bucketTargeting.InputPath()) _, err = readBucketCloser.Stat(ctx, "buf.yaml") require.NoError(t, err) require.NoError(t, readBucketCloser.Close()) @@ -69,7 +68,6 @@ func TestGetReadBucketCloserTerminateFileName(t *testing.T) { require.NoError(t, err) require.Equal(t, "three", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) - require.Equal(t, "four/five", bucketTargeting.InputPath()) _, err = readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.NoError(t, readBucketCloser.Close()) @@ -92,7 +90,6 @@ func TestGetReadBucketCloserForBucketNoSubDirPath(t *testing.T) { require.NoError(t, err) require.Nil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, ".", readBucketCloser.SubDirPath()) - require.Equal(t, ".", bucketTargeting.InputPath()) _, err 
= readBucketCloser.Stat(ctx, "buf.yaml") require.NoError(t, err) require.NoError(t, readBucketCloser.Close()) @@ -117,7 +114,6 @@ func TestGetReadBucketCloserForBucketAbs(t *testing.T) { require.NoError(t, err) require.Equal(t, "three", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) - require.Equal(t, "four/five", bucketTargeting.InputPath()) _, err = readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.NoError(t, readBucketCloser.Close()) @@ -138,7 +134,6 @@ func TestGetReadWriteBucketForOSNoTerminateFileName(t *testing.T) { require.NoError(t, err) require.Nil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, "testdata/bufyaml/one/two/three/four/five", readWriteBucket.SubDirPath()) - require.Equal(t, "testdata/bufyaml/one/two/three/four/five", bucketTargeting.InputPath()) fileInfo, err := readWriteBucket.Stat(ctx, "testdata/bufyaml/one/two/three/four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -159,7 +154,6 @@ func TestGetReadWriteBucketForOSTerminateFileName(t *testing.T) { require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readWriteBucket.SubDirPath()) - require.Equal(t, "four/five", bucketTargeting.InputPath()) fileInfo, err := readWriteBucket.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -194,7 +188,6 @@ func TestGetReadWriteBucketForOSParentPwd(t *testing.T) { require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readWriteBucket.SubDirPath()) - require.Equal(t, "four/five", bucketTargeting.InputPath()) fileInfo, err := readWriteBucket.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -231,7 +224,6 @@ func TestGetReadWriteBucketForOSAbsPwd(t *testing.T) { require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readWriteBucket.SubDirPath()) - require.Equal(t, "four/five", bucketTargeting.InputPath()) fileInfo, err := readWriteBucket.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five/buf.yaml"), filepath.ToSlash(fileInfo.ExternalPath())) @@ -253,7 +245,6 @@ func TestGetReadBucketCloserForOSProtoFileNoWorkspaceTerminateFileName(t *testin require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "proto", readBucketCloser.SubDirPath()) - require.Equal(t, "proto", bucketTargeting.InputPath()) require.Len(t, bucketTargeting.TargetPaths(), 1) require.Equal(t, "proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "buf.yaml") @@ -274,7 +265,6 @@ func TestGetReadBucketCloserForOSProtoFileTerminateFileName(t *testing.T) { ) require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) - require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) require.Len(t, bucketTargeting.TargetPaths(), 1) require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) @@ -311,7 +301,6 @@ 
func TestGetReadBucketCloserForOSProtoFileParentPwd(t *testing.T) { require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) - require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) require.Len(t, bucketTargeting.TargetPaths(), 1) require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/buf.yaml") @@ -349,7 +338,6 @@ func TestGetReadBucketCloserForOSProtoFileAbsPwd(t *testing.T) { require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) - require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) require.Len(t, bucketTargeting.TargetPaths(), 1) require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/buf.yaml") @@ -374,7 +362,6 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLTerminateFileName(t *testing. require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) - require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) require.Len(t, bucketTargeting.TargetPaths(), 1) require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") @@ -407,7 +394,6 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLParentPwd(t *testing.T) { require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) - require.Equal(t, "four/five/proto", bucketTargeting.InputPath()) require.Len(t, bucketTargeting.TargetPaths(), 1) require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") @@ -446,7 +432,6 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLAbsPwd(t *testing.T) { require.NoError(t, err) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) - require.Equal(t, "four/five", bucketTargeting.InputPath()) require.Len(t, bucketTargeting.TargetPaths(), 1) require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") @@ -464,7 +449,7 @@ func testNewTerminateAtFileNamesFunc(terminateFileNames ...string) buftarget.Ter ctx context.Context, bucket storage.ReadBucket, prefix string, - inputPath string, + inputDir string, ) (buftarget.ControllingWorkspace, error) { for _, terminateFileName := range terminateFileNames { // We do not test for config file logic here, so it is fine to return empty configs. diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index 0280eabe3e..61650b3a9f 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ b/private/buf/buftarget/bucket_targeting.go @@ -28,8 +28,8 @@ type BucketTargeting interface { // ControllingWorkpsace returns the information for the controlling workspace, if one was // found. If not found, then this will be nil. 
ControllingWorkspace() ControllingWorkspace - // InputPath returns the input path relative to the root of the bucket - InputPath() string + // InputDir returns the input directory relative to the root of the bucket + InputDir() string // TargetPaths returns the target paths relative to the root of the bucket. TargetPaths() []string // TargetExcludePaths returns the target exclude paths relative to the root of the bucket. @@ -38,20 +38,20 @@ type BucketTargeting interface { isBucketTargeting() } -// NewBucketTargeting returns new targeting information for the given bucket, input path, +// NewBucketTargeting returns new targeting information for the given bucket, input dir, // target paths, and target exclude paths. // -// The inputPath, targetPaths, and targetExcludePaths are expected to be relative to the +// The inputDir, targetPaths, and targetExcludePaths are expected to be relative to the // root of the bucket. // -// If a controlling workspace is found, the input path, target paths, and target exclude +// If a controlling workspace is found, the input dir, target paths, and target exclude // paths are mapped as relative paths to the controlling workspace path. -// Otherwise, the input path, target paths, and target exclude paths are returned as-is. +// Otherwise, the input dir, target paths, and target exclude paths are returned as-is. func NewBucketTargeting( ctx context.Context, logger *zap.Logger, bucket storage.ReadBucket, - inputPath string, + inputDir string, targetPaths []string, targetExcludePaths []string, terminateFunc TerminateFunc, @@ -60,7 +60,7 @@ func NewBucketTargeting( ctx, logger, bucket, - inputPath, + inputDir, targetPaths, targetExcludePaths, terminateFunc, @@ -75,7 +75,7 @@ var ( type bucketTargeting struct { controllingWorkspace ControllingWorkspace - inputPath string + inputDir string targetPaths []string targetExcludePaths []string } @@ -84,8 +84,8 @@ func (b *bucketTargeting) ControllingWorkspace() ControllingWorkspace { return b.controllingWorkspace } -func (b *bucketTargeting) InputPath() string { - return b.inputPath +func (b *bucketTargeting) InputDir() string { + return b.inputDir } func (b *bucketTargeting) TargetPaths() []string { @@ -103,24 +103,24 @@ func newBucketTargeting( ctx context.Context, logger *zap.Logger, bucket storage.ReadBucket, - inputPath string, + inputDir string, targetPaths []string, targetExcludePaths []string, terminateFunc TerminateFunc, ) (*bucketTargeting, error) { - // First we map the controlling workspace for the inputPath. - controllingWorkspace, mappedInputPath, err := mapControllingWorkspaceAndPath( + // First we map the controlling workspace for the inputDir. + controllingWorkspace, mappedInputDir, err := mapControllingWorkspaceAndPath( ctx, logger, bucket, - inputPath, + inputDir, terminateFunc, ) if err != nil { return nil, err } // If no controlling workspace was found, we map the target paths and target exclude - // paths to the input path. + // paths to the input dir. mappedTargetPaths := targetPaths mappedTargetExcludePaths := targetExcludePaths if controllingWorkspace != nil && controllingWorkspace.Path() != "." 
{ @@ -145,7 +145,7 @@ func newBucketTargeting( } return &bucketTargeting{ controllingWorkspace: controllingWorkspace, - inputPath: mappedInputPath, + inputDir: mappedInputDir, targetPaths: mappedTargetPaths, targetExcludePaths: mappedTargetExcludePaths, }, nil @@ -201,6 +201,6 @@ func mapControllingWorkspaceAndPath( "buffetch no termination found", zap.String("path", path), ) - // No controlling workspace is found, we simply return the input path + // No controlling workspace is found, we simply return the input dir return nil, path, nil } diff --git a/private/buf/bufworkspace/workspace_dep_manager_provider.go b/private/buf/bufworkspace/workspace_dep_manager_provider.go index e2004ff761..e9676fd160 100644 --- a/private/buf/bufworkspace/workspace_dep_manager_provider.go +++ b/private/buf/bufworkspace/workspace_dep_manager_provider.go @@ -91,5 +91,5 @@ func (w *workspaceDepManagerProvider) GetWorkspaceDepManager( if bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 { return newWorkspaceDepManager(bucket, controllingWorkspace.Path(), true), nil } - return newWorkspaceDepManager(bucket, bucketTargeting.InputPath(), false), nil + return newWorkspaceDepManager(bucket, bucketTargeting.InputDir(), false), nil } diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index 94adf3498e..f5e5394a34 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -41,7 +41,7 @@ import ( // and mapped module buckets and module targeting. // // In the case where no controlling workspace is found, we default to a v1 workspace with -// a single module directory path, which is equivalent to the input path. +// a single module directory path, which is equivalent to the input dir. // // We only ever return one of v1 or v2. type workspaceTargeting struct { @@ -78,7 +78,7 @@ func newWorkspaceTargeting( if overrideBufYAMLFile != nil { logger.Debug( "targeting workspace with config override", - zap.String("input path", bucketTargeting.InputPath()), + zap.String("input dir", bucketTargeting.InputDir()), ) switch fileVersion := overrideBufYAMLFile.FileVersion(); fileVersion { case bufconfig.FileVersionV1Beta1, bufconfig.FileVersionV1: @@ -87,7 +87,7 @@ func newWorkspaceTargeting( config, bucket, bucketTargeting, - []string{bucketTargeting.InputPath()}, + []string{bucketTargeting.InputDir()}, overrideBufYAMLFile, ) case bufconfig.FileVersionV2: @@ -101,7 +101,7 @@ func newWorkspaceTargeting( if controllingWorkspace.BufYAMLFile() != nil { logger.Debug( "targeting workspace based on v2 buf.yaml", - zap.String("input path", bucketTargeting.InputPath()), + zap.String("input dir", bucketTargeting.InputDir()), ) return v2WorkspaceTargeting(ctx, config, bucket, bucketTargeting, controllingWorkspace.BufYAMLFile()) } @@ -111,7 +111,7 @@ func newWorkspaceTargeting( // This means that we attempted to target a v1 workspace at the buf.work.yaml, not // an individual module within the v1 workspace defined in buf.work.yaml. // This is disallowed. - if bucketTargeting.InputPath() == "." { + if bucketTargeting.InputDir() == "." { return nil, errors.New(`Workspaces defined with buf.work.yaml cannot be updated or pushed, only the individual modules within a workspace can be updated or pushed. 
Workspaces defined with a v2 buf.yaml can be updated, see the migration documentation for more details.`) @@ -120,14 +120,14 @@ defined with a v2 buf.yaml can be updated, see the migration documentation for m // the workspace entirely, and just act as if the buf.work.yaml did not exist. logger.Debug( "targeting workspace, ignoring v1 buf.work.yaml, just building on module at target", - zap.String("input path", bucketTargeting.InputPath()), + zap.String("input dir", bucketTargeting.InputDir()), ) return v1WorkspaceTargeting( ctx, config, bucket, bucketTargeting, - []string{bucketTargeting.InputPath()}, // Assume we are targeting only the module at the input path + []string{bucketTargeting.InputDir()}, // Assume we are targeting only the module at the input dir nil, ) } @@ -143,7 +143,7 @@ defined with a v2 buf.yaml can be updated, see the migration documentation for m } logger.Debug( "targeting workspace with no found buf.work.yaml or buf.yaml", - zap.String("input path", bucketTargeting.InputPath()), + zap.String("input dir", bucketTargeting.InputDir()), ) // We did not find any buf.work.yaml or buf.yaml, we invoke fallback logic. return fallbackWorkspaceTargeting( @@ -167,7 +167,7 @@ func v2WorkspaceTargeting( // --exclude-path flags resulted in no targeted modules. This condition is represented // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false // - // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputPath() was not + // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputDir() was not // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml // directories, and this is a system error - this should be verified before we reach this function. var hadIsTentativelyTargetModule bool @@ -179,14 +179,14 @@ func v2WorkspaceTargeting( moduleDirPath := moduleConfig.DirPath() moduleDirPaths = append(moduleDirPaths, moduleDirPath) bucketIDToModuleConfig[moduleDirPath] = moduleConfig - // bucketTargeting.InputPath() is the input targetSubDirPath. We only want to target modules that are inside + // bucketTargeting.InputDir() is the input targetSubDirPath. We only want to target modules that are inside // this targetSubDirPath. Example: bufWorkYAMLDirPath is "foo", targetSubDirPath is "foo/bar", // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" // and "foo/bar/bat". // // This is new behavior - before, we required that you input an exact match for the module directory path, // but now, we will take all the modules underneath this workspace. - isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputPath(), moduleDirPath, normalpath.Relative) + isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputDir(), moduleDirPath, normalpath.Relative) // We ignore this check for proto file refs, since the input is considered the directory // of the proto file reference, which is unlikely to contain a module in its entirety. // In the future, it would be nice to handle this more elegently. @@ -218,9 +218,9 @@ func v2WorkspaceTargeting( } if !hadIsTentativelyTargetModule { // Check if the input is overlapping within a module dir path. If so, return a nicer - // error. In the future, we want to remove special treatment for input path, and it + // error. In the future, we want to remove special treatment for input dir, and it // should be treated just like any target path. 
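// Illustrative sketch (not the implementation): the containment rule used above to pick
// target modules, assuming paths are already normalized and relative to the bucket root,
// and reusing normalpath exactly as the targeting loops do.
func modulesUnderInputDir(inputDirPath string, moduleDirPaths []string) []string {
	var containedModuleDirPaths []string
	for _, moduleDirPath := range moduleDirPaths {
		// True when inputDirPath equals moduleDirPath or is an ancestor of it,
		// for example inputDirPath "foo/bar" matches "foo/bar/baz" and
		// "foo/bar/bat" but not "other".
		if normalpath.EqualsOrContainsPath(inputDirPath, moduleDirPath, normalpath.Relative) {
			containedModuleDirPaths = append(containedModuleDirPaths, moduleDirPath)
		}
	}
	return containedModuleDirPaths
}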
- return nil, checkForOverlap(bucketTargeting.InputPath(), moduleDirPaths) + return nil, checkForOverlap(bucketTargeting.InputDir(), moduleDirPaths) } if !hadIsTargetModule { // It would be nice to have a better error message than this in the long term. @@ -248,7 +248,7 @@ func v1WorkspaceTargeting( // --exclude-path flags resulted in no targeted modules. This condition is represented // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false // - // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputPath() was not + // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputDir() was not // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml // directories, and this is a system error - this should be verified before we reach this function. var hadIsTentativelyTargetModule bool @@ -282,14 +282,14 @@ func v1WorkspaceTargeting( } } bucketIDToModuleConfig[moduleDirPath] = moduleConfig - // We only want to target modules that are inside the bucketTargeting.InputPath(). - // Example: bufWorkYAMLDirPath is "foo", bucketTargeting.InputPath() is "foo/bar", + // We only want to target modules that are inside the bucketTargeting.InputDir(). + // Example: bufWorkYAMLDirPath is "foo", bucketTargeting.InputDir() is "foo/bar", // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" // and "foo/bar/bat". // // This is new behavior - before, we required that you input an exact match for the module directory path, // but now, we will take all the modules underneath this workspace. - isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputPath(), moduleDirPath, normalpath.Relative) + isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputDir(), moduleDirPath, normalpath.Relative) // We ignore this check for proto file refs, since the input is considered the directory // of the proto file reference, which is unlikely to contain a module in its entirety. // In the future, it would be nice to handle this more elegently. @@ -321,9 +321,9 @@ func v1WorkspaceTargeting( } if !hadIsTentativelyTargetModule { // Check if the input is overlapping within a module dir path. If so, return a nicer - // error. In the future, we want to remove special treatment for input path, and it + // error. In the future, we want to remove special treatment for input dir, and it // should be treated just like any target path. - return nil, checkForOverlap(bucketTargeting.InputPath(), moduleDirPaths) + return nil, checkForOverlap(bucketTargeting.InputDir(), moduleDirPaths) } if !hadIsTargetModule { // It would be nice to have a better error message than this in the long term. @@ -365,7 +365,7 @@ func fallbackWorkspaceTargeting( config, bucket, bucketTargeting, - []string{bucketTargeting.InputPath()}, + []string{bucketTargeting.InputDir()}, nil, ) } @@ -430,9 +430,9 @@ func fallbackWorkspaceTargeting( v1ModulePaths = v1BufWorkYAML.DirPaths() } // If we still have no v1 module paths, then we go to the final fallback and set a v1 - // module at the input path. + // module at the input dir. 
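// A rough sketch of what an overlap check like checkForOverlap amounts to, assuming only
// the behavior implied by its two error messages; the real function may differ in details.
func checkForOverlapSketch(inputDirPath string, moduleDirPaths []string) error {
	for _, moduleDirPath := range moduleDirPaths {
		// The input dir sits inside a module dir, for example input "proto/buf"
		// inside module "proto": report the containment explicitly.
		if normalpath.EqualsOrContainsPath(moduleDirPath, inputDirPath, normalpath.Relative) {
			return fmt.Errorf("failed to build input %q because it is contained by directory %q", inputDirPath, moduleDirPath)
		}
	}
	return fmt.Errorf("input dir %q did not contain modules found in workspace %v", inputDirPath, moduleDirPaths)
}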
if len(v1ModulePaths) == 0 { - v1ModulePaths = append(v1ModulePaths, bucketTargeting.InputPath()) + v1ModulePaths = append(v1ModulePaths, bucketTargeting.InputDir()) } return v1WorkspaceTargeting( ctx, @@ -614,5 +614,5 @@ func checkForOverlap(inputPath string, moduleDirPaths []string) error { return fmt.Errorf("failed to build input %q because it is contained by directory %q", inputPath, moduleDirPath) } } - return fmt.Errorf("input path %q did not contain modules found in workspace %v", inputPath, moduleDirPaths) + return fmt.Errorf("input dir %q did not contain modules found in workspace %v", inputPath, moduleDirPaths) } diff --git a/private/buf/bufworkspace/workspace_test.go b/private/buf/bufworkspace/workspace_test.go index 949631e818..00b7c51f4b 100644 --- a/private/buf/bufworkspace/workspace_test.go +++ b/private/buf/bufworkspace/workspace_test.go @@ -73,7 +73,7 @@ func testBasic(t *testing.T, subDirPath string) { ) require.NotNil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) - require.Equal(t, "finance/portfolio/proto", bucketTargeting.InputPath()) + require.Equal(t, "finance/portfolio/proto", bucketTargeting.InputDir()) require.NoError(t, err) workspace, err := workspaceProvider.GetWorkspaceForBucket( @@ -153,7 +153,7 @@ func testBasic(t *testing.T, subDirPath string) { require.NoError(t, err) require.NotNil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) - require.Equal(t, "common/money/proto", bucketTargeting.InputPath()) + require.Equal(t, "common/money/proto", bucketTargeting.InputDir()) require.Equal( t, []string{"common/money/proto/acme/money/v1/currency_code.proto"}, @@ -209,7 +209,7 @@ func TestUnusedDep(t *testing.T) { require.NoError(t, err) require.NotNil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) - require.Equal(t, ".", bucketTargeting.InputPath()) + require.Equal(t, ".", bucketTargeting.InputDir()) workspace, err := workspaceProvider.GetWorkspaceForBucket( ctx, From 5d6a67bff646a824537c86e3a5d1baee4d748210 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Mon, 4 Mar 2024 17:25:22 -0500 Subject: [PATCH 12/29] Have slightly nicer fallback check for workspace targeting --- private/buf/buftarget/bucket_targeting.go | 10 +- .../buf/bufworkspace/workspace_targeting.go | 180 ++++++++++-------- 2 files changed, 105 insertions(+), 85 deletions(-) diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index 61650b3a9f..fa16066422 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ b/private/buf/buftarget/bucket_targeting.go @@ -127,19 +127,25 @@ func newBucketTargeting( // If a controlling workspace was found, we map the paths to the controlling workspace // because we'll be working with a workspace bucket. 
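// Concretely (illustrative values): with a controlling workspace at "proto" and a
// target path of "proto/foo/v1/foo.proto" relative to the bucket root, the loops
// below compute:
//
//	mappedTargetPath, err := normalpath.Rel("proto", "proto/foo/v1/foo.proto")
//	// mappedTargetPath == "foo/v1/foo.proto"
//	mappedTargetPath, err = normalpath.NormalizeAndValidate(mappedTargetPath)
//	// still "foo/v1/foo.proto"; a mapped path that escaped the workspace
//	// (one starting with "..") would fail validation here instead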
for i, targetPath := range targetPaths { - targetPath := normalpath.Normalize(targetPath) mappedTargetPath, err := normalpath.Rel(controllingWorkspace.Path(), targetPath) if err != nil { return nil, err } + mappedTargetPath, err = normalpath.NormalizeAndValidate(mappedTargetPath) + if err != nil { + return nil, err + } mappedTargetPaths[i] = mappedTargetPath } for i, targetExcludePath := range targetExcludePaths { - targetExcludePath := normalpath.Normalize(targetExcludePath) mappedTargetExcludePath, err := normalpath.Rel(controllingWorkspace.Path(), targetExcludePath) if err != nil { return nil, err } + mappedTargetExcludePath, err = normalpath.NormalizeAndValidate(mappedTargetExcludePath) + if err != nil { + return nil, err + } mappedTargetExcludePaths[i] = mappedTargetExcludePath } } diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index f5e5394a34..81400a0436 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -341,16 +341,15 @@ func v1WorkspaceTargeting( // fallbackWorkspaceTargeting is the fallback logic when there is no config override or // controlling workspace discovered through bucket targeting. // -// 1. We check if there are target paths. If there are no target paths, then we return -// a v1 workspace with the only module directory path as the input, since we have no other -// information to go on. +// 1. We check if the input is in a v1 module. If yes, then we simply use that. +// 2. If no v1 module was found for the input, we check the target paths, if there are any. +// For each target path, we check if it is part of a workspace or v1 module. // -// 2. If there are target paths, then we check from each target path, walking up, if there -// is a workspace or v1 module. +// a. If we find a v1 or v2 workspace, we ensure that all them resolve to the same workspace. +// b. If we find no workspace, we keep track of any v1 modules we find along the way. We +// then build those v1 modules as a v1 workspace. // -// a. If we find nothing, then same fallback as 1. -// b. If we find a v1 or v2 workspace, we ensure that all them resolve to the same workspace. -// c. If we find no workspace, then we can check for v1 module configs. +// 3. In the case where we find nothing, we set the input as a v1 module in a v1 workspace. 
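// Worked example (illustrative): for a bucket containing
//
//	proto/buf.yaml   (v1)
//	proto/foo/foo.proto
//
// with an input dir of "proto/foo" and no controlling workspace found by bucket
// targeting, step 1 finds the v1 module at "proto", so the fallback reduces to roughly:
//
//	v1WorkspaceTargeting(ctx, config, bucket, bucketTargeting, []string{"proto"}, nil)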
func fallbackWorkspaceTargeting( ctx context.Context, logger *zap.Logger, @@ -358,76 +357,83 @@ func fallbackWorkspaceTargeting( bucket storage.ReadBucket, bucketTargeting buftarget.BucketTargeting, ) (*workspaceTargeting, error) { - // No target paths, return a v1 workspace with a single module dir path at the input - if len(bucketTargeting.TargetPaths()) == 0 { - return v1WorkspaceTargeting( - ctx, - config, - bucket, - bucketTargeting, - []string{bucketTargeting.InputDir()}, - nil, - ) - } - var v1BufWorkYAML bufconfig.BufWorkYAMLFile - var v2BufYAMLFile bufconfig.BufYAMLFile - var controllingWorkspacePath string var v1ModulePaths []string - for _, targetPath := range bucketTargeting.TargetPaths() { - controllingWorkspaceOrModule, err := checkForControllingWorkspaceOrV1Module( - ctx, - logger, - bucket, - targetPath, - ) - if err != nil { - return nil, err - } - if controllingWorkspaceOrModule != nil { - // v1 workspace found - if bufWorkYAMLFile := controllingWorkspaceOrModule.BufWorkYAMLFile(); bufWorkYAMLFile != nil { - if controllingWorkspacePath != "" && controllingWorkspaceOrModule.Path() != controllingWorkspacePath { - return nil, fmt.Errorf("different controlling workspaces found: %q, %q", controllingWorkspacePath, controllingWorkspaceOrModule.Path()) - } - controllingWorkspacePath = controllingWorkspaceOrModule.Path() - v1BufWorkYAML = bufWorkYAMLFile - continue + inputDirV1Module, err := checkForControllingWorkspaceOrV1Module( + ctx, + logger, + bucket, + bucketTargeting.InputDir(), + false, + ) + if err != nil { + return nil, err + } + if inputDirV1Module != nil { + v1ModulePaths = append(v1ModulePaths, inputDirV1Module.Path()) + } else { + // No v1 module found for the input, check the target paths if any exist + var v1BufWorkYAML bufconfig.BufWorkYAMLFile + var v2BufYAMLFile bufconfig.BufYAMLFile + var controllingWorkspacePath string + for _, targetPath := range bucketTargeting.TargetPaths() { + controllingWorkspaceOrModule, err := checkForControllingWorkspaceOrV1Module( + ctx, + logger, + bucket, + targetPath, + false, + ) + if err != nil { + return nil, err } - // v2 workspace or v1 module found - if bufYAMLFile := controllingWorkspaceOrModule.BufYAMLFile(); bufYAMLFile != nil { - if bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 { + if controllingWorkspaceOrModule != nil { + // v1 workspace found + if bufWorkYAMLFile := controllingWorkspaceOrModule.BufWorkYAMLFile(); bufWorkYAMLFile != nil { if controllingWorkspacePath != "" && controllingWorkspaceOrModule.Path() != controllingWorkspacePath { return nil, fmt.Errorf("different controlling workspaces found: %q, %q", controllingWorkspacePath, controllingWorkspaceOrModule.Path()) } controllingWorkspacePath = controllingWorkspaceOrModule.Path() - v2BufYAMLFile = bufYAMLFile + v1BufWorkYAML = bufWorkYAMLFile continue } - if bufYAMLFile.FileVersion() == bufconfig.FileVersionV1 { - v1ModulePaths = append(v1ModulePaths, controllingWorkspaceOrModule.Path()) + // v2 workspace or v1 module found + if bufYAMLFile := controllingWorkspaceOrModule.BufYAMLFile(); bufYAMLFile != nil { + if bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 { + if controllingWorkspacePath != "" && controllingWorkspaceOrModule.Path() != controllingWorkspacePath { + return nil, fmt.Errorf("different controlling workspaces found: %q, %q", controllingWorkspacePath, controllingWorkspaceOrModule.Path()) + } + controllingWorkspacePath = controllingWorkspaceOrModule.Path() + v2BufYAMLFile = bufYAMLFile + continue + } + if bufYAMLFile.FileVersion() == 
bufconfig.FileVersionV1 { + v1ModulePaths = append(v1ModulePaths, controllingWorkspaceOrModule.Path()) + } } } } - } - // We should not have found a controlling workspace and also have a list of v1 modules we - // need to build. - // In the future, we may be able to support multi-building, but for now, we error. We have - // the option to prioritise the workspace here and drop the v1ModulePaths, but that might - // be confusing to users. - if controllingWorkspacePath != "" && len(v1ModulePaths) > 0 { - return nil, fmt.Errorf("found a workspace %q that does not contain all found modules: %v", controllingWorkspacePath, v1ModulePaths) - } - if v2BufYAMLFile != nil { - return v2WorkspaceTargeting( - ctx, - config, - bucket, - bucketTargeting, - v2BufYAMLFile, - ) - } - if v1BufWorkYAML != nil { - v1ModulePaths = v1BufWorkYAML.DirPaths() + // If multiple workspaces were found, we return an error since we don't support building + // multiple workspaces. + if v1BufWorkYAML != nil && v2BufYAMLFile != nil { + return nil, fmt.Errorf("multiple workspaces found") + } + // If we found a workspace and v1 modules that were not contained in the workspace, we + // do not support building multiple workspaces. + if controllingWorkspacePath != "" && len(v1ModulePaths) > 0 { + return nil, fmt.Errorf("found a workspace %q that does not contain all found modules: %v", controllingWorkspacePath, v1ModulePaths) + } + if v2BufYAMLFile != nil { + return v2WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + v2BufYAMLFile, + ) + } + if v1BufWorkYAML != nil { + v1ModulePaths = v1BufWorkYAML.DirPaths() + } } // If we still have no v1 module paths, then we go to the final fallback and set a v1 // module at the input dir. @@ -546,11 +552,18 @@ func getModuleConfigAndConfiguredDepModuleRefsV1Beta1OrV1( // checkForControllingWorkspaceOrV1Module take a bucket and path, and walks up the bucket // from the base of the path, checking for a controlling workspace or v1 module. +// +// If ignoreWorkspaceCheck is set to true, then we only look for v1 modules. This is done +// for the input, since we already did the workspace check in the initial bucketTargeting. +// Note that this is something that we could build into the initial bucketTargeting, +// however, moving forward, we want to encourage users to move to v2 workspaces, so it is +// nice to be able to isolate this as fallback logic here. func checkForControllingWorkspaceOrV1Module( ctx context.Context, logger *zap.Logger, bucket storage.ReadBucket, path string, + ignoreWorkspaceCheck bool, ) (buftarget.ControllingWorkspace, error) { path = normalpath.Normalize(path) // Keep track of any v1 module found along the way. If we find a v1 or v2 workspace, we @@ -567,23 +580,24 @@ func checkForControllingWorkspaceOrV1Module( // Instead, we effectively do a do-while loop. 
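// For example (illustrative): given path "finance/portfolio/proto", the loop below
// checks "finance/portfolio/proto", then "finance/portfolio", then "finance", and
// finally ".". A controlling workspace is returned as soon as one is found (unless
// ignoreWorkspaceCheck is true), while any v1 module encountered along the way is
// kept track of as described above.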
curDirPath := path for { - // First, check for a v1 or v2 controlling workspace - controllingWorkspace, err := buftarget.TerminateAtControllingWorkspace( - ctx, - bucket, - curDirPath, - path, - ) - if err != nil { - return nil, err - } - if controllingWorkspace != nil { - logger.Debug( - "buffetch termination found", - zap.String("curDirPath", curDirPath), - zap.String("path", path), + if !ignoreWorkspaceCheck { + controllingWorkspace, err := buftarget.TerminateAtControllingWorkspace( + ctx, + bucket, + curDirPath, + path, ) - return controllingWorkspace, nil + if err != nil { + return nil, err + } + if controllingWorkspace != nil { + logger.Debug( + "buffetch termination found", + zap.String("curDirPath", curDirPath), + zap.String("path", path), + ) + return controllingWorkspace, nil + } } // Then check for a v1 module v1Module, err := buftarget.TerminateAtV1Module(ctx, bucket, curDirPath, path) From e1f62ce870b4a425e84b7f137108c67bc3168190 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Mon, 4 Mar 2024 17:53:20 -0500 Subject: [PATCH 13/29] Fix some windows tests --- private/buf/cmd/buf/command/generate/generate_windows_test.go | 2 +- private/buf/cmd/buf/workspace_test.go | 2 +- .../workspacetests/other/proto/workspacetest/workspace_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/private/buf/cmd/buf/command/generate/generate_windows_test.go b/private/buf/cmd/buf/command/generate/generate_windows_test.go index 72701888bc..3cebdac767 100644 --- a/private/buf/cmd/buf/command/generate/generate_windows_test.go +++ b/private/buf/cmd/buf/command/generate/generate_windows_test.go @@ -127,7 +127,7 @@ func TestOutputWithNestedExcludeAndTargetPaths(t *testing.T) { ``, // This is new post-refactor. Before, we gave precedence to --path. While a change, // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
- `Failure: excluded path "a\v3" contains targeted path "a\v3\foo", which means all paths in "a\v3\foo" will be excluded`, + `Failure: excluded path "testdata\paths\a\v3" contains targeted path "testdata\paths\a\v3\foo", which means all paths in "testdata\pathas\a\v3\foo" will be excluded`, "--output", tempDirPath, "--template", diff --git a/private/buf/cmd/buf/workspace_test.go b/private/buf/cmd/buf/workspace_test.go index db9c6b504f..e5a4e8f144 100644 --- a/private/buf/cmd/buf/workspace_test.go +++ b/private/buf/cmd/buf/workspace_test.go @@ -1126,7 +1126,7 @@ func TestWorkspaceInputOverlapFail(t *testing.T) { nil, 1, ``, - filepath.FromSlash(`Failure: failed to build input "proto/buf" because it is contained by directory "proto"`), + `Failure: failed to build input "proto/buf" because it is contained by directory "proto"`, "build", filepath.Join("testdata", "workspace", "fail", "overlap", "proto", "buf"), ) diff --git a/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go b/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go index 68095f7134..66f944801e 100644 --- a/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go +++ b/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go @@ -177,7 +177,7 @@ func TestWorkspaceOverlapSubDirectory(t *testing.T) { nil, 1, ``, - filepath.FromSlash(`Failure: failed to build input "other/proto/one" because it is contained by directory "other/proto"`), + `Failure: failed to build input "other/proto/one" because it is contained by directory "other/proto"`, "build", filepath.Join("..", "one"), ) From 4121d872f76e71b23e474eaea496cc67b355d3cb Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Mon, 4 Mar 2024 18:03:20 -0500 Subject: [PATCH 14/29] Fix ci --- private/buf/bufworkspace/option.go | 35 +------------------ .../workspace_dep_manager_provider.go | 2 -- 2 files changed, 1 insertion(+), 36 deletions(-) diff --git a/private/buf/bufworkspace/option.go b/private/buf/bufworkspace/option.go index 8412a1bf10..e1f7c48d22 100644 --- a/private/buf/bufworkspace/option.go +++ b/private/buf/bufworkspace/option.go @@ -29,23 +29,11 @@ type WorkspaceBucketOption interface { applyToWorkspaceBucketConfig(*workspaceBucketConfig) } -// WorkspaceDepManagerOption is an option for a new WorkspaceDepManager. -type WorkspaceDepManagerOption interface { - applyToWorkspaceDepManagerConfig(*workspaceDepManagerConfig) -} - // WorkspaceModuleKeyOption is an option for a new Workspace created by a ModuleKey. type WorkspaceModuleKeyOption interface { applyToWorkspaceModuleKeyConfig(*workspaceModuleKeyConfig) } -// WorkspaceBucketOrDepManagerOption is an option that applies to either creating -// a Workspace by Bucket, or creating a WorkspaceDepManager. -type WorkspaceBucketAndDepManagerOption interface { - WorkspaceBucketOption - WorkspaceDepManagerOption -} - // WorkspaceOption is an option for a new Workspace created by either a Bucket or ModuleKey. type WorkspaceBucketAndModuleKeyOption interface { WorkspaceBucketOption @@ -59,7 +47,7 @@ type WorkspaceBucketAndModuleKeyOption interface { // the foo/bar module. // // A TargetSubDirPath of "." is equivalent of not setting this option. 
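// Usage sketch (illustrative): after this change the option is only accepted when
// constructing a Workspace from a bucket, for example as part of a
// []bufworkspace.WorkspaceBucketOption slice:
//
//	options := []bufworkspace.WorkspaceBucketOption{
//		bufworkspace.WithTargetSubDirPath("foo/bar"),
//	}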
-func WithTargetSubDirPath(targetSubDirPath string) WorkspaceBucketAndDepManagerOption { +func WithTargetSubDirPath(targetSubDirPath string) WorkspaceBucketOption { return &workspaceTargetSubDirPathOption{ targetSubDirPath: targetSubDirPath, } @@ -148,10 +136,6 @@ func (s *workspaceTargetSubDirPathOption) applyToWorkspaceBucketConfig(config *w config.targetSubDirPath = s.targetSubDirPath } -func (s *workspaceTargetSubDirPathOption) applyToWorkspaceDepManagerConfig(config *workspaceDepManagerConfig) { - config.targetSubDirPath = s.targetSubDirPath -} - type workspaceTargetPathsOption struct { targetPaths []string targetExcludePaths []string @@ -275,23 +259,6 @@ func newWorkspaceBucketConfig(options []WorkspaceBucketOption) (*workspaceBucket return config, nil } -type workspaceDepManagerConfig struct { - targetSubDirPath string -} - -func newWorkspaceDepManagerConfig(options []WorkspaceDepManagerOption) (*workspaceDepManagerConfig, error) { - config := &workspaceDepManagerConfig{} - for _, option := range options { - option.applyToWorkspaceDepManagerConfig(config) - } - var err error - config.targetSubDirPath, err = normalpath.NormalizeAndValidate(config.targetSubDirPath) - if err != nil { - return nil, err - } - return config, nil -} - type workspaceModuleKeyConfig struct { targetPaths []string targetExcludePaths []string diff --git a/private/buf/bufworkspace/workspace_dep_manager_provider.go b/private/buf/bufworkspace/workspace_dep_manager_provider.go index e9676fd160..9bd1284153 100644 --- a/private/buf/bufworkspace/workspace_dep_manager_provider.go +++ b/private/buf/bufworkspace/workspace_dep_manager_provider.go @@ -45,7 +45,6 @@ type WorkspaceDepManagerProvider interface { ctx context.Context, bucket storage.ReadWriteBucket, bucketTargeting buftarget.BucketTargeting, - options ...WorkspaceDepManagerOption, ) (WorkspaceDepManager, error) } @@ -81,7 +80,6 @@ func (w *workspaceDepManagerProvider) GetWorkspaceDepManager( ctx context.Context, bucket storage.ReadWriteBucket, bucketTargeting buftarget.BucketTargeting, - options ...WorkspaceDepManagerOption, ) (_ WorkspaceDepManager, retErr error) { controllingWorkspace := bucketTargeting.ControllingWorkspace() if controllingWorkspace == nil || controllingWorkspace.BufWorkYAMLFile() != nil { From e0498d0bf9f39f105bdb330dc8e0d8820b0fe55d Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Mon, 4 Mar 2024 18:27:25 -0500 Subject: [PATCH 15/29] Fix windows test --- private/buf/cmd/buf/command/generate/generate_windows_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/private/buf/cmd/buf/command/generate/generate_windows_test.go b/private/buf/cmd/buf/command/generate/generate_windows_test.go index 3cebdac767..71198d923e 100644 --- a/private/buf/cmd/buf/command/generate/generate_windows_test.go +++ b/private/buf/cmd/buf/command/generate/generate_windows_test.go @@ -127,7 +127,7 @@ func TestOutputWithNestedExcludeAndTargetPaths(t *testing.T) { ``, // This is new post-refactor. Before, we gave precedence to --path. While a change, // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
- `Failure: excluded path "testdata\paths\a\v3" contains targeted path "testdata\paths\a\v3\foo", which means all paths in "testdata\pathas\a\v3\foo" will be excluded`, + `Failure: excluded path "testdata\paths\a\v3" contains targeted path "testdata\paths\a\v3\foo", which means all paths in "testdata\paths\a\v3\foo" will be excluded`, "--output", tempDirPath, "--template", From 7680c73e0ab41d28b70158b88e15295945d1e57e Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Mon, 4 Mar 2024 19:23:42 -0500 Subject: [PATCH 16/29] Clean-up some extraneous changes --- private/bufpkg/bufmodule/module.go | 21 +++++++++++++------ .../bufpkg/bufmodule/module_read_bucket.go | 2 +- private/pkg/storage/storageos/bucket.go | 7 +++++-- 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/private/bufpkg/bufmodule/module.go b/private/bufpkg/bufmodule/module.go index 0ebdac339d..4da64d1a44 100644 --- a/private/bufpkg/bufmodule/module.go +++ b/private/bufpkg/bufmodule/module.go @@ -284,15 +284,24 @@ func newModule( return nil, syserror.New("moduleFullName not present and commitID present when constructing a remote Module") } - normalizeIfNotEmpty := func(path string) string { + normalizeAndValidateIfNotEmpty := func(path string) (string, error) { if path == "" { - return path + return path, nil } - return normalpath.Normalize(path) + return normalpath.NormalizeAndValidate(path) + } + targetPaths, err := slicesext.MapError(targetPaths, normalizeAndValidateIfNotEmpty) + if err != nil { + return nil, syserror.Wrap(err) + } + targetExcludePaths, err = slicesext.MapError(targetExcludePaths, normalizeAndValidateIfNotEmpty) + if err != nil { + return nil, syserror.Wrap(err) + } + protoFileTargetPath, err = normalizeAndValidateIfNotEmpty(protoFileTargetPath) + if err != nil { + return nil, syserror.Wrap(err) } - targetPaths = slicesext.Map(targetPaths, normalizeIfNotEmpty) - targetExcludePaths = slicesext.Map(targetExcludePaths, normalizeIfNotEmpty) - protoFileTargetPath = normalizeIfNotEmpty(protoFileTargetPath) module := &module{ ctx: ctx, diff --git a/private/bufpkg/bufmodule/module_read_bucket.go b/private/bufpkg/bufmodule/module_read_bucket.go index aab927640d..e0a61a9882 100644 --- a/private/bufpkg/bufmodule/module_read_bucket.go +++ b/private/bufpkg/bufmodule/module_read_bucket.go @@ -363,7 +363,7 @@ func (b *moduleReadBucket) WalkFileInfos( return protoFileTracker.validate() } - // If we are walking target files, then we track the module if it is the target module + // If we are walking all files, then we track the module if it is the target module if b.module.IsTarget() { protoFileTracker.trackModule(b.module) } diff --git a/private/pkg/storage/storageos/bucket.go b/private/pkg/storage/storageos/bucket.go index d2bb7e6974..19d712b4db 100644 --- a/private/pkg/storage/storageos/bucket.go +++ b/private/pkg/storage/storageos/bucket.go @@ -140,7 +140,10 @@ func (b *bucket) Walk( return err } // just in case - path = normalpath.Normalize(path) + path, err = normalpath.NormalizeAndValidate(path) + if err != nil { + return err + } if err := f( storageutil.NewObjectInfo( path, @@ -310,7 +313,7 @@ func (b *bucket) validateExternalPath(path string, externalPath string) error { } func (b *bucket) getExternalPrefix(prefix string) (string, error) { - prefix, err := normalpath.NormalizeAndValidate(prefix) + prefix, err := storageutil.ValidatePrefix(prefix) if err != nil { return "", err } From aa283a71fb6cb285ab6d460984ec3eb94d693a36 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Tue, 5 Mar 2024 14:38:37 -0500 
Subject: [PATCH 17/29] Remove extraneous workspace configuration InputPath, TargetPaths, and TargetExcludePaths are now entirely handled by buftarget.BucketTargeting, so we can remove these workspace options. --- private/buf/bufctl/controller.go | 7 -- private/buf/buffetch/internal/reader.go | 27 ++++++ private/buf/buftarget/bucket_targeting.go | 5 +- private/buf/bufworkspace/option.go | 95 +------------------ .../buf/bufworkspace/workspace_targeting.go | 56 +++++++++++ 5 files changed, 88 insertions(+), 102 deletions(-) diff --git a/private/buf/bufctl/controller.go b/private/buf/bufctl/controller.go index 65a342da7b..1fbddaf06f 100644 --- a/private/buf/bufctl/controller.go +++ b/private/buf/bufctl/controller.go @@ -845,13 +845,6 @@ func (c *controller) getWorkspaceForSourceRef( retErr = multierr.Append(retErr, readBucketCloser.Close()) }() options := []bufworkspace.WorkspaceBucketOption{ - bufworkspace.WithTargetSubDirPath( - bucketTargeting.InputDir(), - ), - bufworkspace.WithTargetPaths( - bucketTargeting.TargetPaths(), - bucketTargeting.TargetExcludePaths(), - ), bufworkspace.WithConfigOverride( functionOptions.configOverride, ), diff --git a/private/buf/buffetch/internal/reader.go b/private/buf/buffetch/internal/reader.go index 7b21e54577..69964c8022 100644 --- a/private/buf/buffetch/internal/reader.go +++ b/private/buf/buffetch/internal/reader.go @@ -33,6 +33,7 @@ import ( "github.com/bufbuild/buf/private/pkg/ioext" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/osext" + "github.com/bufbuild/buf/private/pkg/slicesext" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storagearchive" "github.com/bufbuild/buf/private/pkg/storage/storagemem" @@ -571,6 +572,9 @@ func getReadBucketCloserForBucket( targetExcludePaths []string, terminateFunc buftarget.TerminateFunc, ) (ReadBucketCloser, buftarget.BucketTargeting, error) { + if err := validatePaths(inputSubDirPath, targetPaths, targetExcludePaths); err != nil { + return nil, nil, err + } bucketTargeting, err := buftarget.NewBucketTargeting( ctx, logger, @@ -877,6 +881,29 @@ func attemptToFixOSRootBucketPathErrors(fsRoot string, err error) error { return err } +func validatePaths( + inputSubDirPath string, + targetPaths []string, + targetExcludePaths []string, +) error { + if _, err := normalpath.NormalizeAndValidate(inputSubDirPath); err != nil { + return err + } + if _, err := slicesext.MapError( + targetPaths, + normalpath.NormalizeAndValidate, + ); err != nil { + return err + } + if _, err := slicesext.MapError( + targetPaths, + normalpath.NormalizeAndValidate, + ); err != nil { + return err + } + return nil +} + type getFileOptions struct { keepFileCompression bool } diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index fa16066422..4de199ac85 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ b/private/buf/buftarget/bucket_targeting.go @@ -166,7 +166,10 @@ func mapControllingWorkspaceAndPath( path string, terminateFunc TerminateFunc, ) (ControllingWorkspace, string, error) { - path = normalpath.Normalize(path) + path, err := normalpath.NormalizeAndValidate(path) + if err != nil { + return nil, "", err + } // If no terminateFunc is passed, we can simply assume that we are mapping the bucket at // the path. 
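// Usage sketch (illustrative): callers such as buffetch's reader construct targeting
// roughly like this; per the comment above, a nil TerminateFunc means no controlling
// workspace lookup is attempted and the bucket is simply mapped at the input dir:
//
//	bucketTargeting, err := buftarget.NewBucketTargeting(
//		ctx,
//		logger,
//		bucket,
//		"proto/foo", // input dir, relative to the bucket root
//		nil,         // target paths
//		nil,         // target exclude paths
//		nil,         // TerminateFunc
//	)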
if terminateFunc == nil { diff --git a/private/buf/bufworkspace/option.go b/private/buf/bufworkspace/option.go index e1f7c48d22..0e34caa224 100644 --- a/private/buf/bufworkspace/option.go +++ b/private/buf/bufworkspace/option.go @@ -15,13 +15,8 @@ package bufworkspace import ( - "errors" - "fmt" - "path/filepath" - "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/slicesext" - "github.com/bufbuild/buf/private/pkg/syserror" ) // WorkspaceBucketOption is an option for a new Workspace created by a Bucket. @@ -40,19 +35,6 @@ type WorkspaceBucketAndModuleKeyOption interface { WorkspaceModuleKeyOption } -// This selects the specific directory within the Workspace bucket to target. -// -// Example: We have modules at foo/bar, foo/baz. "." will result in both -// modules being selected, so will "foo", but "foo/bar" will result in only -// the foo/bar module. -// -// A TargetSubDirPath of "." is equivalent of not setting this option. -func WithTargetSubDirPath(targetSubDirPath string) WorkspaceBucketOption { - return &workspaceTargetSubDirPathOption{ - targetSubDirPath: targetSubDirPath, - } -} - // WithProtoFileTargetPath returns a new WorkspaceBucketOption that specifically targets // a single .proto file, and optionally targets all other .proto files that are in the same package. // @@ -95,7 +77,7 @@ func WithIgnoreAndDisallowV1BufWorkYAMLs() WorkspaceBucketOption { // need to be built as non-targeted. // // These paths have to be within the subDirPath, if it exists. -func WithTargetPaths(targetPaths []string, targetExcludePaths []string) WorkspaceBucketAndModuleKeyOption { +func WithTargetPaths(targetPaths []string, targetExcludePaths []string) WorkspaceModuleKeyOption { return &workspaceTargetPathsOption{ targetPaths: targetPaths, targetExcludePaths: targetExcludePaths, @@ -128,24 +110,11 @@ func WithConfigOverride(configOverride string) WorkspaceBucketAndModuleKeyOption // *** PRIVATE *** -type workspaceTargetSubDirPathOption struct { - targetSubDirPath string -} - -func (s *workspaceTargetSubDirPathOption) applyToWorkspaceBucketConfig(config *workspaceBucketConfig) { - config.targetSubDirPath = s.targetSubDirPath -} - type workspaceTargetPathsOption struct { targetPaths []string targetExcludePaths []string } -func (t *workspaceTargetPathsOption) applyToWorkspaceBucketConfig(config *workspaceBucketConfig) { - config.targetPaths = t.targetPaths - config.targetExcludePaths = t.targetExcludePaths -} - func (t *workspaceTargetPathsOption) applyToWorkspaceModuleKeyConfig(config *workspaceModuleKeyConfig) { config.targetPaths = t.targetPaths config.targetExcludePaths = t.targetExcludePaths @@ -180,9 +149,6 @@ func (c *workspaceIgnoreAndDisallowV1BufWorkYAMLsOption) applyToWorkspaceBucketC } type workspaceBucketConfig struct { - targetSubDirPath string - targetPaths []string - targetExcludePaths []string protoFileTargetPath string includePackageFiles bool configOverride string @@ -194,68 +160,9 @@ func newWorkspaceBucketConfig(options []WorkspaceBucketOption) (*workspaceBucket for _, option := range options { option.applyToWorkspaceBucketConfig(config) } - var err error - config.targetSubDirPath = normalpath.Normalize(config.targetSubDirPath) - config.targetPaths, err = slicesext.MapError( - config.targetPaths, - normalpath.NormalizeAndValidate, - ) - if err != nil { - return nil, err - } - config.targetExcludePaths, err = slicesext.MapError( - config.targetExcludePaths, - normalpath.NormalizeAndValidate, - ) - if err != nil { - return nil, err - } if 
config.protoFileTargetPath != "" { config.protoFileTargetPath = normalpath.Normalize(config.protoFileTargetPath) } - if len(config.targetPaths) > 0 || len(config.targetExcludePaths) > 0 { - if config.protoFileTargetPath != "" { - // This is just a system error. We messed up and called both exclusive options. - return nil, syserror.New("cannot set targetPaths/targetExcludePaths with protoFileTargetPaths") - } - // These are actual user errors. This is us verifying --path and --exclude-path. - // An argument could be made this should be at a higher level for user errors, and then - // if it gets to this point, this should be a system error. - // - // We don't use --path, --exclude-path here because these paths have had ExternalPathToPath - // applied to them. Which is another argument to do this at a higher level. - for _, targetPath := range config.targetPaths { - if targetPath == config.targetSubDirPath { - return nil, errors.New("given input is equal to a value of --path - this has no effect and is disallowed") - } - // We want this to be deterministic. We don't have that many paths in almost all cases. - // This being n^2 shouldn't be a huge issue unless someone has a diabolical wrapping shell script. - // If this becomes a problem, there's optimizations we can do by turning targetExcludePaths into - // a map but keeping the index in config.targetExcludePaths around to prioritize what error - // message to print. - for _, targetExcludePath := range config.targetExcludePaths { - if targetPath == targetExcludePath { - unnormalizedTargetPath := filepath.Clean(normalpath.Unnormalize(targetPath)) - return nil, fmt.Errorf("cannot set the same path for both --path and --exclude-path: %s", unnormalizedTargetPath) - } - // This is new post-refactor. Before, we gave precedence to --path. While a change, - // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
- if normalpath.EqualsOrContainsPath(targetExcludePath, targetPath, normalpath.Relative) { - // We clean and unnormalize the target paths to show in the error message - unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) - unnormalizedTargetPath := filepath.Clean(normalpath.Unnormalize(targetPath)) - return nil, fmt.Errorf(`excluded path "%s" contains targeted path "%s", which means all paths in "%s" will be excluded`, unnormalizedTargetExcludePath, unnormalizedTargetPath, unnormalizedTargetPath) - } - } - } - for _, targetExcludePath := range config.targetExcludePaths { - if targetExcludePath == config.targetSubDirPath { - unnormalizedTargetSubDirPath := filepath.Clean(normalpath.Unnormalize(config.targetSubDirPath)) - unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) - return nil, fmt.Errorf("given input %s is equal to a value of --exclude-path %s - this would exclude everything", unnormalizedTargetSubDirPath, unnormalizedTargetExcludePath) - } - } - } return config, nil } diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index 81400a0436..90c7bafe2e 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io/fs" + "path/filepath" "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufconfig" @@ -75,6 +76,9 @@ func newWorkspaceTargeting( overrideBufYAMLFile bufconfig.BufYAMLFile, ignoreAndDisallowV1BufWorkYAMLs bool, ) (*workspaceTargeting, error) { + if err := validateBucketTargeting(bucketTargeting, config.protoFileTargetPath); err != nil { + return nil, err + } if overrideBufYAMLFile != nil { logger.Debug( "targeting workspace with config override", @@ -450,6 +454,58 @@ func fallbackWorkspaceTargeting( ) } +func validateBucketTargeting( + bucketTargeting buftarget.BucketTargeting, + protoFilePath string, +) error { + if protoFilePath != "" { + // We set the proto file path as a target path, which we handle in module targeting, + // so we expect len(bucketTargeting.TargetPaths()) to be exactly 1. + if len(bucketTargeting.TargetPaths()) != 1 || len(bucketTargeting.TargetExcludePaths()) > 0 { + // This is just a system error. We messed up and called both exclusive options. + return syserror.New("cannot set targetPaths/targetExcludePaths with protoFileTargetPaths") + } + } + // These are actual user errors. This is us verifying --path and --exclude-path. + // An argument could be made this should be at a higher level for user errors, and then + // if it gets to this point, this should be a system error. + // + // We don't use --path, --exclude-path here because these paths have had ExternalPathToPath + // applied to them. Which is another argument to do this at a higher level. + for _, targetPath := range bucketTargeting.TargetPaths() { + if targetPath == bucketTargeting.InputDir() { + return errors.New("given input is equal to a value of --path - this has no effect and is disallowed") + } + // We want this to be deterministic. We don't have that many paths in almost all cases. + // This being n^2 shouldn't be a huge issue unless someone has a diabolical wrapping shell script. + // If this becomes a problem, there's optimizations we can do by turning targetExcludePaths into + // a map but keeping the index in targetExcludePaths around to prioritize what error + // message to print. 
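// For example (illustrative): --path foo/bar together with --exclude-path foo arrives
// here as targetPath "foo/bar" and targetExcludePath "foo";
// normalpath.EqualsOrContainsPath("foo", "foo/bar", normalpath.Relative) is true, so
// validation fails with the "excluded path ... contains targeted path" error below
// instead of silently excluding everything under "foo".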
+ for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { + if targetPath == targetExcludePath { + unnormalizedTargetPath := filepath.Clean(normalpath.Unnormalize(targetPath)) + return fmt.Errorf("cannot set the same path for both --path and --exclude-path: %s", unnormalizedTargetPath) + } + // This is new post-refactor. Before, we gave precedence to --path. While a change, + // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. + if normalpath.EqualsOrContainsPath(targetExcludePath, targetPath, normalpath.Relative) { + // We clean and unnormalize the target paths to show in the error message + unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) + unnormalizedTargetPath := filepath.Clean(normalpath.Unnormalize(targetPath)) + return fmt.Errorf(`excluded path "%s" contains targeted path "%s", which means all paths in "%s" will be excluded`, unnormalizedTargetExcludePath, unnormalizedTargetPath, unnormalizedTargetPath) + } + } + } + for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { + if targetExcludePath == bucketTargeting.InputDir() { + unnormalizedTargetSubDirPath := filepath.Clean(normalpath.Unnormalize(bucketTargeting.InputDir())) + unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) + return fmt.Errorf("given input %s is equal to a value of --exclude-path %s - this would exclude everything", unnormalizedTargetSubDirPath, unnormalizedTargetExcludePath) + } + } + return nil +} + func getMappedModuleBucketAndModuleTargeting( ctx context.Context, config *workspaceBucketConfig, From 80b7c2a77485242b8f05086af538edcede20c82f Mon Sep 17 00:00:00 2001 From: bufdev Date: Tue, 5 Mar 2024 16:16:20 -0500 Subject: [PATCH 18/29] TODO.txt problem solved --- TODO.txt | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/TODO.txt b/TODO.txt index ca2f30ea16..f1330e965b 100644 --- a/TODO.txt +++ b/TODO.txt @@ -12,15 +12,3 @@ NOTE: We are not allowing cross-workspace finding for include_package_files=true - Changed protoc-gen-buf-lint and protoc-gen-buf-breaking configuration, deprecated old configurations - Need to manually test protoc-gen-buf-breaking - -$ bothbuf build google/ads/admob/v1/admob_api.proto -OLD -OLD STDOUT: -OLD EXIT CODE: 100 -OLD STDERR: -google/ads/admob/v1/admob_api.proto:19:8:stat google/ads/admob/v1/admob_resources.proto: file does not exist -NEW -NEW STDOUT: -NEW EXIT CODE: 100 -NEW STDERR: -/Users/pedge/git/googleapis/google/ads/admob/v1/admob_api.proto:19:8:stat google/ads/admob/v1/admob_resources.proto: file does not exist From 0c6af1e04cca8ad12e076a698205bcdb37781fda Mon Sep 17 00:00:00 2001 From: bufdev Date: Tue, 5 Mar 2024 16:17:54 -0500 Subject: [PATCH 19/29] s/InputDir/InputDirPath/g --- .../buffetch/internal/read_bucket_closer.go | 2 +- .../buffetch/internal/read_write_bucket.go | 2 +- private/buf/buffetch/internal/reader.go | 6 +-- private/buf/buftarget/bucket_targeting.go | 4 +- .../workspace_dep_manager_provider.go | 2 +- .../buf/bufworkspace/workspace_targeting.go | 42 +++++++++---------- private/buf/bufworkspace/workspace_test.go | 6 +-- 7 files changed, 32 insertions(+), 32 deletions(-) diff --git a/private/buf/buffetch/internal/read_bucket_closer.go b/private/buf/buffetch/internal/read_bucket_closer.go index 79827f440e..c9a8dcf2b4 100644 --- a/private/buf/buffetch/internal/read_bucket_closer.go +++ b/private/buf/buffetch/internal/read_bucket_closer.go @@ -36,7 +36,7 @@ func 
newReadBucketCloser( bucketPath string, bucketTargeting buftarget.BucketTargeting, ) *readBucketCloser { - normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputDir()) + normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputDirPath()) return &readBucketCloser{ ReadBucketCloser: storageReadBucketCloser, subDirPath: normalizedSubDirPath, diff --git a/private/buf/buffetch/internal/read_write_bucket.go b/private/buf/buffetch/internal/read_write_bucket.go index cd0f700087..767d581c8e 100644 --- a/private/buf/buffetch/internal/read_write_bucket.go +++ b/private/buf/buffetch/internal/read_write_bucket.go @@ -33,7 +33,7 @@ func newReadWriteBucket( bucketPath string, bucketTargeting buftarget.BucketTargeting, ) *readWriteBucket { - normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputDir()) + normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputDirPath()) return &readWriteBucket{ ReadWriteBucket: storageReadWriteBucket, subDirPath: normalizedSubDirPath, diff --git a/private/buf/buffetch/internal/reader.go b/private/buf/buffetch/internal/reader.go index 69964c8022..6d021588da 100644 --- a/private/buf/buffetch/internal/reader.go +++ b/private/buf/buffetch/internal/reader.go @@ -591,7 +591,7 @@ func getReadBucketCloserForBucket( if bucketTargeting.ControllingWorkspace() != nil { bucketPath = bucketTargeting.ControllingWorkspace().Path() } else { - bucketPath = bucketTargeting.InputDir() + bucketPath = bucketTargeting.InputDirPath() } if bucketPath != "." { inputBucket = storage.MapReadBucketCloser( @@ -722,7 +722,7 @@ func getReadWriteBucketForOS( return nil, nil, err } } - inputDir = osRootBucketTargeting.InputDir() + inputDir = osRootBucketTargeting.InputDirPath() bucketTargetPaths = osRootBucketTargeting.TargetPaths() bucketTargetExcludePaths = osRootBucketTargeting.TargetExcludePaths() } else { @@ -751,7 +751,7 @@ func getReadWriteBucketForOS( if err != nil { return nil, nil, err } - inputDir, err = normalpath.Rel(bucketPathFSRelPath, osRootBucketTargeting.InputDir()) + inputDir, err = normalpath.Rel(bucketPathFSRelPath, osRootBucketTargeting.InputDirPath()) if err != nil { return nil, nil, err } diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index 4de199ac85..1a27db136c 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ b/private/buf/buftarget/bucket_targeting.go @@ -29,7 +29,7 @@ type BucketTargeting interface { // found. If not found, then this will be nil. ControllingWorkspace() ControllingWorkspace // InputDir returns the input directory relative to the root of the bucket - InputDir() string + InputDirPath() string // TargetPaths returns the target paths relative to the root of the bucket. TargetPaths() []string // TargetExcludePaths returns the target exclude paths relative to the root of the bucket. 
@@ -84,7 +84,7 @@ func (b *bucketTargeting) ControllingWorkspace() ControllingWorkspace { return b.controllingWorkspace } -func (b *bucketTargeting) InputDir() string { +func (b *bucketTargeting) InputDirPath() string { return b.inputDir } diff --git a/private/buf/bufworkspace/workspace_dep_manager_provider.go b/private/buf/bufworkspace/workspace_dep_manager_provider.go index 9bd1284153..25846f7302 100644 --- a/private/buf/bufworkspace/workspace_dep_manager_provider.go +++ b/private/buf/bufworkspace/workspace_dep_manager_provider.go @@ -89,5 +89,5 @@ func (w *workspaceDepManagerProvider) GetWorkspaceDepManager( if bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 { return newWorkspaceDepManager(bucket, controllingWorkspace.Path(), true), nil } - return newWorkspaceDepManager(bucket, bucketTargeting.InputDir(), false), nil + return newWorkspaceDepManager(bucket, bucketTargeting.InputDirPath(), false), nil } diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index 90c7bafe2e..94752b03c4 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -82,7 +82,7 @@ func newWorkspaceTargeting( if overrideBufYAMLFile != nil { logger.Debug( "targeting workspace with config override", - zap.String("input dir", bucketTargeting.InputDir()), + zap.String("input dir", bucketTargeting.InputDirPath()), ) switch fileVersion := overrideBufYAMLFile.FileVersion(); fileVersion { case bufconfig.FileVersionV1Beta1, bufconfig.FileVersionV1: @@ -91,7 +91,7 @@ func newWorkspaceTargeting( config, bucket, bucketTargeting, - []string{bucketTargeting.InputDir()}, + []string{bucketTargeting.InputDirPath()}, overrideBufYAMLFile, ) case bufconfig.FileVersionV2: @@ -105,7 +105,7 @@ func newWorkspaceTargeting( if controllingWorkspace.BufYAMLFile() != nil { logger.Debug( "targeting workspace based on v2 buf.yaml", - zap.String("input dir", bucketTargeting.InputDir()), + zap.String("input dir", bucketTargeting.InputDirPath()), ) return v2WorkspaceTargeting(ctx, config, bucket, bucketTargeting, controllingWorkspace.BufYAMLFile()) } @@ -115,7 +115,7 @@ func newWorkspaceTargeting( // This means that we attempted to target a v1 workspace at the buf.work.yaml, not // an individual module within the v1 workspace defined in buf.work.yaml. // This is disallowed. - if bucketTargeting.InputDir() == "." { + if bucketTargeting.InputDirPath() == "." { return nil, errors.New(`Workspaces defined with buf.work.yaml cannot be updated or pushed, only the individual modules within a workspace can be updated or pushed. Workspaces defined with a v2 buf.yaml can be updated, see the migration documentation for more details.`) @@ -124,14 +124,14 @@ defined with a v2 buf.yaml can be updated, see the migration documentation for m // the workspace entirely, and just act as if the buf.work.yaml did not exist. 
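// Usage sketch (illustrative), assuming a WorkspaceDepManagerProvider in hand: after
// the rename, a dep manager for the targeted input is derived entirely from the bucket
// and its targeting, with no per-call options:
//
//	workspaceDepManager, err := workspaceDepManagerProvider.GetWorkspaceDepManager(ctx, bucket, bucketTargeting)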
logger.Debug( "targeting workspace, ignoring v1 buf.work.yaml, just building on module at target", - zap.String("input dir", bucketTargeting.InputDir()), + zap.String("input dir", bucketTargeting.InputDirPath()), ) return v1WorkspaceTargeting( ctx, config, bucket, bucketTargeting, - []string{bucketTargeting.InputDir()}, // Assume we are targeting only the module at the input dir + []string{bucketTargeting.InputDirPath()}, // Assume we are targeting only the module at the input dir nil, ) } @@ -147,7 +147,7 @@ defined with a v2 buf.yaml can be updated, see the migration documentation for m } logger.Debug( "targeting workspace with no found buf.work.yaml or buf.yaml", - zap.String("input dir", bucketTargeting.InputDir()), + zap.String("input dir", bucketTargeting.InputDirPath()), ) // We did not find any buf.work.yaml or buf.yaml, we invoke fallback logic. return fallbackWorkspaceTargeting( @@ -171,7 +171,7 @@ func v2WorkspaceTargeting( // --exclude-path flags resulted in no targeted modules. This condition is represented // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false // - // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputDir() was not + // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputDirPath() was not // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml // directories, and this is a system error - this should be verified before we reach this function. var hadIsTentativelyTargetModule bool @@ -183,14 +183,14 @@ func v2WorkspaceTargeting( moduleDirPath := moduleConfig.DirPath() moduleDirPaths = append(moduleDirPaths, moduleDirPath) bucketIDToModuleConfig[moduleDirPath] = moduleConfig - // bucketTargeting.InputDir() is the input targetSubDirPath. We only want to target modules that are inside + // bucketTargeting.InputDirPath() is the input targetSubDirPath. We only want to target modules that are inside // this targetSubDirPath. Example: bufWorkYAMLDirPath is "foo", targetSubDirPath is "foo/bar", // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" // and "foo/bar/bat". // // This is new behavior - before, we required that you input an exact match for the module directory path, // but now, we will take all the modules underneath this workspace. - isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputDir(), moduleDirPath, normalpath.Relative) + isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputDirPath(), moduleDirPath, normalpath.Relative) // We ignore this check for proto file refs, since the input is considered the directory // of the proto file reference, which is unlikely to contain a module in its entirety. // In the future, it would be nice to handle this more elegently. @@ -224,7 +224,7 @@ func v2WorkspaceTargeting( // Check if the input is overlapping within a module dir path. If so, return a nicer // error. In the future, we want to remove special treatment for input dir, and it // should be treated just like any target path. - return nil, checkForOverlap(bucketTargeting.InputDir(), moduleDirPaths) + return nil, checkForOverlap(bucketTargeting.InputDirPath(), moduleDirPaths) } if !hadIsTargetModule { // It would be nice to have a better error message than this in the long term. @@ -252,7 +252,7 @@ func v1WorkspaceTargeting( // --exclude-path flags resulted in no targeted modules. 
This condition is represented // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false // - // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputDir() was not + // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputDirPath() was not // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml // directories, and this is a system error - this should be verified before we reach this function. var hadIsTentativelyTargetModule bool @@ -286,14 +286,14 @@ func v1WorkspaceTargeting( } } bucketIDToModuleConfig[moduleDirPath] = moduleConfig - // We only want to target modules that are inside the bucketTargeting.InputDir(). - // Example: bufWorkYAMLDirPath is "foo", bucketTargeting.InputDir() is "foo/bar", + // We only want to target modules that are inside the bucketTargeting.InputDirPath(). + // Example: bufWorkYAMLDirPath is "foo", bucketTargeting.InputDirPath() is "foo/bar", // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" // and "foo/bar/bat". // // This is new behavior - before, we required that you input an exact match for the module directory path, // but now, we will take all the modules underneath this workspace. - isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputDir(), moduleDirPath, normalpath.Relative) + isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputDirPath(), moduleDirPath, normalpath.Relative) // We ignore this check for proto file refs, since the input is considered the directory // of the proto file reference, which is unlikely to contain a module in its entirety. // In the future, it would be nice to handle this more elegently. @@ -327,7 +327,7 @@ func v1WorkspaceTargeting( // Check if the input is overlapping within a module dir path. If so, return a nicer // error. In the future, we want to remove special treatment for input dir, and it // should be treated just like any target path. - return nil, checkForOverlap(bucketTargeting.InputDir(), moduleDirPaths) + return nil, checkForOverlap(bucketTargeting.InputDirPath(), moduleDirPaths) } if !hadIsTargetModule { // It would be nice to have a better error message than this in the long term. @@ -366,7 +366,7 @@ func fallbackWorkspaceTargeting( ctx, logger, bucket, - bucketTargeting.InputDir(), + bucketTargeting.InputDirPath(), false, ) if err != nil { @@ -442,7 +442,7 @@ func fallbackWorkspaceTargeting( // If we still have no v1 module paths, then we go to the final fallback and set a v1 // module at the input dir. if len(v1ModulePaths) == 0 { - v1ModulePaths = append(v1ModulePaths, bucketTargeting.InputDir()) + v1ModulePaths = append(v1ModulePaths, bucketTargeting.InputDirPath()) } return v1WorkspaceTargeting( ctx, @@ -473,7 +473,7 @@ func validateBucketTargeting( // We don't use --path, --exclude-path here because these paths have had ExternalPathToPath // applied to them. Which is another argument to do this at a higher level. for _, targetPath := range bucketTargeting.TargetPaths() { - if targetPath == bucketTargeting.InputDir() { + if targetPath == bucketTargeting.InputDirPath() { return errors.New("given input is equal to a value of --path - this has no effect and is disallowed") } // We want this to be deterministic. We don't have that many paths in almost all cases. 
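// For example (illustrative): an invocation like "buf build proto/foo --path proto/foo"
// maps both the input dir and the sole target path to "proto/foo", which is rejected by
// the check above because the --path flag would have no effect.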
@@ -497,8 +497,8 @@ func validateBucketTargeting( } } for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { - if targetExcludePath == bucketTargeting.InputDir() { - unnormalizedTargetSubDirPath := filepath.Clean(normalpath.Unnormalize(bucketTargeting.InputDir())) + if targetExcludePath == bucketTargeting.InputDirPath() { + unnormalizedTargetSubDirPath := filepath.Clean(normalpath.Unnormalize(bucketTargeting.InputDirPath())) unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) return fmt.Errorf("given input %s is equal to a value of --exclude-path %s - this would exclude everything", unnormalizedTargetSubDirPath, unnormalizedTargetExcludePath) } diff --git a/private/buf/bufworkspace/workspace_test.go b/private/buf/bufworkspace/workspace_test.go index 00b7c51f4b..4e6b83f7ae 100644 --- a/private/buf/bufworkspace/workspace_test.go +++ b/private/buf/bufworkspace/workspace_test.go @@ -73,7 +73,7 @@ func testBasic(t *testing.T, subDirPath string) { ) require.NotNil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) - require.Equal(t, "finance/portfolio/proto", bucketTargeting.InputDir()) + require.Equal(t, "finance/portfolio/proto", bucketTargeting.InputDirPath()) require.NoError(t, err) workspace, err := workspaceProvider.GetWorkspaceForBucket( @@ -153,7 +153,7 @@ func testBasic(t *testing.T, subDirPath string) { require.NoError(t, err) require.NotNil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) - require.Equal(t, "common/money/proto", bucketTargeting.InputDir()) + require.Equal(t, "common/money/proto", bucketTargeting.InputDirPath()) require.Equal( t, []string{"common/money/proto/acme/money/v1/currency_code.proto"}, @@ -209,7 +209,7 @@ func TestUnusedDep(t *testing.T) { require.NoError(t, err) require.NotNil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) - require.Equal(t, ".", bucketTargeting.InputDir()) + require.Equal(t, ".", bucketTargeting.InputDirPath()) workspace, err := workspaceProvider.GetWorkspaceForBucket( ctx, From b9e80d0fcc171f4d19006a7d036e857fe7add22f Mon Sep 17 00:00:00 2001 From: bufdev Date: Tue, 5 Mar 2024 16:18:11 -0500 Subject: [PATCH 20/29] Godoc --- private/buf/buftarget/bucket_targeting.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index 1a27db136c..678b7cad3d 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ b/private/buf/buftarget/bucket_targeting.go @@ -28,7 +28,7 @@ type BucketTargeting interface { // ControllingWorkpsace returns the information for the controlling workspace, if one was // found. If not found, then this will be nil. ControllingWorkspace() ControllingWorkspace - // InputDir returns the input directory relative to the root of the bucket + // InputDirPath returns the input directory relative to the root of the bucket InputDirPath() string // TargetPaths returns the target paths relative to the root of the bucket. 
TargetPaths() []string From adc6d29c03469f156eba965e958652afabbb4cb6 Mon Sep 17 00:00:00 2001 From: bufdev Date: Tue, 5 Mar 2024 16:20:58 -0500 Subject: [PATCH 21/29] Cleanups --- private/buf/buftarget/bucket_targeting.go | 41 +++++++++---------- .../buf/buftarget/controlling_workspace.go | 8 ++-- 2 files changed, 24 insertions(+), 25 deletions(-) diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go index 678b7cad3d..649d1dbe08 100644 --- a/private/buf/buftarget/bucket_targeting.go +++ b/private/buf/buftarget/bucket_targeting.go @@ -69,9 +69,7 @@ func NewBucketTargeting( // *** PRIVATE *** -var ( - _ BucketTargeting = &bucketTargeting{} -) +var _ BucketTargeting = &bucketTargeting{} type bucketTargeting struct { controllingWorkspace ControllingWorkspace @@ -80,25 +78,6 @@ type bucketTargeting struct { targetExcludePaths []string } -func (b *bucketTargeting) ControllingWorkspace() ControllingWorkspace { - return b.controllingWorkspace -} - -func (b *bucketTargeting) InputDirPath() string { - return b.inputDir -} - -func (b *bucketTargeting) TargetPaths() []string { - return b.targetPaths -} - -func (b *bucketTargeting) TargetExcludePaths() []string { - return b.targetExcludePaths -} - -func (b *bucketTargeting) isBucketTargeting() { -} - func newBucketTargeting( ctx context.Context, logger *zap.Logger, @@ -157,6 +136,24 @@ func newBucketTargeting( }, nil } +func (b *bucketTargeting) ControllingWorkspace() ControllingWorkspace { + return b.controllingWorkspace +} + +func (b *bucketTargeting) InputDirPath() string { + return b.inputDir +} + +func (b *bucketTargeting) TargetPaths() []string { + return b.targetPaths +} + +func (b *bucketTargeting) TargetExcludePaths() []string { + return b.targetExcludePaths +} + +func (*bucketTargeting) isBucketTargeting() {} + // mapControllingWorkspaceAndPath takes a bucket, path, and terminate func and returns the // controlling workspace and mapped path. func mapControllingWorkspaceAndPath( diff --git a/private/buf/buftarget/controlling_workspace.go b/private/buf/buftarget/controlling_workspace.go index c7f42a2e97..65f2be7084 100644 --- a/private/buf/buftarget/controlling_workspace.go +++ b/private/buf/buftarget/controlling_workspace.go @@ -25,6 +25,8 @@ type ControllingWorkspace interface { BufWorkYAMLFile() bufconfig.BufWorkYAMLFile // Returns a buf.yaml that was found. This is empty if we are returning a buf.work.yaml. 
BufYAMLFile() bufconfig.BufYAMLFile + + isControllingWorkspace() } func NewControllingWorkspace( @@ -37,9 +39,7 @@ func NewControllingWorkspace( // *** PRIVATE *** -var ( - _ ControllingWorkspace = &controllingWorkspace{} -) +var _ ControllingWorkspace = &controllingWorkspace{} type controllingWorkspace struct { path string @@ -70,3 +70,5 @@ func (c *controllingWorkspace) BufWorkYAMLFile() bufconfig.BufWorkYAMLFile { func (c *controllingWorkspace) BufYAMLFile() bufconfig.BufYAMLFile { return c.bufYAMLFile } + +func (c *controllingWorkspace) isControllingWorkspace() {} From ebd0d76ffd7b72987b4c0616460f09d129d86b36 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Tue, 5 Mar 2024 16:33:25 -0500 Subject: [PATCH 22/29] Add godocs for ControllingWorkspace --- private/buf/buftarget/controlling_workspace.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/private/buf/buftarget/controlling_workspace.go b/private/buf/buftarget/controlling_workspace.go index 65f2be7084..f2fe177cbf 100644 --- a/private/buf/buftarget/controlling_workspace.go +++ b/private/buf/buftarget/controlling_workspace.go @@ -18,17 +18,24 @@ import ( "github.com/bufbuild/buf/private/bufpkg/bufconfig" ) +// ControllingWorkspace is the information for the ControllingWorkspace. type ControllingWorkspace interface { - // Path of the controlling workspace in the bucket. + // Path of the controlling workspace. This is a normalized path, relative to the root of + // the bucket the controlling workspace was found in. Semantically, this is where the v1 + // buf.work.yaml or v2 buf.yaml workspace configuration is located. Path() string - // Returns a buf.work.yaml file that was found. This is empty if we are retruning a buf.yaml. + // Returns a buf.work.yaml file that was found for the controlling workspace. + // This is empty if we are retruning a buf.yaml. BufWorkYAMLFile() bufconfig.BufWorkYAMLFile - // Returns a buf.yaml that was found. This is empty if we are returning a buf.work.yaml. + // Returns a buf.yaml that was found for the controlling workspace. + // This is empty if we are returning a buf.work.yaml. BufYAMLFile() bufconfig.BufYAMLFile isControllingWorkspace() } +// NewControllingWorkspace takes a path where the controlling workspace configuration is +// located and a workspace configuration file for the controlling workspace. 
func NewControllingWorkspace( path string, bufWorkYAMLFile bufconfig.BufWorkYAMLFile, From d9bdb947bb8777efcfd5ecc3fad0662af88b0a01 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Tue, 5 Mar 2024 17:17:59 -0500 Subject: [PATCH 23/29] Fallback logic should only check target paths if input is not a proto file ref --- private/buf/bufworkspace/workspace_targeting.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index 94752b03c4..0be6960775 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -367,15 +367,16 @@ func fallbackWorkspaceTargeting( logger, bucket, bucketTargeting.InputDirPath(), - false, + true, ) if err != nil { return nil, err } if inputDirV1Module != nil { v1ModulePaths = append(v1ModulePaths, inputDirV1Module.Path()) - } else { - // No v1 module found for the input, check the target paths if any exist + } else if config.protoFileTargetPath == "" { + // No v1 module found for the input dir, if the input was not a protoFileRef, then + // check the target paths to see if a workspace or v1 module exists. var v1BufWorkYAML bufconfig.BufWorkYAMLFile var v2BufYAMLFile bufconfig.BufYAMLFile var controllingWorkspacePath string From 93681d83d21894068614588e7617b518ae07b9c8 Mon Sep 17 00:00:00 2001 From: bufdev Date: Tue, 5 Mar 2024 17:49:45 -0500 Subject: [PATCH 24/29] Better error message --- private/bufpkg/bufimage/bufimage.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/private/bufpkg/bufimage/bufimage.go b/private/bufpkg/bufimage/bufimage.go index 4cd11228a2..03efba9639 100644 --- a/private/bufpkg/bufimage/bufimage.go +++ b/private/bufpkg/bufimage/bufimage.go @@ -17,6 +17,7 @@ package bufimage import ( "context" "fmt" + "io/fs" "sort" "strings" @@ -799,7 +800,7 @@ func imageFileInfosWithOnlyTargetsAndTargetImportsRec( for _, imp := range imports { importImageFileInfo, ok := pathToImageFileInfo[imp] if !ok { - return fmt.Errorf("no ImageFileInfo for import %q", imp) + return fmt.Errorf("%s: import %q: %w", imageFileInfo.ExternalPath(), imp, fs.ErrNotExist) } if err := imageFileInfosWithOnlyTargetsAndTargetImportsRec(importImageFileInfo, pathToImageFileInfo, resultPaths); err != nil { return err From 62102b9e768124584ca8cd5d41f8e8bd80fe9ce5 Mon Sep 17 00:00:00 2001 From: Doria Keung Date: Tue, 5 Mar 2024 18:31:57 -0500 Subject: [PATCH 25/29] Add test case for proto file ref with no workspace or module --- private/buf/cmd/buf/buf_test.go | 25 +++++++++++++++++++ .../noworkspaceormodule/fail/import.proto | 7 ++++++ .../noworkspaceormodule/success/simple.proto | 7 ++++++ 3 files changed, 39 insertions(+) create mode 100644 private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/fail/import.proto create mode 100644 private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/success/simple.proto diff --git a/private/buf/cmd/buf/buf_test.go b/private/buf/cmd/buf/buf_test.go index cadee5908d..3cd28d89b5 100644 --- a/private/buf/cmd/buf/buf_test.go +++ b/private/buf/cmd/buf/buf_test.go @@ -2288,6 +2288,31 @@ func TestConvertRoundTrip(t *testing.T) { }) } +func TestProtoFileNoWorkspaceOrModule(t *testing.T) { + t.Parallel() + // We can build a simple proto file re that does not belong to any workspace or module + // based on the directory of the input. 
+ testRunStdout( + t, + nil, + 0, + "", + "build", + filepath.Join("testdata", "protofileref", "noworkspaceormodule", "success", "simple.proto"), + ) + // However, we should fail if there is any complexity (e.g. an import that cannot be + // resolved) since there is no workspace or module config to base this off of. + testRunStdoutStderrNoWarn( + t, + nil, + bufctl.ExitCodeFileAnnotation, + "", // no stdout + filepath.FromSlash(`testdata/protofileref/noworkspaceormodule/fail/import.proto:3:8:import "google/type/date.proto": file does not exist`), + "build", + filepath.Join("testdata", "protofileref", "noworkspaceormodule", "fail", "import.proto"), + ) +} + func testModInit(t *testing.T, expectedData string, document bool, name string, deps ...string) { tempDir := t.TempDir() baseArgs := []string{"mod", "init"} diff --git a/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/fail/import.proto b/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/fail/import.proto new file mode 100644 index 0000000000..3fa67867af --- /dev/null +++ b/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/fail/import.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +import "google/type/date.proto"; + +message A { + google.type.Date date = 1; +} diff --git a/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/success/simple.proto b/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/success/simple.proto new file mode 100644 index 0000000000..94ab31a637 --- /dev/null +++ b/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/success/simple.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package simple; + +message Foo { + string key = 1; +} From 16f05a6c1f80038707150c28f2c516e7eb53c9df Mon Sep 17 00:00:00 2001 From: bufdev Date: Tue, 5 Mar 2024 18:39:47 -0500 Subject: [PATCH 26/29] commit --- private/buf/cmd/buf/buf_test.go | 61 +++++++++++++++++++ .../buf/cmd/buf/command/lsfiles/lsfiles.go | 7 +++ .../bufpkg/bufmodule/module_read_bucket.go | 14 ++++- 3 files changed, 81 insertions(+), 1 deletion(-) diff --git a/private/buf/cmd/buf/buf_test.go b/private/buf/cmd/buf/buf_test.go index cadee5908d..10da7c6c4e 100644 --- a/private/buf/cmd/buf/buf_test.go +++ b/private/buf/cmd/buf/buf_test.go @@ -26,12 +26,15 @@ import ( "github.com/bufbuild/buf/private/buf/bufcli" "github.com/bufbuild/buf/private/buf/bufctl" "github.com/bufbuild/buf/private/buf/cmd/buf/internal/internaltesting" + "github.com/bufbuild/buf/private/bufpkg/bufimage" + imagev1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/image/v1" "github.com/bufbuild/buf/private/pkg/app/appcmd" "github.com/bufbuild/buf/private/pkg/app/appcmd/appcmdtesting" "github.com/bufbuild/buf/private/pkg/storage/storageos" "github.com/bufbuild/buf/private/pkg/storage/storagetesting" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) var convertTestDataDir = filepath.Join("command", "convert", "testdata", "convert") @@ -1194,6 +1197,47 @@ breaking: ) } +func TestLsFilesOverlappingPaths(t *testing.T) { + t.Parallel() + // It should be OK to have paths that overlap, and ls-files (and other commands) + // should output the union. 
+ testRunStdout( + t, + nil, + 0, + filepath.FromSlash(`testdata/paths/a/v3/a.proto +testdata/paths/a/v3/foo/bar.proto +testdata/paths/a/v3/foo/foo.proto`), + "ls-files", + filepath.Join("testdata", "paths"), + "--path", + filepath.Join("testdata", "paths", "a", "v3"), + "--path", + filepath.Join("testdata", "paths", "a", "v3", "foo"), + ) +} + +func TestBuildOverlappingPaths(t *testing.T) { + t.Parallel() + // This may differ from LsFilesOverlappingPaths as we do a build of an image here. + // Building of images results in bufmodule.ModuleSetToModuleReadBucketWithOnlyProtoFiles being + // called, which is the original source of the issue that resulted in this test. + testBuildLsFilesFormatImport( + t, + 0, + []string{ + `a/v3/a.proto`, + `a/v3/foo/bar.proto`, + `a/v3/foo/foo.proto`, + }, + filepath.Join("testdata", "paths"), + "--path", + filepath.Join("testdata", "paths", "a", "v3"), + "--path", + filepath.Join("testdata", "paths", "a", "v3", "foo"), + ) +} + func TestExportProto(t *testing.T) { t.Parallel() tempDir := t.TempDir() @@ -2288,6 +2332,23 @@ func TestConvertRoundTrip(t *testing.T) { }) } +// testBuildLsFilesFormatImport does effectively an ls-files, but via doing a build of an Image, and then +// listing the files from the image as if --format=import was set. +func testBuildLsFilesFormatImport(t *testing.T, expectedExitCode int, expectedFiles []string, buildArgs ...string) { + buffer := bytes.NewBuffer(nil) + testRun(t, expectedExitCode, nil, buffer, append([]string{"build", "-o", "-"}, buildArgs...)...) + protoImage := &imagev1.Image{} + err := proto.Unmarshal(buffer.Bytes(), protoImage) + require.NoError(t, err) + image, err := bufimage.NewImageForProto(protoImage) + require.NoError(t, err) + var paths []string + for _, imageFile := range image.Files() { + paths = append(paths, imageFile.Path()) + } + require.Equal(t, expectedFiles, paths) +} + func testModInit(t *testing.T, expectedData string, document bool, name string, deps ...string) { tempDir := t.TempDir() baseArgs := []string{"mod", "init"} diff --git a/private/buf/cmd/buf/command/lsfiles/lsfiles.go b/private/buf/cmd/buf/command/lsfiles/lsfiles.go index 93b678554b..b3d286f00b 100644 --- a/private/buf/cmd/buf/command/lsfiles/lsfiles.go +++ b/private/buf/cmd/buf/command/lsfiles/lsfiles.go @@ -39,6 +39,8 @@ const ( errorFormatFlagName = "error-format" includeImportsFlagName = "include-imports" includeImportableFlagName = "include-importable" + pathsFlagName = "path" + excludePathsFlagName = "exclude-path" disableSymlinksFlagName = "disable-symlinks" asImportPathsFlagName = "as-import-paths" @@ -76,6 +78,8 @@ type flags struct { Config string IncludeImports bool IncludeImportable bool + Paths []string + ExcludePaths []string DisableSymlinks bool // Deprecated. This flag no longer has any effect as we don't build images anymore. 
ErrorFormat string @@ -91,6 +95,8 @@ func newFlags() *flags { func (f *flags) Bind(flagSet *pflag.FlagSet) { bufcli.BindInputHashtag(flagSet, &f.InputHashtag) + bufcli.BindPaths(flagSet, &f.Paths, pathsFlagName) + bufcli.BindExcludePaths(flagSet, &f.ExcludePaths, excludePathsFlagName) bufcli.BindDisableSymlinks(flagSet, &f.DisableSymlinks, disableSymlinksFlagName) flagSet.StringVar( &f.Config, @@ -166,6 +172,7 @@ func run( imageFileInfos, err := controller.GetImportableImageFileInfos( ctx, input, + bufctl.WithTargetPaths(flags.Paths, flags.ExcludePaths), bufctl.WithConfigOverride(flags.Config), ) if err != nil { diff --git a/private/bufpkg/bufmodule/module_read_bucket.go b/private/bufpkg/bufmodule/module_read_bucket.go index e0a61a9882..a5601fe629 100644 --- a/private/bufpkg/bufmodule/module_read_bucket.go +++ b/private/bufpkg/bufmodule/module_read_bucket.go @@ -389,9 +389,21 @@ func (b *moduleReadBucket) WalkFileInfos( // // Use targetPaths instead of targetPathMap to have a deterministic iteration order at this level. if len(b.targetPaths) > 0 { + // Target paths may have overlapping files, for example if you do --path a --path a/b, + // you get the union of the files. We need to make sure that we only walk a given + // file path once. + seenPaths := make(map[string]struct{}) + multiTargetFileWalkFunc := func(objectInfo storage.ObjectInfo) error { + path := objectInfo.Path() + if _, ok := seenPaths[path]; ok { + return nil + } + seenPaths[path] = struct{}{} + return targetFileWalkFunc(objectInfo) + } for _, targetPath := range b.targetPaths { // Still need to determine IsTargetFile as a file could be excluded with excludeTargetPaths. - if err := bucket.Walk(ctx, targetPath, targetFileWalkFunc); err != nil { + if err := bucket.Walk(ctx, targetPath, multiTargetFileWalkFunc); err != nil { return err } } From 6965ae1693631ff87c419034f9c6ea4f056d07a6 Mon Sep 17 00:00:00 2001 From: bufdev Date: Tue, 5 Mar 2024 18:55:45 -0500 Subject: [PATCH 27/29] Error message cleanup --- private/buf/bufworkspace/module_targeting.go | 4 ++-- private/buf/bufworkspace/workspace_targeting.go | 8 +++++--- private/buf/cmd/buf/workspace_test.go | 12 ++++++------ 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/private/buf/bufworkspace/module_targeting.go b/private/buf/bufworkspace/module_targeting.go index cbcd591a7a..bf686bb972 100644 --- a/private/buf/bufworkspace/module_targeting.go +++ b/private/buf/bufworkspace/module_targeting.go @@ -93,7 +93,7 @@ func newModuleTargeting( // We're just going to be realists in our error messages here. // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, // if we were really clever, we'd go back and just add this as a module path. - return nil, fmt.Errorf("module %q was specified with --path - specify this module path directly as an input", targetPath) + return nil, fmt.Errorf("module %q was specified with --path, specify this module path directly as an input", targetPath) } if normalpath.ContainsPath(moduleDirPath, targetPath, normalpath.Relative) { isTargetModule = true @@ -112,7 +112,7 @@ func newModuleTargeting( // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, // if we were really clever, we'd go back and just remove this as a module path if it was specified. // This really should be allowed - how else do you exclude from a workspace? 
- return nil, fmt.Errorf("module %q was specified with --exclude-path - this flag cannot be used to specify module directories", targetExcludePath) + return nil, fmt.Errorf("module %q was specified with --exclude-path, this flag cannot be used to specify module directories", targetExcludePath) } if normalpath.ContainsPath(moduleDirPath, targetExcludePath, normalpath.Relative) { moduleTargetExcludePath, err := normalpath.Rel(moduleDirPath, targetExcludePath) diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index 0be6960775..f73bf87c97 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -475,7 +475,9 @@ func validateBucketTargeting( // applied to them. Which is another argument to do this at a higher level. for _, targetPath := range bucketTargeting.TargetPaths() { if targetPath == bucketTargeting.InputDirPath() { - return errors.New("given input is equal to a value of --path - this has no effect and is disallowed") + // The targetPath/InputDirPath may not equal something on the command line as we have done + // targeting via workspaces by now, so do not print them. + return errors.New("given input is equal to a value of --path, this has no effect and is disallowed") } // We want this to be deterministic. We don't have that many paths in almost all cases. // This being n^2 shouldn't be a huge issue unless someone has a diabolical wrapping shell script. @@ -485,7 +487,7 @@ func validateBucketTargeting( for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { if targetPath == targetExcludePath { unnormalizedTargetPath := filepath.Clean(normalpath.Unnormalize(targetPath)) - return fmt.Errorf("cannot set the same path for both --path and --exclude-path: %s", unnormalizedTargetPath) + return fmt.Errorf("cannot set the same path for both --path and --exclude-path: %q", unnormalizedTargetPath) } // This is new post-refactor. Before, we gave precedence to --path. While a change, // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
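The validation above rejects a --path equal to the input directory and a value passed to both --path and --exclude-path, using the small n^2 loop the comments describe. The following is a standalone sketch of that check, assuming already-normalized paths; validatePathFlags and its exact error wording are illustrative and not the function in the patch:

package main

import (
	"errors"
	"fmt"
)

// validatePathFlags rejects flag combinations that can never do useful work:
// a --path equal to the input directory, or the same value passed to both
// --path and --exclude-path. Paths are assumed to be already normalized.
func validatePathFlags(inputDirPath string, targetPaths []string, targetExcludePaths []string) error {
	for _, targetPath := range targetPaths {
		if targetPath == inputDirPath {
			return errors.New("given input is equal to a value of --path, this has no effect and is disallowed")
		}
		for _, targetExcludePath := range targetExcludePaths {
			if targetPath == targetExcludePath {
				return fmt.Errorf("cannot set the same path for both --path and --exclude-path: %q", targetPath)
			}
		}
	}
	return nil
}

func main() {
	err := validatePathFlags(".", []string{"a/v1/a.proto"}, []string{"a/v1/a.proto"})
	fmt.Println(err)
}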
@@ -501,7 +503,7 @@ func validateBucketTargeting( if targetExcludePath == bucketTargeting.InputDirPath() { unnormalizedTargetSubDirPath := filepath.Clean(normalpath.Unnormalize(bucketTargeting.InputDirPath())) unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) - return fmt.Errorf("given input %s is equal to a value of --exclude-path %s - this would exclude everything", unnormalizedTargetSubDirPath, unnormalizedTargetExcludePath) + return fmt.Errorf("given input %q is equal to a value of --exclude-path %q, this would exclude everything", unnormalizedTargetSubDirPath, unnormalizedTargetExcludePath) } } return nil diff --git a/private/buf/cmd/buf/workspace_test.go b/private/buf/cmd/buf/workspace_test.go index e5a4e8f144..d028d057a6 100644 --- a/private/buf/cmd/buf/workspace_test.go +++ b/private/buf/cmd/buf/workspace_test.go @@ -1187,7 +1187,7 @@ func TestWorkspaceWithWorkspacePathFail(t *testing.T) { nil, 1, ``, - `Failure: given input is equal to a value of --path - this has no effect and is disallowed`, + `Failure: given input is equal to a value of --path, this has no effect and is disallowed`, "lint", filepath.Join("testdata", "workspace", "success", "dir"), "--path", @@ -1198,7 +1198,7 @@ func TestWorkspaceWithWorkspacePathFail(t *testing.T) { nil, 1, ``, - `Failure: given input is equal to a value of --path - this has no effect and is disallowed`, + `Failure: given input is equal to a value of --path, this has no effect and is disallowed`, "lint", filepath.Join("testdata", "workspace", "success", "v2", "dir"), "--path", @@ -1213,7 +1213,7 @@ func TestWorkspaceWithWorkspaceExcludePathFail(t *testing.T) { nil, 1, ``, - `Failure: given input . is equal to a value of --exclude-path . - this would exclude everything`, + `Failure: given input "." is equal to a value of --exclude-path ".", this would exclude everything`, "lint", filepath.Join("testdata", "workspace", "success", "dir"), "--exclude-path", @@ -1224,7 +1224,7 @@ func TestWorkspaceWithWorkspaceExcludePathFail(t *testing.T) { nil, 1, ``, - `Failure: given input . is equal to a value of --exclude-path . - this would exclude everything`, + `Failure: given input "." 
is equal to a value of --exclude-path ".", this would exclude everything`, "lint", filepath.Join("testdata", "workspace", "success", "v2", "dir"), "--exclude-path", @@ -1240,7 +1240,7 @@ func TestWorkspaceWithWorkspaceDirectoryPathFail(t *testing.T) { nil, 1, ``, - `Failure: module "proto" was specified with --path - specify this module path directly as an input`, + `Failure: module "proto" was specified with --path, specify this module path directly as an input`, "lint", filepath.Join("testdata", "workspace", "success", "dir"), "--path", @@ -1251,7 +1251,7 @@ func TestWorkspaceWithWorkspaceDirectoryPathFail(t *testing.T) { nil, 1, ``, - `Failure: module "proto" was specified with --path - specify this module path directly as an input`, + `Failure: module "proto" was specified with --path, specify this module path directly as an input`, "lint", filepath.Join("testdata", "workspace", "success", "v2", "dir"), "--path", From 7dcc76e1124a4d7d1dd7ae8cc6ffe20a044e187c Mon Sep 17 00:00:00 2001 From: bufdev Date: Tue, 5 Mar 2024 19:41:05 -0500 Subject: [PATCH 28/29] error messages --- private/buf/bufworkspace/workspace_targeting.go | 4 ++-- private/buf/cmd/buf/command/generate/generate_test.go | 2 +- private/buf/cmd/buf/workspace_test.go | 2 +- .../other/proto/workspacetest/workspace_test.go | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index f73bf87c97..37a9f23dd4 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -684,8 +684,8 @@ func checkForControllingWorkspaceOrV1Module( func checkForOverlap(inputPath string, moduleDirPaths []string) error { for _, moduleDirPath := range moduleDirPaths { if normalpath.ContainsPath(moduleDirPath, inputPath, normalpath.Relative) { - return fmt.Errorf("failed to build input %q because it is contained by directory %q", inputPath, moduleDirPath) + return fmt.Errorf("failed to build input %q because it is contained by module at path %q specified in your configuration, you must provide the workspace or module as the input, and filter to this path using --path", inputPath, moduleDirPath) } } - return fmt.Errorf("input dir %q did not contain modules found in workspace %v", inputPath, moduleDirPaths) + return fmt.Errorf("input %q did not contain modules found in workspace %v", inputPath, moduleDirPaths) } diff --git a/private/buf/cmd/buf/command/generate/generate_test.go b/private/buf/cmd/buf/command/generate/generate_test.go index 5d81c90e98..d7d4b78451 100644 --- a/private/buf/cmd/buf/command/generate/generate_test.go +++ b/private/buf/cmd/buf/command/generate/generate_test.go @@ -224,7 +224,7 @@ func TestOutputWithPathEqualToExclude(t *testing.T) { nil, 1, ``, - filepath.FromSlash(`Failure: cannot set the same path for both --path and --exclude-path: testdata/paths/a/v1/a.proto`), + filepath.FromSlash(`Failure: cannot set the same path for both --path and --exclude-path: "testdata/paths/a/v1/a.proto"`), "--output", tempDirPath, "--template", diff --git a/private/buf/cmd/buf/workspace_test.go b/private/buf/cmd/buf/workspace_test.go index d028d057a6..f0037ba4ae 100644 --- a/private/buf/cmd/buf/workspace_test.go +++ b/private/buf/cmd/buf/workspace_test.go @@ -1126,7 +1126,7 @@ func TestWorkspaceInputOverlapFail(t *testing.T) { nil, 1, ``, - `Failure: failed to build input "proto/buf" because it is contained by directory "proto"`, + `Failure: failed to build input "proto/buf" because 
it is contained by module at path "proto" specified in your configuration, you must provide the workspace or module as the input, and filter to this path using --path`, "build", filepath.Join("testdata", "workspace", "fail", "overlap", "proto", "buf"), ) diff --git a/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go b/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go index 66f944801e..a363ac19a0 100644 --- a/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go +++ b/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go @@ -177,7 +177,7 @@ func TestWorkspaceOverlapSubDirectory(t *testing.T) { nil, 1, ``, - `Failure: failed to build input "other/proto/one" because it is contained by directory "other/proto"`, + `Failure: failed to build input "other/proto/one" because it is contained by module at path "other/proto" specified in your configuration, you must provide the workspace or module as the input, and filter to this path using --path`, "build", filepath.Join("..", "one"), ) From b7792f073cca056632c1d21cc75f59d7d2e41a3e Mon Sep 17 00:00:00 2001 From: bufdev Date: Tue, 5 Mar 2024 19:51:08 -0500 Subject: [PATCH 29/29] Test fixes --- private/buf/bufworkspace/workspace_targeting.go | 4 ++-- private/buf/cmd/buf/buf_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index 37a9f23dd4..80e4b63c03 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -487,7 +487,7 @@ func validateBucketTargeting( for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { if targetPath == targetExcludePath { unnormalizedTargetPath := filepath.Clean(normalpath.Unnormalize(targetPath)) - return fmt.Errorf("cannot set the same path for both --path and --exclude-path: %q", unnormalizedTargetPath) + return fmt.Errorf(`cannot set the same path for both --path and --exclude-path: "%s"`, unnormalizedTargetPath) } // This is new post-refactor. Before, we gave precedence to --path. While a change, // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
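A plausible motivation for preferring an explicit "%s" over %q in these messages, inferred from the surrounding test fixes rather than stated in the commit, is that %q re-escapes backslashes, so an unnormalized Windows path would print with doubled separators:

package main

import "fmt"

func main() {
	// A hypothetical unnormalized path, as filepath.Clean would leave it on Windows.
	windowsStylePath := `testdata\paths\a\v1\a.proto`

	// %q quotes with Go string-literal syntax, doubling every backslash.
	fmt.Printf("with %%q:     %q\n", windowsStylePath)
	// An explicit "%s" keeps the quotes but leaves the separators untouched.
	fmt.Printf("with \"%%s\": \"%s\"\n", windowsStylePath)
}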
@@ -503,7 +503,7 @@ func validateBucketTargeting( if targetExcludePath == bucketTargeting.InputDirPath() { unnormalizedTargetSubDirPath := filepath.Clean(normalpath.Unnormalize(bucketTargeting.InputDirPath())) unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) - return fmt.Errorf("given input %q is equal to a value of --exclude-path %q, this would exclude everything", unnormalizedTargetSubDirPath, unnormalizedTargetExcludePath) + return fmt.Errorf(`given input "%s" is equal to a value of --exclude-path "%s", this would exclude everything`, unnormalizedTargetSubDirPath, unnormalizedTargetExcludePath) } } return nil diff --git a/private/buf/cmd/buf/buf_test.go b/private/buf/cmd/buf/buf_test.go index 256b79dd3c..5f662c33cb 100644 --- a/private/buf/cmd/buf/buf_test.go +++ b/private/buf/cmd/buf/buf_test.go @@ -2351,7 +2351,7 @@ func TestProtoFileNoWorkspaceOrModule(t *testing.T) { nil, bufctl.ExitCodeFileAnnotation, "", // no stdout - filepath.FromSlash(`testdata/protofileref/noworkspaceormodule/fail/import.proto:3:8:import "google/type/date.proto": file does not exist`), + filepath.FromSlash(`testdata/protofileref/noworkspaceormodule/fail/import.proto:3:8:import "`)+`google/type/date.proto": file does not exist`, "build", filepath.Join("testdata", "protofileref", "noworkspaceormodule", "fail", "import.proto"), )
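The string split in this last hunk appears to keep filepath.FromSlash away from the quoted import path, so only the on-disk file path picks up OS-specific separators while google/type/date.proto stays slash-separated, as the compiler prints it. A portable sketch of the difference follows; fromSlash takes an explicit separator only so the Windows result can be shown on any platform:

package main

import (
	"fmt"
	"strings"
)

// fromSlash mimics filepath.FromSlash with an explicit separator so the
// Windows result can be printed on any platform.
func fromSlash(s string, sep string) string {
	return strings.ReplaceAll(s, "/", sep)
}

func main() {
	const sep = `\` // filepath.Separator on Windows
	const prefix = `testdata/protofileref/noworkspaceormodule/fail/import.proto:3:8:import "`
	const suffix = `google/type/date.proto": file does not exist`

	// Converting the whole expectation would also rewrite the import path.
	fmt.Println(fromSlash(prefix+suffix, sep))
	// Converting only the file path prefix matches what the compiler prints.
	fmt.Println(fromSlash(prefix, sep) + suffix)
}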