diff --git a/TODO.txt b/TODO.txt index ca2f30ea16..f1330e965b 100644 --- a/TODO.txt +++ b/TODO.txt @@ -12,15 +12,3 @@ NOTE: We are not allowing cross-workspace finding for include_package_files=true - Changed protoc-gen-buf-lint and protoc-gen-buf-breaking configuration, deprecated old configurations - Need to manually test protoc-gen-buf-breaking - -$ bothbuf build google/ads/admob/v1/admob_api.proto -OLD -OLD STDOUT: -OLD EXIT CODE: 100 -OLD STDERR: -google/ads/admob/v1/admob_api.proto:19:8:stat google/ads/admob/v1/admob_resources.proto: file does not exist -NEW -NEW STDOUT: -NEW EXIT CODE: 100 -NEW STDERR: -/Users/pedge/git/googleapis/google/ads/admob/v1/admob_api.proto:19:8:stat google/ads/admob/v1/admob_resources.proto: file does not exist diff --git a/private/buf/bufctl/controller.go b/private/buf/bufctl/controller.go index 1df0501c71..1fbddaf06f 100644 --- a/private/buf/bufctl/controller.go +++ b/private/buf/bufctl/controller.go @@ -792,7 +792,7 @@ func (c *controller) getWorkspaceForProtoFileRef( // TODO FUTURE: Feed flag names through to here. return nil, fmt.Errorf("--exclude-path is not valid for use with .proto file references") } - readBucketCloser, err := c.buffetchReader.GetSourceReadBucketCloser( + readBucketCloser, bucketTargeting, err := c.buffetchReader.GetSourceReadBucketCloser( ctx, c.container, protoFileRef, @@ -804,19 +804,9 @@ func (c *controller) getWorkspaceForProtoFileRef( defer func() { retErr = multierr.Append(retErr, readBucketCloser.Close()) }() - // The ProtoFilePath is still relative to the input bucket, not the bucket - // retrieved from buffetch. Treat the path just as we do with targetPaths - // and externalPaths in withPathsForBucketExtender. 
- protoFilePath, err := readBucketCloser.PathForExternalPath(protoFileRef.ProtoFilePath()) - if err != nil { - return nil, err - } options := []bufworkspace.WorkspaceBucketOption{ - bufworkspace.WithTargetSubDirPath( - readBucketCloser.SubDirPath(), - ), bufworkspace.WithProtoFileTargetPath( - protoFilePath, + protoFileRef.ProtoFilePath(), protoFileRef.IncludePackageFiles(), ), bufworkspace.WithConfigOverride( @@ -832,6 +822,7 @@ func (c *controller) getWorkspaceForProtoFileRef( return c.workspaceProvider.GetWorkspaceForBucket( ctx, readBucketCloser, + bucketTargeting, options..., ) } @@ -841,7 +832,7 @@ func (c *controller) getWorkspaceForSourceRef( sourceRef buffetch.SourceRef, functionOptions *functionOptions, ) (_ bufworkspace.Workspace, retErr error) { - readBucketCloser, err := c.buffetchReader.GetSourceReadBucketCloser( + readBucketCloser, bucketTargeting, err := c.buffetchReader.GetSourceReadBucketCloser( ctx, c.container, sourceRef, @@ -853,18 +844,7 @@ func (c *controller) getWorkspaceForSourceRef( defer func() { retErr = multierr.Append(retErr, readBucketCloser.Close()) }() - functionOptions, err = functionOptions.withPathsForBucketExtender(readBucketCloser) - if err != nil { - return nil, err - } options := []bufworkspace.WorkspaceBucketOption{ - bufworkspace.WithTargetSubDirPath( - readBucketCloser.SubDirPath(), - ), - bufworkspace.WithTargetPaths( - functionOptions.targetPaths, - functionOptions.targetExcludePaths, - ), bufworkspace.WithConfigOverride( functionOptions.configOverride, ), @@ -878,6 +858,7 @@ func (c *controller) getWorkspaceForSourceRef( return c.workspaceProvider.GetWorkspaceForBucket( ctx, readBucketCloser, + bucketTargeting, options..., ) } @@ -887,7 +868,7 @@ func (c *controller) getWorkspaceDepManagerForDirRef( dirRef buffetch.DirRef, functionOptions *functionOptions, ) (_ bufworkspace.WorkspaceDepManager, retErr error) { - readWriteBucket, err := c.buffetchReader.GetDirReadWriteBucket( + readWriteBucket, bucketTargeting, err := 
c.buffetchReader.GetDirReadWriteBucket( ctx, c.container, dirRef, @@ -901,9 +882,7 @@ func (c *controller) getWorkspaceDepManagerForDirRef( return c.workspaceDepManagerProvider.GetWorkspaceDepManager( ctx, readWriteBucket, - bufworkspace.WithTargetSubDirPath( - readWriteBucket.SubDirPath(), - ), + bucketTargeting, ) } diff --git a/private/buf/bufctl/option.go b/private/buf/bufctl/option.go index 581f43f544..733e13130d 100644 --- a/private/buf/bufctl/option.go +++ b/private/buf/bufctl/option.go @@ -14,7 +14,9 @@ package bufctl -import "github.com/bufbuild/buf/private/buf/buffetch" +import ( + "github.com/bufbuild/buf/private/buf/buffetch" +) type ControllerOption func(*controller) @@ -144,28 +146,6 @@ func newFunctionOptions(controller *controller) *functionOptions { } } -func (f *functionOptions) withPathsForBucketExtender( - bucketExtender buffetch.BucketExtender, -) (*functionOptions, error) { - deref := *f - c := &deref - for i, inputTargetPath := range c.targetPaths { - targetPath, err := bucketExtender.PathForExternalPath(inputTargetPath) - if err != nil { - return nil, err - } - c.targetPaths[i] = targetPath - } - for i, inputTargetExcludePath := range c.targetExcludePaths { - targetExcludePath, err := bucketExtender.PathForExternalPath(inputTargetExcludePath) - if err != nil { - return nil, err - } - c.targetExcludePaths[i] = targetExcludePath - } - return c, nil -} - func (f *functionOptions) getGetReadBucketCloserOptions() []buffetch.GetReadBucketCloserOption { var getReadBucketCloserOptions []buffetch.GetReadBucketCloserOption if f.copyToInMemory { @@ -174,6 +154,18 @@ func (f *functionOptions) getGetReadBucketCloserOptions() []buffetch.GetReadBuck buffetch.GetReadBucketCloserWithCopyToInMemory(), ) } + if len(f.targetPaths) > 0 { + getReadBucketCloserOptions = append( + getReadBucketCloserOptions, + buffetch.GetReadBucketCloserWithTargetPaths(f.targetPaths), + ) + } + if len(f.targetExcludePaths) > 0 { + getReadBucketCloserOptions = append( + 
getReadBucketCloserOptions, + buffetch.GetReadBucketCloserWithTargetExcludePaths(f.targetExcludePaths), + ) + } if f.configOverride != "" { // If we have a config override, we do not search for buf.yamls or buf.work.yamls, // instead acting as if the config override was the only configuration file available. diff --git a/private/buf/buffetch/buffetch.go b/private/buf/buffetch/buffetch.go index 47efb7820c..5080b9db3e 100644 --- a/private/buf/buffetch/buffetch.go +++ b/private/buf/buffetch/buffetch.go @@ -22,6 +22,7 @@ import ( "net/http" "github.com/bufbuild/buf/private/buf/buffetch/internal" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufconfig" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/pkg/app" @@ -321,7 +322,7 @@ type SourceReader interface { container app.EnvStdinContainer, sourceRef SourceRef, options ...GetReadBucketCloserOption, - ) (ReadBucketCloser, error) + ) (ReadBucketCloser, buftarget.BucketTargeting, error) } // GetReadBucketCloserOption is an option for a GetSourceReadBucketCloser call. @@ -345,6 +346,22 @@ func GetReadBucketCloserWithNoSearch() GetReadBucketCloserOption { } } +// GetReadBucketCloserWithTargetPaths sets the targets paths for bucket targeting information +// returned with the bucket. +func GetReadBucketCloserWithTargetPaths(targetPaths []string) GetReadBucketCloserOption { + return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { + getReadBucketCloserOptions.targetPaths = targetPaths + } +} + +// GetReadBucketCloserWithTargetExcludePaths sets the target exclude paths for bucket targeting +// information returned with the bucket. +func GetReadBucketCloserWithTargetExcludePaths(targetExcludePaths []string) GetReadBucketCloserOption { + return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { + getReadBucketCloserOptions.targetExcludePaths = targetExcludePaths + } +} + // DirReader is a dir reader. 
type DirReader interface { // GetDirReadWriteBucket gets the dir bucket. @@ -353,7 +370,7 @@ type DirReader interface { container app.EnvStdinContainer, dirRef DirRef, options ...GetReadWriteBucketOption, - ) (ReadWriteBucket, error) + ) (ReadWriteBucket, buftarget.BucketTargeting, error) } // GetReadWriteBucketOption is an option for a GetDirReadWriteBucket call. @@ -369,6 +386,22 @@ func GetReadWriteBucketWithNoSearch() GetReadWriteBucketOption { } } +// GetReadWriteBucketWithTargetPaths sets the target paths for the bucket targeting information +// returned with the bucket. +func GetReadWriteBucketWithTargetPaths(targetPaths []string) GetReadWriteBucketOption { + return func(getReadWriteBucketOptions *getReadWriteBucketOptions) { + getReadWriteBucketOptions.targetPaths = targetPaths + } +} + +// GetReadWriteBucketWithTargetExcludePaths sets the target exclude paths for the bucket +// targeting information returned with the bucket. +func GetReadWriteBucketWithTargetExcludePaths(targetExcludePaths []string) GetReadWriteBucketOption { + return func(getReadWriteBucketOptions *getReadWriteBucketOptions) { + getReadWriteBucketOptions.targetExcludePaths = targetExcludePaths + } +} + // ModuleFetcher is a module fetcher. type ModuleFetcher interface { // GetModuleKey gets the ModuleKey. 
@@ -544,8 +577,10 @@ func GetInputConfigForString( } type getReadBucketCloserOptions struct { - noSearch bool - copyToInMemory bool + noSearch bool + copyToInMemory bool + targetPaths []string + targetExcludePaths []string } func newGetReadBucketCloserOptions() *getReadBucketCloserOptions { @@ -553,7 +588,9 @@ func newGetReadBucketCloserOptions() *getReadBucketCloserOptions { } type getReadWriteBucketOptions struct { - noSearch bool + noSearch bool + targetPaths []string + targetExcludePaths []string } func newGetReadWriteBucketOptions() *getReadWriteBucketOptions { diff --git a/private/buf/buffetch/internal/internal.go b/private/buf/buffetch/internal/internal.go index 74eacb1fe5..9ef20508df 100644 --- a/private/buf/buffetch/internal/internal.go +++ b/private/buf/buffetch/internal/internal.go @@ -21,6 +21,7 @@ import ( "net/http" "strconv" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufconfig" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/pkg/app" @@ -183,7 +184,7 @@ type ProtoFileRef interface { BucketRef // Path is the normalized path to the file reference. Path() string - // IncludePackageFiles says to include the same package files TODO update comment + // IncludePackageFiles says to include the files from the same package files IncludePackageFiles() bool FileScheme() FileScheme protoFileRef() @@ -411,12 +412,6 @@ type BucketExtender interface { // the directory that contained this terminate file, and the subDirPath will be the sub-direftory of // the actual asset relative to the terminate file. SubDirPath() string - - // PathForExternalPath takes a path external to the asset and converts it to - // a path that is relative to the asset. - // - // The returned path will be normalized and validated. - PathForExternalPath(externalPath string) (string, error) } // ReadBucketCloser is a bucket returned from GetReadBucketCloser. 
@@ -443,23 +438,19 @@ type Reader interface { options ...GetFileOption, ) (io.ReadCloser, error) // GetReadBucketCloser gets the bucket. - // - // If done for a ProtoFileRef, the Bucket will be for the enclosing module or workspace via - // terminateFileNames or protoFileTerminateFileNames. No filtering of the Bucket is performed, this is - // the responsibility of the caller. GetReadBucketCloser( ctx context.Context, container app.EnvStdinContainer, bucketRef BucketRef, options ...GetReadBucketCloserOption, - ) (ReadBucketCloser, error) + ) (ReadBucketCloser, buftarget.BucketTargeting, error) // GetReadWriteBucket gets the bucket. GetReadWriteBucket( ctx context.Context, container app.EnvStdinContainer, dirRef DirRef, options ...GetReadWriteBucketOption, - ) (ReadWriteBucket, error) + ) (ReadWriteBucket, buftarget.BucketTargeting, error) // GetModuleKey gets the ModuleKey. GetModuleKey( ctx context.Context, @@ -829,21 +820,23 @@ func WithGetReadBucketCloserCopyToInMemory() GetReadBucketCloserOption { // See bufconfig.TerminateAtControllingWorkspace, which is the only thing that uses this. // This is used by both non-ProtoFileRefs to find the controlling workspace, AND ProtoFileRefs // to find the controlling workspace of an enclosing module or workspace. -func WithGetReadBucketCloserTerminateFunc(terminateFunc TerminateFunc) GetReadBucketCloserOption { +func WithGetReadBucketCloserTerminateFunc(terminateFunc buftarget.TerminateFunc) GetReadBucketCloserOption { return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { getReadBucketCloserOptions.terminateFunc = terminateFunc } } -// WithGetReadBucketCloserProtoFileTerminateFunc is like WithGetReadBucketCloserTerminateFunc, but determines -// where to stop searching for the enclosing module or workspace when given a ProtoFileRef. -// -// See bufconfig.TerminateAtEnclosingModuleOrWorkspaceForProtoFileRef, which is the only thing that uses this. -// This finds the enclosing module or workspace. 
-// This is only used for ProtoFileRefs. -func WithGetReadBucketCloserProtoFileTerminateFunc(protoFileTerminateFunc TerminateFunc) GetReadBucketCloserOption { +// WithGetReadBucketCloserTargetPaths sets the target paths for the bucket targeting information. +func WithGetReadBucketCloserTargetPaths(targetPaths []string) GetReadBucketCloserOption { return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { - getReadBucketCloserOptions.protoFileTerminateFunc = protoFileTerminateFunc + getReadBucketCloserOptions.targetPaths = targetPaths + } +} + +// WithGetReadBucketCloserTargetExcludePaths sets the target exclude paths for the bucket targeting information. +func WithGetReadBucketCloserTargetExcludePaths(targetExcludePaths []string) GetReadBucketCloserOption { + return func(getReadBucketCloserOptions *getReadBucketCloserOptions) { + getReadBucketCloserOptions.targetExcludePaths = targetExcludePaths } } @@ -857,24 +850,24 @@ type GetReadWriteBucketOption func(*getReadWriteBucketOptions) // See bufconfig.TerminateAtControllingWorkspace, which is the only thing that uses this. // This is used by both non-ProtoFileRefs to find the controlling workspace, AND ProtoFileRefs // to find the controlling workspace of an enclosing module or workspace. -func WithGetReadWriteBucketTerminateFunc(terminateFunc TerminateFunc) GetReadWriteBucketOption { +func WithGetReadWriteBucketTerminateFunc(terminateFunc buftarget.TerminateFunc) GetReadWriteBucketOption { return func(getReadWriteBucketOptions *getReadWriteBucketOptions) { getReadWriteBucketOptions.terminateFunc = terminateFunc } } -// TerminateFunc is a termination function. -// -// This function should return true if the search should terminate at the prefix, that is -// the prefix should be where the bucket is mapped onto. -// -// The original subDirPath is also given to the TerminateFunc. 
-type TerminateFunc func( - ctx context.Context, - bucket storage.ReadBucket, - prefix string, - originalSubDirPath string, -) (terminate bool, err error) +// WithGetReadWriteBucketTargetPaths sets the target paths for the bucket targeting information. +func WithGetReadWriteBucketTargetPaths(targetPaths []string) GetReadWriteBucketOption { + return func(getReadWriteBucketOptions *getReadWriteBucketOptions) { + getReadWriteBucketOptions.targetPaths = targetPaths + } +} + +func WithGetReadWriteBucketTargetExcludePaths(targetExcludePaths []string) GetReadWriteBucketOption { + return func(getReadWriteBucketOptions *getReadWriteBucketOptions) { + getReadWriteBucketOptions.targetExcludePaths = targetExcludePaths + } +} // PutFileOption is a PutFile option. type PutFileOption func(*putFileOptions) diff --git a/private/buf/buffetch/internal/read_bucket_closer.go b/private/buf/buffetch/internal/read_bucket_closer.go index 078a81b831..c9a8dcf2b4 100644 --- a/private/buf/buffetch/internal/read_bucket_closer.go +++ b/private/buf/buffetch/internal/read_bucket_closer.go @@ -17,6 +17,7 @@ package internal import ( "context" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storagemem" @@ -27,33 +28,27 @@ var _ ReadBucketCloser = &readBucketCloser{} type readBucketCloser struct { storage.ReadBucketCloser - subDirPath string - pathForExternalPath func(string) (string, error) + subDirPath string } func newReadBucketCloser( storageReadBucketCloser storage.ReadBucketCloser, - subDirPath string, - pathForExternalPath func(string) (string, error), -) (*readBucketCloser, error) { - normalizedSubDirPath, err := normalpath.NormalizeAndValidate(subDirPath) - if err != nil { - return nil, err - } + bucketPath string, + bucketTargeting buftarget.BucketTargeting, +) *readBucketCloser { + normalizedSubDirPath := 
normalpath.Normalize(bucketTargeting.InputDirPath()) return &readBucketCloser{ - ReadBucketCloser: storageReadBucketCloser, - subDirPath: normalizedSubDirPath, - pathForExternalPath: pathForExternalPath, - }, nil + ReadBucketCloser: storageReadBucketCloser, + subDirPath: normalizedSubDirPath, + } } func newReadBucketCloserForReadWriteBucket( readWriteBucket ReadWriteBucket, ) *readBucketCloser { return &readBucketCloser{ - ReadBucketCloser: storage.NopReadBucketCloser(readWriteBucket), - subDirPath: readWriteBucket.SubDirPath(), - pathForExternalPath: readWriteBucket.PathForExternalPath, + ReadBucketCloser: storage.NopReadBucketCloser(readWriteBucket), + subDirPath: readWriteBucket.SubDirPath(), } } @@ -61,10 +56,6 @@ func (r *readBucketCloser) SubDirPath() string { return r.subDirPath } -func (r *readBucketCloser) PathForExternalPath(externalPath string) (string, error) { - return r.pathForExternalPath(externalPath) -} - func (r *readBucketCloser) copyToInMemory(ctx context.Context) (*readBucketCloser, error) { storageReadBucket, err := storagemem.CopyReadBucket(ctx, r.ReadBucketCloser) if err != nil { @@ -75,8 +66,7 @@ func (r *readBucketCloser) copyToInMemory(ctx context.Context) (*readBucketClose ReadBucket: storageReadBucket, closeFunc: r.ReadBucketCloser.Close, }, - subDirPath: r.subDirPath, - pathForExternalPath: r.pathForExternalPath, + subDirPath: r.subDirPath, }, nil } diff --git a/private/buf/buffetch/internal/read_write_bucket.go b/private/buf/buffetch/internal/read_write_bucket.go index 96c7416a0a..767d581c8e 100644 --- a/private/buf/buffetch/internal/read_write_bucket.go +++ b/private/buf/buffetch/internal/read_write_bucket.go @@ -15,6 +15,7 @@ package internal import ( + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/storage" ) @@ -24,30 +25,21 @@ var _ ReadWriteBucket = &readWriteBucket{} type readWriteBucket struct { storage.ReadWriteBucket - subDirPath string - 
pathForExternalPath func(string) (string, error) + subDirPath string } func newReadWriteBucket( storageReadWriteBucket storage.ReadWriteBucket, - subDirPath string, - pathForExternalPath func(string) (string, error), -) (*readWriteBucket, error) { - normalizedSubDirPath, err := normalpath.NormalizeAndValidate(subDirPath) - if err != nil { - return nil, err - } + bucketPath string, + bucketTargeting buftarget.BucketTargeting, +) *readWriteBucket { + normalizedSubDirPath := normalpath.Normalize(bucketTargeting.InputDirPath()) return &readWriteBucket{ - ReadWriteBucket: storageReadWriteBucket, - subDirPath: normalizedSubDirPath, - pathForExternalPath: pathForExternalPath, - }, nil + ReadWriteBucket: storageReadWriteBucket, + subDirPath: normalizedSubDirPath, + } } func (r *readWriteBucket) SubDirPath() string { return r.subDirPath } - -func (r *readWriteBucket) PathForExternalPath(externalPath string) (string, error) { - return r.pathForExternalPath(externalPath) -} diff --git a/private/buf/buffetch/internal/reader.go b/private/buf/buffetch/internal/reader.go index 54c2d896f4..6d021588da 100644 --- a/private/buf/buffetch/internal/reader.go +++ b/private/buf/buffetch/internal/reader.go @@ -25,6 +25,7 @@ import ( "os" "path/filepath" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/git" @@ -32,6 +33,7 @@ import ( "github.com/bufbuild/buf/private/pkg/ioext" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/osext" + "github.com/bufbuild/buf/private/pkg/slicesext" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storagearchive" "github.com/bufbuild/buf/private/pkg/storage/storagemem" @@ -111,12 +113,11 @@ func (r *reader) GetReadBucketCloser( container app.EnvStdinContainer, bucketRef BucketRef, options ...GetReadBucketCloserOption, -) (retReadBucketCloser 
ReadBucketCloser, retErr error) { +) (retReadBucketCloser ReadBucketCloser, _ buftarget.BucketTargeting, retErr error) { getReadBucketCloserOptions := newGetReadBucketCloserOptions() for _, option := range options { option(getReadBucketCloserOptions) } - if getReadBucketCloserOptions.copyToInMemory { defer func() { if retReadBucketCloser != nil { @@ -134,31 +135,36 @@ func (r *reader) GetReadBucketCloser( } }() } - switch t := bucketRef.(type) { case ArchiveRef: return r.getArchiveBucket( ctx, container, t, + getReadBucketCloserOptions.targetPaths, + getReadBucketCloserOptions.targetExcludePaths, getReadBucketCloserOptions.terminateFunc, ) case DirRef: - readWriteBucket, err := r.getDirBucket( + readWriteBucket, bucketTargeting, err := r.getDirBucket( ctx, container, t, + getReadBucketCloserOptions.targetPaths, + getReadBucketCloserOptions.targetExcludePaths, getReadBucketCloserOptions.terminateFunc, ) if err != nil { - return nil, err + return nil, nil, err } - return newReadBucketCloserForReadWriteBucket(readWriteBucket), nil + return newReadBucketCloserForReadWriteBucket(readWriteBucket), bucketTargeting, nil case GitRef: return r.getGitBucket( ctx, container, t, + getReadBucketCloserOptions.targetPaths, + getReadBucketCloserOptions.targetExcludePaths, getReadBucketCloserOptions.terminateFunc, ) case ProtoFileRef: @@ -167,10 +173,9 @@ func (r *reader) GetReadBucketCloser( container, t, getReadBucketCloserOptions.terminateFunc, - getReadBucketCloserOptions.protoFileTerminateFunc, ) default: - return nil, fmt.Errorf("unknown BucketRef type: %T", bucketRef) + return nil, nil, fmt.Errorf("unknown BucketRef type: %T", bucketRef) } } @@ -179,7 +184,7 @@ func (r *reader) GetReadWriteBucket( container app.EnvStdinContainer, dirRef DirRef, options ...GetReadWriteBucketOption, -) (ReadWriteBucket, error) { +) (ReadWriteBucket, buftarget.BucketTargeting, error) { getReadWriteBucketOptions := newGetReadWriteBucketOptions() for _, option := range options { 
option(getReadWriteBucketOptions) @@ -188,6 +193,8 @@ func (r *reader) GetReadWriteBucket( ctx, container, dirRef, + getReadWriteBucketOptions.targetPaths, + getReadWriteBucketOptions.targetExcludePaths, getReadWriteBucketOptions.terminateFunc, ) } @@ -234,15 +241,13 @@ func (r *reader) getArchiveBucket( ctx context.Context, container app.EnvStdinContainer, archiveRef ArchiveRef, - terminateFunc TerminateFunc, -) (_ ReadBucketCloser, retErr error) { - subDirPath, err := normalpath.NormalizeAndValidate(archiveRef.SubDirPath()) - if err != nil { - return nil, err - } + targetPaths []string, + targetExcludePaths []string, + terminateFunc buftarget.TerminateFunc, +) (ReadBucketCloser, buftarget.BucketTargeting, error) { readCloser, size, err := r.getFileReadCloserAndSize(ctx, container, archiveRef, false) if err != nil { - return nil, err + return nil, nil, err } readWriteBucket := storagemem.NewReadWriteBucket() switch archiveType := archiveRef.ArchiveType(); archiveType { @@ -255,21 +260,21 @@ func (r *reader) getArchiveBucket( archiveRef.StripComponents(), ), ); err != nil { - return nil, err + return nil, nil, err } case ArchiveTypeZip: var readerAt io.ReaderAt if size < 0 { data, err := io.ReadAll(readCloser) if err != nil { - return nil, err + return nil, nil, err } readerAt = bytes.NewReader(data) size = int64(len(data)) } else { readerAt, err = ioext.ReaderAtForReader(readCloser) if err != nil { - return nil, err + return nil, nil, err } } if err := storagearchive.Unzip( @@ -281,35 +286,52 @@ func (r *reader) getArchiveBucket( archiveRef.StripComponents(), ), ); err != nil { - return nil, err + return nil, nil, err } default: - return nil, fmt.Errorf("unknown ArchiveType: %v", archiveType) + return nil, nil, fmt.Errorf("unknown ArchiveType: %v", archiveType) } - return getReadBucketCloserForBucket(ctx, r.logger, storage.NopReadBucketCloser(readWriteBucket), subDirPath, terminateFunc) + return getReadBucketCloserForBucket( + ctx, + r.logger, + 
storage.NopReadBucketCloser(readWriteBucket), + archiveRef.SubDirPath(), + targetPaths, + targetExcludePaths, + terminateFunc, + ) } func (r *reader) getDirBucket( ctx context.Context, container app.EnvStdinContainer, dirRef DirRef, - terminateFunc TerminateFunc, -) (ReadWriteBucket, error) { + targetPaths []string, + targetExcludePaths []string, + terminateFunc buftarget.TerminateFunc, +) (ReadWriteBucket, buftarget.BucketTargeting, error) { if !r.localEnabled { - return nil, NewReadLocalDisabledError() + return nil, nil, NewReadLocalDisabledError() } - return getReadWriteBucketForOS(ctx, r.logger, r.storageosProvider, dirRef.Path(), terminateFunc) + return getReadWriteBucketForOS( + ctx, + r.logger, + r.storageosProvider, + dirRef.Path(), + targetPaths, + targetExcludePaths, + terminateFunc, + ) } func (r *reader) getProtoFileBucket( ctx context.Context, container app.EnvStdinContainer, protoFileRef ProtoFileRef, - terminateFunc TerminateFunc, - protoFileTerminateFunc TerminateFunc, -) (ReadBucketCloser, error) { + terminateFunc buftarget.TerminateFunc, +) (ReadBucketCloser, buftarget.BucketTargeting, error) { if !r.localEnabled { - return nil, NewReadLocalDisabledError() + return nil, nil, NewReadLocalDisabledError() } return getReadBucketCloserForOSProtoFile( ctx, @@ -317,7 +339,6 @@ func (r *reader) getProtoFileBucket( r.storageosProvider, protoFileRef.Path(), terminateFunc, - protoFileTerminateFunc, ) } @@ -325,21 +346,19 @@ func (r *reader) getGitBucket( ctx context.Context, container app.EnvStdinContainer, gitRef GitRef, - terminateFunc TerminateFunc, -) (_ ReadBucketCloser, retErr error) { + targetPaths []string, + targetExcludePaths []string, + terminateFunc buftarget.TerminateFunc, +) (ReadBucketCloser, buftarget.BucketTargeting, error) { if !r.gitEnabled { - return nil, NewReadGitDisabledError() + return nil, nil, NewReadGitDisabledError() } if r.gitCloner == nil { - return nil, errors.New("git cloner is nil") - } - subDirPath, err := 
normalpath.NormalizeAndValidate(gitRef.SubDirPath()) - if err != nil { - return nil, err + return nil, nil, errors.New("git cloner is nil") } gitURL, err := getGitURL(gitRef) if err != nil { - return nil, err + return nil, nil, err } readWriteBucket := storagemem.NewReadWriteBucket() if err := r.gitCloner.CloneToBucket( @@ -353,9 +372,17 @@ func (r *reader) getGitBucket( RecurseSubmodules: gitRef.RecurseSubmodules(), }, ); err != nil { - return nil, fmt.Errorf("could not clone %s: %v", gitURL, err) + return nil, nil, fmt.Errorf("could not clone %s: %v", gitURL, err) } - return getReadBucketCloserForBucket(ctx, r.logger, storage.NopReadBucketCloser(readWriteBucket), subDirPath, terminateFunc) + return getReadBucketCloserForBucket( + ctx, + r.logger, + storage.NopReadBucketCloser(readWriteBucket), + gitRef.SubDirPath(), + targetPaths, + targetExcludePaths, + terminateFunc, + ) } func (r *reader) getModuleKey( @@ -541,46 +568,46 @@ func getReadBucketCloserForBucket( logger *zap.Logger, inputBucket storage.ReadBucketCloser, inputSubDirPath string, - terminateFunc TerminateFunc, -) (ReadBucketCloser, error) { - mapPath, subDirPath, _, err := getMapPathAndSubDirPath(ctx, logger, inputBucket, inputSubDirPath, terminateFunc) + targetPaths []string, + targetExcludePaths []string, + terminateFunc buftarget.TerminateFunc, +) (ReadBucketCloser, buftarget.BucketTargeting, error) { + if err := validatePaths(inputSubDirPath, targetPaths, targetExcludePaths); err != nil { + return nil, nil, err + } + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + logger, + inputBucket, + inputSubDirPath, + targetPaths, + targetExcludePaths, + terminateFunc, + ) if err != nil { - return nil, err + return nil, nil, err } - if mapPath != "." { + var bucketPath string + if bucketTargeting.ControllingWorkspace() != nil { + bucketPath = bucketTargeting.ControllingWorkspace().Path() + } else { + bucketPath = bucketTargeting.InputDirPath() + } + if bucketPath != "." 
{ inputBucket = storage.MapReadBucketCloser( inputBucket, - storage.MapOnPrefix(mapPath), + storage.MapOnPrefix(bucketPath), ) + // We return the same bucket targeting information, since we are using mappers to remap + // paths with the controlling workspace as the prefix when using the bucket. } logger.Debug( "buffetch creating new bucket", - zap.String("inputSubDirPath", inputSubDirPath), - zap.String("mapPath", mapPath), - zap.String("subDirPath", subDirPath), - ) - return newReadBucketCloser( - inputBucket, - subDirPath, - // This turns paths that were done relative to the root of the input into paths - // that are now relative to the mapped bucket. - // - // This happens if you do i.e. .git#subdir=foo/bar --path foo/bar/baz.proto - // We need to turn the path into baz.proto - func(externalPath string) (string, error) { - if filepath.IsAbs(externalPath) { - return "", fmt.Errorf("%s: absolute paths cannot be used for this input type", externalPath) - } - if !normalpath.EqualsOrContainsPath(mapPath, externalPath, normalpath.Relative) { - return "", fmt.Errorf("path %q from input does not contain path %q", mapPath, externalPath) - } - relPath, err := normalpath.Rel(mapPath, externalPath) - if err != nil { - return "", err - } - return normalpath.NormalizeAndValidate(relPath) - }, + zap.String("bucketPath", bucketPath), + zap.Strings("targetPaths", bucketTargeting.TargetPaths()), ) + readBucketCloser := newReadBucketCloser(inputBucket, bucketPath, bucketTargeting) + return readBucketCloser, bucketTargeting, nil } // Use for directory-based buckets. 
@@ -589,73 +616,156 @@ func getReadWriteBucketForOS( logger *zap.Logger, storageosProvider storageos.Provider, inputDirPath string, - terminateFunc TerminateFunc, -) (ReadWriteBucket, error) { - inputDirPath = normalpath.Normalize(inputDirPath) - absInputDirPath, err := normalpath.NormalizeAndAbsolute(inputDirPath) + targetPaths []string, + targetExcludePaths []string, + terminateFunc buftarget.TerminateFunc, +) (ReadWriteBucket, buftarget.BucketTargeting, error) { + fsRoot, fsRootInputSubDirPath, err := fsRootAndFSRelPathForPath(inputDirPath) if err != nil { - return nil, err + return nil, nil, err + } + fsRootTargetPaths := make([]string, len(targetPaths)) + for i, targetPath := range targetPaths { + _, fsRootTargetPath, err := fsRootAndFSRelPathForPath(targetPath) + if err != nil { + return nil, nil, err + } + fsRootTargetPaths[i] = fsRootTargetPath + } + fsRootTargetExcludePaths := make([]string, len(targetExcludePaths)) + for i, targetExcludePath := range targetExcludePaths { + _, fsRootTargetExcludePath, err := fsRootAndFSRelPathForPath(targetExcludePath) + if err != nil { + return nil, nil, err + } + fsRootTargetExcludePaths[i] = fsRootTargetExcludePath } - // Split the absolute path into components to get the FS root - absInputDirPathComponents := normalpath.Components(absInputDirPath) - fsRoot := absInputDirPathComponents[0] osRootBucket, err := storageosProvider.NewReadWriteBucket( fsRoot, storageos.ReadWriteBucketWithSymlinksIfSupported(), ) if err != nil { - return nil, err - } - var inputSubDirPath string - if len(absInputDirPathComponents) > 1 { - // The first component is the FS root, so we check this is safe and join - // the rest of the components for the relative input subdir path. - inputSubDirPath = normalpath.Join(absInputDirPathComponents[1:]...) 
+ return nil, nil, err } - mapPath, subDirPath, _, err := getMapPathAndSubDirPath( + osRootBucketTargeting, err := buftarget.NewBucketTargeting( ctx, logger, osRootBucket, - inputSubDirPath, + fsRootInputSubDirPath, + fsRootTargetPaths, + fsRootTargetExcludePaths, terminateFunc, ) if err != nil { - return nil, attemptToFixOSRootBucketPathErrors(fsRoot, err) + return nil, nil, attemptToFixOSRootBucketPathErrors(fsRoot, err) } - // Examples: + // osRootBucketTargeting returns the information on whether or not a controlling + // workspace was found based on the inputDirPath. + // + // *** CONTROLLING WORKSPACE FOUND *** + // + // In the case where a controlling workspace is found, we want to create a new bucket + // for the controlling workspace. + // If the inputDirPath is an absolute path, we want to use an absolute path: + // + // bucketPath := fsRoot + controllingWorkspace.Path() + // + // If the inputDirPath is a relative path, we want to use a relative path between the + // current working directory (pwd) and controlling workspace. + // + // bucketPath := Rel(Rel(fsRoot, pwd), controllingWorkspace.Path()) + // + // We do not need to remap the input dir, target paths, and target exclude paths + // returned by buftarget.BucketTargeting, because they are already relative to the + // controlling workspace. + // + // *** CONTROLLING WORKSPACE NOT FOUND *** + // + // In the case where a controlling workspace is not found, we have three outcomes for + // creating a new bucket. + // If the inputDirPath is an absolute path, we want to use an absolute path to the input + // path: + // + // bucketPath := Abs(inputDirPath) + // + // If the inputDirPath is a relative path, there are two possible outcomes: the input + // dir is within the context of the working directory or is outside of the context of + // the working directory. 
+ // + // In the case where the input dir, is within the context of the working directory, we + // use pwd: + // + // bucketPath := Rel(fsRoot,pwd) // - // inputDirPath: path/to/foo - // terminateFileLocation: path/to - // returnMapPath: path/to - // returnSubDirPath: foo - // Make bucket on: Rel(pwd, returnMapPath) + // In the case where the input dir is outside the context of the working directory, we + // use the input dir relative to the pwd: // - // inputDirPath: /users/alice/path/to/foo - // terminateFileLocation: /users/alice/path/to - // returnMapPath: /users/alice/path/to - // returnSubDirPath: foo - // Make bucket on: FS root + returnMapPath (since absolute) + // bucketPath := Rel(Rel(fsRoot,pwd), Rel(fsRoot, inputDirPath)) + // + // For all cases where no controlling workspace was found, we need to remap the input + // path, target paths, and target exclude paths to the root of the new bucket. var bucketPath string - if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { - bucketPath = normalpath.Join(fsRoot, mapPath) - } else { - pwd, err := osext.Getwd() - if err != nil { - return nil, err + var inputDir string + bucketTargetPaths := make([]string, len(osRootBucketTargeting.TargetPaths())) + bucketTargetExcludePaths := make([]string, len(osRootBucketTargeting.TargetExcludePaths())) + if controllingWorkspace := osRootBucketTargeting.ControllingWorkspace(); controllingWorkspace != nil { + if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { + bucketPath = normalpath.Join(fsRoot, osRootBucketTargeting.ControllingWorkspace().Path()) + } else { + // Relative input dir + pwdFSRelPath, err := getPWDFSRelPath() + if err != nil { + return nil, nil, err + } + bucketPath, err = normalpath.Rel(pwdFSRelPath, osRootBucketTargeting.ControllingWorkspace().Path()) + if err != nil { + return nil, nil, err + } } - // Removing the root so we can make mapPath relative. - // We are using normalpath.Components to split the path and remove the root (first component). 
- // The length of the root may vary depending on the OS and file path type (e.g. Windows paths), - // but normalpath.Components takes care of that. - pwdComponents := normalpath.Components(pwd) - if len(pwdComponents) > 1 { - pwd = normalpath.Normalize(normalpath.Join(pwdComponents[1:]...)) + inputDir = osRootBucketTargeting.InputDirPath() + bucketTargetPaths = osRootBucketTargeting.TargetPaths() + bucketTargetExcludePaths = osRootBucketTargeting.TargetExcludePaths() + } else { + // No controlling workspace found + if filepath.IsAbs(normalpath.Unnormalize(inputDirPath)) { + bucketPath = inputDirPath } else { - pwd = "" + // Relative input dir + pwdFSRelPath, err := getPWDFSRelPath() + if err != nil { + return nil, nil, err + } + if filepath.IsLocal(normalpath.Unnormalize(inputDirPath)) { + // Use current working directory + bucketPath = "." + } else { + // Relative input dir outside of working directory context + bucketPath, err = normalpath.Rel(pwdFSRelPath, fsRootInputSubDirPath) + if err != nil { + return nil, nil, err + } + } + } + // Map input dir, target paths, and target exclude paths to the new bucket path. 
+ _, bucketPathFSRelPath, err := fsRootAndFSRelPathForPath(bucketPath) + if err != nil { + return nil, nil, err } - bucketPath, err = normalpath.Rel(pwd, mapPath) + inputDir, err = normalpath.Rel(bucketPathFSRelPath, osRootBucketTargeting.InputDirPath()) if err != nil { - return nil, err + return nil, nil, err + } + for i, targetPath := range osRootBucketTargeting.TargetPaths() { + bucketTargetPaths[i], err = normalpath.Rel(bucketPathFSRelPath, targetPath) + if err != nil { + return nil, nil, err + } + } + for i, targetExcludePath := range osRootBucketTargeting.TargetExcludePaths() { + bucketTargetExcludePaths[i], err = normalpath.Rel(bucketPathFSRelPath, targetExcludePath) + if err != nil { + return nil, nil, err + } } } bucket, err := storageosProvider.NewReadWriteBucket( @@ -663,35 +773,22 @@ func getReadWriteBucketForOS( storageos.ReadWriteBucketWithSymlinksIfSupported(), ) if err != nil { - return nil, err + return nil, nil, err } - logger.Debug( - "creating new OS bucket for controlling workspace", - zap.String("inputDirPath", inputDirPath), - zap.String("workspacePath", bucketPath), - zap.String("subDirPath", subDirPath), - ) - return newReadWriteBucket( + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + logger, bucket, - subDirPath, - // This function turns paths into paths relative to the bucket. - func(externalPath string) (string, error) { - absBucketPath, err := filepath.Abs(normalpath.Unnormalize(bucketPath)) - if err != nil { - return "", err - } - // We shouldn't actually need to unnormalize externalPath but we do anyways. 
- absExternalPath, err := filepath.Abs(normalpath.Unnormalize(externalPath)) - if err != nil { - return "", err - } - path, err := filepath.Rel(absBucketPath, absExternalPath) - if err != nil { - return "", err - } - return normalpath.NormalizeAndValidate(path) - }, + inputDir, + bucketTargetPaths, + bucketTargetExcludePaths, + terminateFunc, ) + if err != nil { + return nil, nil, err + } + readWriteBucket := newReadWriteBucket(bucket, bucketPath, bucketTargeting) + return readWriteBucket, bucketTargeting, nil } // Use for ProtoFileRefs. @@ -700,203 +797,57 @@ func getReadBucketCloserForOSProtoFile( logger *zap.Logger, storageosProvider storageos.Provider, protoFilePath string, - terminateFunc TerminateFunc, - protoFileTerminateFunc TerminateFunc, -) (ReadBucketCloser, error) { - // First, we figure out which directory we consider to be the module that encapsulates - // this ProtoFileRef. If we find a buf.yaml or buf.work.yaml, then we use that as the directory. If we - // do not, we use the current directory as the directory. - protoFileDirPath := normalpath.Dir(protoFilePath) - absProtoFileDirPath, err := normalpath.NormalizeAndAbsolute(protoFileDirPath) - if err != nil { - return nil, err - } - // Split the absolute path into components to get the FS root - absProtoFileDirPathComponents := normalpath.Components(absProtoFileDirPath) - fsRoot := absProtoFileDirPathComponents[0] - osRootBucket, err := storageosProvider.NewReadWriteBucket( - fsRoot, - storageos.ReadWriteBucketWithSymlinksIfSupported(), - ) - if err != nil { - return nil, err - } - var inputSubDirPath string - if len(absProtoFileDirPathComponents) > 1 { - // The first component is the FS root, so we check this is safe and join - // the rest of the components for the relative input subdir path. - inputSubDirPath = normalpath.Join(absProtoFileDirPathComponents[1:]...) - } - // mapPath is the path to the bucket that contains a buf.yaml. 
- // subDirPath is the relative path from mapPath to the protoFileDirPath, but we don't use it. - mapPath, _, terminate, err := getMapPathAndSubDirPath( + terminateFunc buftarget.TerminateFunc, +) (ReadBucketCloser, buftarget.BucketTargeting, error) { + // For proto file refs, we treat the input directory as the directory of + // the file and the file as a target path. + // No other target paths and target exclude paths are supported with + // proto file refs. + protoFileDir := normalpath.Dir(protoFilePath) + readWriteBucket, bucketTargeting, err := getReadWriteBucketForOS( ctx, logger, - osRootBucket, - inputSubDirPath, - protoFileTerminateFunc, + storageosProvider, + protoFileDir, + []string{protoFilePath}, + nil, // no target exclude paths are supported for proto file refs + terminateFunc, ) if err != nil { - return nil, attemptToFixOSRootBucketPathErrors(fsRoot, err) - } - - var protoTerminateFileDirPath string - if !terminate { - // If we did not find a buf.yaml or buf.work.yaml, use the current directory. - // If the ProtoFileRef path was absolute, use an absolute path, otherwise relative. - // - // However, if the current directory does not contain the .proto file, we cannot use it, - // as we need the bucket to encapsulate the .proto file. In this case, we fall back - // to using the absolute directory of the .proto file. We need to do this because - // PathForExternalPath (defined in getReadWriteBucketForOS) needs to make sure that - // a given path can be made relative to the bucket, and be normalized and validated. - if filepath.IsAbs(normalpath.Unnormalize(protoFileDirPath)) { - pwd, err := osext.Getwd() - if err != nil { - return nil, err - } - protoTerminateFileDirPath = normalpath.Normalize(pwd) - } else { - protoTerminateFileDirPath = "." 
- } - absProtoTerminateFileDirPath, err := normalpath.NormalizeAndAbsolute(protoTerminateFileDirPath) - if err != nil { - return nil, err - } - if !normalpath.EqualsOrContainsPath(absProtoFileDirPath, absProtoTerminateFileDirPath, normalpath.Absolute) { - logger.Debug( - "did not find enclosing module or workspace for proto file ref and pwd does not encapsulate proto file", - zap.String("protoFilePath", protoFilePath), - zap.String("defaultingToAbsProtoFileDirPath", absProtoFileDirPath), - ) - protoTerminateFileDirPath = absProtoFileDirPath - } else { - logger.Debug( - "did not find enclosing module or workspace for proto file ref", - zap.String("protoFilePath", protoFilePath), - zap.String("defaultingToPwd", protoTerminateFileDirPath), - ) - } - } else { - // We found a buf.yaml or buf.work.yaml, use that directory. - // If we found a buf.yaml or buf.work.yaml and the ProtoFileRef path is absolute, use an absolute path, otherwise relative. - if filepath.IsAbs(normalpath.Unnormalize(protoFileDirPath)) { - protoTerminateFileDirPath = normalpath.Join(fsRoot, mapPath) - } else { - pwd, err := osext.Getwd() - if err != nil { - return nil, err - } - // Removing the root so we can make mapPath relative. - // We are using normalpath.Components to split the path and remove the root (first component). - // The length of the root may vary depending on the OS and file path type (e.g. Windows paths), - // but normalpath.Components takes care of that. 
- pwdComponents := normalpath.Components(pwd) - if len(pwdComponents) > 1 { - pwd = normalpath.Normalize(normalpath.Join(pwdComponents[1:]...)) - } else { - pwd = "" - } - protoTerminateFileDirPath, err = normalpath.Rel(pwd, mapPath) - if err != nil { - return nil, err - } - } - logger.Debug( - "found enclosing module or workspace for proto file ref", - zap.String("protoFilePath", protoFilePath), - zap.String("enclosingDirPath", protoTerminateFileDirPath), - ) + return nil, nil, err } - // Now, build a workspace bucket based on the directory we found. - // If the directory is a module directory, we'll get the enclosing workspace. - // If the directory is a workspace directory, this will effectively be a no-op. - readWriteBucket, err := getReadWriteBucketForOS(ctx, logger, storageosProvider, protoTerminateFileDirPath, terminateFunc) + return newReadBucketCloserForReadWriteBucket(readWriteBucket), bucketTargeting, nil +} + +// getPWDFSRelPath is a helper function that gets the relative path of the current working +// directory to the FS root. +func getPWDFSRelPath() (string, error) { + pwd, err := osext.Getwd() if err != nil { - return nil, err + return "", err } - return newReadBucketCloserForReadWriteBucket(readWriteBucket), nil + _, pwdFSRelPath, err := fsRootAndFSRelPathForPath(pwd) + if err != nil { + return "", err + } + return pwdFSRelPath, nil } -// Gets two values: -// -// - The directory relative to the bucket that the bucket should be mapped onto. -// - A new subDirPath that matches the inputSubDirPath but for the new ReadBucketCloser. -// -// Examples: -// -// inputSubDirPath: path/to/foo -// terminateFileLocation: path/to -// returnMapPath: path/to -// returnSubDirPath: foo -// -// inputSubDirPath: users/alice/path/to/foo -// terminateFileLocation: users/alice/path/to -// returnMapPath: users/alice/path/to -// returnSubDirPath: foo -// -// inputBucket: path/to/foo -// terminateFileLocation: NONE -// returnMapPath: path/to/foo -// returnSubDirPath: . 
- -// inputSubDirPath: . -// terminateFileLocation: NONE -// returnMapPath: . -// returnSubDirPath: . -func getMapPathAndSubDirPath( - ctx context.Context, - logger *zap.Logger, - inputBucket storage.ReadBucket, - inputSubDirPath string, - terminateFunc TerminateFunc, -) (mapPath string, subDirPath string, terminate bool, retErr error) { - inputSubDirPath, err := normalpath.NormalizeAndValidate(inputSubDirPath) +// fsRootAndFSRelPathForPath is a helper function that takes a path and returns the FS +// root and relative path to the FS root. +func fsRootAndFSRelPathForPath(path string) (string, string, error) { + absPath, err := normalpath.NormalizeAndAbsolute(path) if err != nil { - return "", "", false, err + return "", "", err } - // The for loops would take care of this base case, but we don't want - // to call storage.MapReadBucket unless we have to. - if terminateFunc == nil { - return inputSubDirPath, ".", false, nil - } - // We can't do this in a traditional loop like this: - // - // for curDirPath := inputSubDirPath; curDirPath != "."; curDirPath = normalpath.Dir(curDirPath) { - // - // If we do that, then we don't run terminateFunc for ".", which we want to so that we get - // the correct value for the terminate bool. - // - // Instead, we effectively do a do-while loop. - curDirPath := inputSubDirPath - for { - terminate, err := terminateFunc(ctx, inputBucket, curDirPath, inputSubDirPath) - if err != nil { - return "", "", false, err - } - if terminate { - logger.Debug( - "buffetch termination found", - zap.String("curDirPath", curDirPath), - zap.String("inputSubDirPath", inputSubDirPath), - ) - subDirPath, err := normalpath.Rel(curDirPath, inputSubDirPath) - if err != nil { - return "", "", false, err - } - return curDirPath, subDirPath, true, nil - } - if curDirPath == "." { - // Do this instead. This makes this loop effectively a do-while loop. 
- break - } - curDirPath = normalpath.Dir(curDirPath) + // Split the absolute path into components to get the FS root + absPathComponents := normalpath.Components(absPath) + fsRoot := absPathComponents[0] + fsRelPath, err := normalpath.Rel(fsRoot, absPath) + if err != nil { + return "", "", err } - logger.Debug( - "buffetch no termination found", - zap.String("inputSubDirPath", inputSubDirPath), - ) - return inputSubDirPath, ".", false, nil + return fsRoot, fsRelPath, nil } // We attempt to fix up paths we get back to better printing to the user. @@ -930,6 +881,29 @@ func attemptToFixOSRootBucketPathErrors(fsRoot string, err error) error { return err } +func validatePaths( + inputSubDirPath string, + targetPaths []string, + targetExcludePaths []string, +) error { + if _, err := normalpath.NormalizeAndValidate(inputSubDirPath); err != nil { + return err + } + if _, err := slicesext.MapError( + targetPaths, + normalpath.NormalizeAndValidate, + ); err != nil { + return err + } + if _, err := slicesext.MapError( + targetExcludePaths, + normalpath.NormalizeAndValidate, + ); err != nil { + return err + } + return nil +} + type getFileOptions struct { keepFileCompression bool } @@ -939,9 +913,10 @@ func newGetFileOptions() *getFileOptions { } type getReadBucketCloserOptions struct { - terminateFunc TerminateFunc - protoFileTerminateFunc TerminateFunc - copyToInMemory bool + terminateFunc buftarget.TerminateFunc + copyToInMemory bool + targetPaths []string + targetExcludePaths []string } func newGetReadBucketCloserOptions() *getReadBucketCloserOptions { @@ -949,7 +924,9 @@ func newGetReadBucketCloserOptions() *getReadBucketCloserOptions { } type getReadWriteBucketOptions struct { - terminateFunc TerminateFunc + terminateFunc buftarget.TerminateFunc + targetPaths []string + targetExcludePaths []string } func newGetReadWriteBucketOptions() *getReadWriteBucketOptions { diff --git a/private/buf/buffetch/internal/reader_test.go b/private/buf/buffetch/internal/reader_test.go index 
546545d296..11aa823a20 100644 --- a/private/buf/buffetch/internal/reader_test.go +++ b/private/buf/buffetch/internal/reader_test.go @@ -19,6 +19,7 @@ import ( "path/filepath" "testing" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/osext" "github.com/bufbuild/buf/private/pkg/storage" @@ -33,15 +34,18 @@ func TestGetReadBucketCloserForBucketNoTerminateFileName(t *testing.T) { ctx := context.Background() inputBucket, err := storageos.NewProvider().NewReadWriteBucket("testdata/bufyaml/one/two") require.NoError(t, err) - readBucketCloser, err := getReadBucketCloserForBucket( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForBucket( ctx, zap.NewNop(), storage.NopReadBucketCloser(inputBucket), "three/four/five", + nil, // no target paths + nil, // no target exclude paths nil, ) require.NoError(t, err) - require.Equal(t, ".", readBucketCloser.SubDirPath()) + require.Nil(t, bucketTargeting.ControllingWorkspace()) + require.Equal(t, "three/four/five", readBucketCloser.SubDirPath()) _, err = readBucketCloser.Stat(ctx, "buf.yaml") require.NoError(t, err) require.NoError(t, readBucketCloser.Close()) @@ -52,14 +56,17 @@ func TestGetReadBucketCloserTerminateFileName(t *testing.T) { ctx := context.Background() inputBucket, err := storageos.NewProvider().NewReadWriteBucket("testdata/bufyaml/one/two") require.NoError(t, err) - readBucketCloser, err := getReadBucketCloserForBucket( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForBucket( ctx, zap.NewNop(), storage.NopReadBucketCloser(inputBucket), "three/four/five", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, "three", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) _, err = readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) 
@@ -71,14 +78,17 @@ func TestGetReadBucketCloserForBucketNoSubDirPath(t *testing.T) { ctx := context.Background() inputBucket, err := storageos.NewProvider().NewReadWriteBucket("testdata/bufyaml/one/two/three/four/five") require.NoError(t, err) - readBucketCloser, err := getReadBucketCloserForBucket( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForBucket( ctx, zap.NewNop(), storage.NopReadBucketCloser(inputBucket), ".", + nil, // no target paths + nil, // no target exclude paths nil, ) require.NoError(t, err) + require.Nil(t, bucketTargeting.ControllingWorkspace()) require.Equal(t, ".", readBucketCloser.SubDirPath()) _, err = readBucketCloser.Stat(ctx, "buf.yaml") require.NoError(t, err) @@ -92,14 +102,17 @@ func TestGetReadBucketCloserForBucketAbs(t *testing.T) { require.NoError(t, err) inputBucket, err := storageos.NewProvider().NewReadWriteBucket(normalpath.Join(absDirPath, "testdata/bufyaml/one/two")) require.NoError(t, err) - readBucketCloser, err := getReadBucketCloserForBucket( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForBucket( ctx, zap.NewNop(), storage.NopReadBucketCloser(inputBucket), "three/four/five", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, "three", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) _, err = readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) @@ -109,16 +122,19 @@ func TestGetReadBucketCloserForBucketAbs(t *testing.T) { func TestGetReadWriteBucketForOSNoTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() - readWriteBucket, err := getReadWriteBucketForOS( + readWriteBucket, bucketTargeting, err := getReadWriteBucketForOS( ctx, zap.NewNop(), storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five", + nil, // no target paths + nil, // no target exclude paths nil, ) 
require.NoError(t, err) - require.Equal(t, ".", readWriteBucket.SubDirPath()) - fileInfo, err := readWriteBucket.Stat(ctx, "buf.yaml") + require.Nil(t, bucketTargeting.ControllingWorkspace()) + require.Equal(t, "testdata/bufyaml/one/two/three/four/five", readWriteBucket.SubDirPath()) + fileInfo, err := readWriteBucket.Stat(ctx, "testdata/bufyaml/one/two/three/four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) } @@ -126,14 +142,17 @@ func TestGetReadWriteBucketForOSNoTerminateFileName(t *testing.T) { func TestGetReadWriteBucketForOSTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() - readWriteBucket, err := getReadWriteBucketForOS( + readWriteBucket, bucketTargeting, err := getReadWriteBucketForOS( ctx, zap.NewNop(), storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readWriteBucket.SubDirPath()) fileInfo, err := readWriteBucket.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) @@ -157,14 +176,17 @@ func TestGetReadWriteBucketForOSParentPwd(t *testing.T) { panic(r) } }() - readWriteBucket, err := getReadWriteBucketForOS( + readWriteBucket, bucketTargeting, err := getReadWriteBucketForOS( ctx, zap.NewNop(), storageos.NewProvider(), "five", + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readWriteBucket.SubDirPath()) fileInfo, err := readWriteBucket.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) @@ -190,14 +212,17 @@ func TestGetReadWriteBucketForOSAbsPwd(t *testing.T) { panic(r) } }() - 
readWriteBucket, err := getReadWriteBucketForOS( + readWriteBucket, bucketTargeting, err := getReadWriteBucketForOS( ctx, zap.NewNop(), storageos.NewProvider(), normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five"), + nil, // no target paths + nil, // no target exclude paths testNewTerminateAtFileNamesFunc("buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readWriteBucket.SubDirPath()) fileInfo, err := readWriteBucket.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) @@ -210,16 +235,18 @@ func TestGetReadWriteBucketForOSAbsPwd(t *testing.T) { func TestGetReadBucketCloserForOSProtoFileNoWorkspaceTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five/proto/foo.proto", - nil, testNewTerminateAtFileNamesFunc("buf.yaml"), ) require.NoError(t, err) - require.Equal(t, ".", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "proto", readBucketCloser.SubDirPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -229,16 +256,18 @@ func TestGetReadBucketCloserForOSProtoFileNoWorkspaceTerminateFileName(t *testin func TestGetReadBucketCloserForOSProtoFileTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), 
storageos.NewProvider(), "testdata/bufyaml/one/two/three/four/five/proto/foo.proto", testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) - require.Equal(t, "four/five", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "testdata/bufyaml/one/two/three/four/five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -262,16 +291,18 @@ func TestGetReadBucketCloserForOSProtoFileParentPwd(t *testing.T) { panic(r) } }() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), "five/proto/foo.proto", testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) - require.Equal(t, "four/five", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, "five/buf.yaml", filepath.ToSlash(fileInfo.ExternalPath())) @@ -297,16 +328,18 @@ func TestGetReadBucketCloserForOSProtoFileAbsPwd(t *testing.T) { panic(r) } }() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), 
storageos.NewProvider(), normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five/proto/foo.proto"), testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) - require.Equal(t, "four/five", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/buf.yaml") require.NoError(t, err) require.Equal(t, normalpath.Join(absDirPath, "testdata/bufyaml/one/two/three/four/five/buf.yaml"), filepath.ToSlash(fileInfo.ExternalPath())) @@ -319,16 +352,18 @@ func TestGetReadBucketCloserForOSProtoFileAbsPwd(t *testing.T) { func TestGetReadBucketCloserForOSProtoFileNoBufYAMLTerminateFileName(t *testing.T) { t.Parallel() ctx := context.Background() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto", testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) - require.Equal(t, ".", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") require.NoError(t, err) require.Equal(t, "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto", filepath.ToSlash(fileInfo.ExternalPath())) @@ -349,16 +384,18 @@ func 
TestGetReadBucketCloserForOSProtoFileNoBufYAMLParentPwd(t *testing.T) { panic(r) } }() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), "five/proto/foo.proto", testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) - require.Equal(t, ".", readBucketCloser.SubDirPath()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "four/five/proto", readBucketCloser.SubDirPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") require.NoError(t, err) require.Equal(t, "five/proto/foo.proto", filepath.ToSlash(fileInfo.ExternalPath())) @@ -385,16 +422,18 @@ func TestGetReadBucketCloserForOSProtoFileNoBufYAMLAbsPwd(t *testing.T) { panic(r) } }() - readBucketCloser, err := getReadBucketCloserForOSProtoFile( + readBucketCloser, bucketTargeting, err := getReadBucketCloserForOSProtoFile( ctx, zap.NewNop(), storageos.NewProvider(), normalpath.Join(absDirPath, "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto"), testNewTerminateAtFileNamesFunc("buf.work.yaml"), - testNewTerminateAtFileNamesFunc("buf.yaml", "buf.work.yaml"), ) require.NoError(t, err) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) require.Equal(t, "four/five", readBucketCloser.SubDirPath()) + require.Len(t, bucketTargeting.TargetPaths(), 1) + require.Equal(t, "four/five/proto/foo.proto", bucketTargeting.TargetPaths()[0]) fileInfo, err := readBucketCloser.Stat(ctx, "four/five/proto/foo.proto") require.NoError(t, err) require.Equal(t, normalpath.Join(absDirPath, "testdata/nobufyaml/one/two/three/four/five/proto/foo.proto"), fileInfo.ExternalPath()) @@ -404,20 +443,21 @@ func 
TestGetReadBucketCloserForOSProtoFileNoBufYAMLAbsPwd(t *testing.T) { require.NoError(t, readBucketCloser.Close()) } -func testNewTerminateAtFileNamesFunc(terminateFileNames ...string) TerminateFunc { - return TerminateFunc( +func testNewTerminateAtFileNamesFunc(terminateFileNames ...string) buftarget.TerminateFunc { + return buftarget.TerminateFunc( func( ctx context.Context, bucket storage.ReadBucket, prefix string, - originalSubDirPath string, - ) (bool, error) { + inputDir string, + ) (buftarget.ControllingWorkspace, error) { for _, terminateFileName := range terminateFileNames { + // We do not test for config file logic here, so it is fine to return empty configs. if _, err := bucket.Stat(ctx, normalpath.Join(prefix, terminateFileName)); err == nil { - return true, nil + return buftarget.NewControllingWorkspace(prefix, nil, nil), nil } } - return false, nil + return nil, nil }, ) } diff --git a/private/buf/buffetch/reader.go b/private/buf/buffetch/reader.go index b74603f134..1faa5123d4 100644 --- a/private/buf/buffetch/reader.go +++ b/private/buf/buffetch/reader.go @@ -20,7 +20,7 @@ import ( "net/http" "github.com/bufbuild/buf/private/buf/buffetch/internal" - "github.com/bufbuild/buf/private/bufpkg/bufconfig" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/git" @@ -148,7 +148,7 @@ func (a *reader) GetSourceReadBucketCloser( container app.EnvStdinContainer, sourceRef SourceRef, options ...GetReadBucketCloserOption, -) (ReadBucketCloser, error) { +) (ReadBucketCloser, buftarget.BucketTargeting, error) { getReadBucketCloserOptions := newGetReadBucketCloserOptions() for _, option := range options { option(getReadBucketCloserOptions) @@ -157,8 +157,7 @@ func (a *reader) GetSourceReadBucketCloser( if !getReadBucketCloserOptions.noSearch { internalGetReadBucketCloserOptions = append( internalGetReadBucketCloserOptions, - 
internal.WithGetReadBucketCloserTerminateFunc(bufconfig.TerminateAtControllingWorkspace), - internal.WithGetReadBucketCloserProtoFileTerminateFunc(bufconfig.TerminateAtEnclosingModuleOrWorkspaceForProtoFileRef), + internal.WithGetReadBucketCloserTerminateFunc(buftarget.TerminateAtControllingWorkspace), ) } if getReadBucketCloserOptions.copyToInMemory { @@ -167,6 +166,14 @@ func (a *reader) GetSourceReadBucketCloser( internal.WithGetReadBucketCloserCopyToInMemory(), ) } + internalGetReadBucketCloserOptions = append( + internalGetReadBucketCloserOptions, + internal.WithGetReadBucketCloserTargetPaths(getReadBucketCloserOptions.targetPaths), + ) + internalGetReadBucketCloserOptions = append( + internalGetReadBucketCloserOptions, + internal.WithGetReadBucketCloserTargetExcludePaths(getReadBucketCloserOptions.targetExcludePaths), + ) return a.internalReader.GetReadBucketCloser( ctx, container, @@ -180,7 +187,7 @@ func (a *reader) GetDirReadWriteBucket( container app.EnvStdinContainer, dirRef DirRef, options ...GetReadWriteBucketOption, -) (ReadWriteBucket, error) { +) (ReadWriteBucket, buftarget.BucketTargeting, error) { getReadWriteBucketOptions := newGetReadWriteBucketOptions() for _, option := range options { option(getReadWriteBucketOptions) @@ -189,9 +196,17 @@ func (a *reader) GetDirReadWriteBucket( if !getReadWriteBucketOptions.noSearch { internalGetReadWriteBucketOptions = append( internalGetReadWriteBucketOptions, - internal.WithGetReadWriteBucketTerminateFunc(bufconfig.TerminateAtControllingWorkspace), + internal.WithGetReadWriteBucketTerminateFunc(buftarget.TerminateAtControllingWorkspace), ) } + internalGetReadWriteBucketOptions = append( + internalGetReadWriteBucketOptions, + internal.WithGetReadWriteBucketTargetPaths(getReadWriteBucketOptions.targetPaths), + ) + internalGetReadWriteBucketOptions = append( + internalGetReadWriteBucketOptions, + internal.WithGetReadWriteBucketTargetExcludePaths(getReadWriteBucketOptions.targetExcludePaths), + ) return 
a.internalReader.GetReadWriteBucket( ctx, container, diff --git a/private/buf/buftarget/bucket_targeting.go b/private/buf/buftarget/bucket_targeting.go new file mode 100644 index 0000000000..649d1dbe08 --- /dev/null +++ b/private/buf/buftarget/bucket_targeting.go @@ -0,0 +1,212 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buftarget + +import ( + "context" + + "github.com/bufbuild/buf/private/pkg/normalpath" + "github.com/bufbuild/buf/private/pkg/storage" + "go.uber.org/zap" +) + +// BucketTargeting provides targeting information for the bucket based on any controlling +// workspaces that have been found. +type BucketTargeting interface { + // ControllingWorkspace returns the information for the controlling workspace, if one was + // found. If not found, then this will be nil. + ControllingWorkspace() ControllingWorkspace + // InputDirPath returns the input directory relative to the root of the bucket. + InputDirPath() string + // TargetPaths returns the target paths relative to the root of the bucket. + TargetPaths() []string + // TargetExcludePaths returns the target exclude paths relative to the root of the bucket. + TargetExcludePaths() []string + + isBucketTargeting() +} + +// NewBucketTargeting returns new targeting information for the given bucket, input dir, +// target paths, and target exclude paths. 
+// +// The inputDir, targetPaths, and targetExcludePaths are expected to be relative to the +// root of the bucket. +// +// If a controlling workspace is found, the input dir, target paths, and target exclude +// paths are mapped as relative paths to the controlling workspace path. +// Otherwise, the input dir, target paths, and target exclude paths are returned as-is. +func NewBucketTargeting( + ctx context.Context, + logger *zap.Logger, + bucket storage.ReadBucket, + inputDir string, + targetPaths []string, + targetExcludePaths []string, + terminateFunc TerminateFunc, +) (BucketTargeting, error) { + return newBucketTargeting( + ctx, + logger, + bucket, + inputDir, + targetPaths, + targetExcludePaths, + terminateFunc, + ) +} + +// *** PRIVATE *** + +var _ BucketTargeting = &bucketTargeting{} + +type bucketTargeting struct { + controllingWorkspace ControllingWorkspace + inputDir string + targetPaths []string + targetExcludePaths []string +} + +func newBucketTargeting( + ctx context.Context, + logger *zap.Logger, + bucket storage.ReadBucket, + inputDir string, + targetPaths []string, + targetExcludePaths []string, + terminateFunc TerminateFunc, +) (*bucketTargeting, error) { + // First we map the controlling workspace for the inputDir. + controllingWorkspace, mappedInputDir, err := mapControllingWorkspaceAndPath( + ctx, + logger, + bucket, + inputDir, + terminateFunc, + ) + if err != nil { + return nil, err + } + // If no controlling workspace was found, we map the target paths and target exclude + // paths to the input dir. + mappedTargetPaths := targetPaths + mappedTargetExcludePaths := targetExcludePaths + if controllingWorkspace != nil && controllingWorkspace.Path() != "." { + // If a controlling workspace was found, we map the paths to the controlling workspace + // because we'll be working with a workspace bucket. 
+ for i, targetPath := range targetPaths { + mappedTargetPath, err := normalpath.Rel(controllingWorkspace.Path(), targetPath) + if err != nil { + return nil, err + } + mappedTargetPath, err = normalpath.NormalizeAndValidate(mappedTargetPath) + if err != nil { + return nil, err + } + mappedTargetPaths[i] = mappedTargetPath + } + for i, targetExcludePath := range targetExcludePaths { + mappedTargetExcludePath, err := normalpath.Rel(controllingWorkspace.Path(), targetExcludePath) + if err != nil { + return nil, err + } + mappedTargetExcludePath, err = normalpath.NormalizeAndValidate(mappedTargetExcludePath) + if err != nil { + return nil, err + } + mappedTargetExcludePaths[i] = mappedTargetExcludePath + } + } + return &bucketTargeting{ + controllingWorkspace: controllingWorkspace, + inputDir: mappedInputDir, + targetPaths: mappedTargetPaths, + targetExcludePaths: mappedTargetExcludePaths, + }, nil +} + +func (b *bucketTargeting) ControllingWorkspace() ControllingWorkspace { + return b.controllingWorkspace +} + +func (b *bucketTargeting) InputDirPath() string { + return b.inputDir +} + +func (b *bucketTargeting) TargetPaths() []string { + return b.targetPaths +} + +func (b *bucketTargeting) TargetExcludePaths() []string { + return b.targetExcludePaths +} + +func (*bucketTargeting) isBucketTargeting() {} + +// mapControllingWorkspaceAndPath takes a bucket, path, and terminate func and returns the +// controlling workspace and mapped path. +func mapControllingWorkspaceAndPath( + ctx context.Context, + logger *zap.Logger, + bucket storage.ReadBucket, + path string, + terminateFunc TerminateFunc, +) (ControllingWorkspace, string, error) { + path, err := normalpath.NormalizeAndValidate(path) + if err != nil { + return nil, "", err + } + // If no terminateFunc is passed, we can simply assume that we are mapping the bucket at + // the path. 
+ if terminateFunc == nil { + return nil, path, nil + } + // We can't do this in a traditional loop like this: + // + // for curDirPath := path; curDirPath != "."; curDirPath = normalpath.Dir(curDirPath) { + // + // If we do that, then we don't run terminateFunc for ".", which we want to so that we get + // the correct value for the terminate bool. + // + // Instead, we effectively do a do-while loop. + curDirPath := path + for { + controllingWorkspace, err := terminateFunc(ctx, bucket, curDirPath, path) + if err != nil { + return nil, "", err + } + if controllingWorkspace != nil { + logger.Debug( + "buffetch termination found", + zap.String("curDirPath", curDirPath), + zap.String("path", path), + ) + subDirPath, err := normalpath.Rel(curDirPath, path) + if err != nil { + return nil, "", err + } + return controllingWorkspace, subDirPath, nil + } + if curDirPath == "." { + break + } + curDirPath = normalpath.Dir(curDirPath) + } + logger.Debug( + "buffetch no termination found", + zap.String("path", path), + ) + // No controlling workspace is found, we simply return the input dir + return nil, path, nil +} diff --git a/private/buf/buftarget/buftarget.go b/private/buf/buftarget/buftarget.go new file mode 100644 index 0000000000..9bc8d96cd2 --- /dev/null +++ b/private/buf/buftarget/buftarget.go @@ -0,0 +1,15 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package buftarget diff --git a/private/buf/buftarget/controlling_workspace.go b/private/buf/buftarget/controlling_workspace.go new file mode 100644 index 0000000000..f2fe177cbf --- /dev/null +++ b/private/buf/buftarget/controlling_workspace.go @@ -0,0 +1,81 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buftarget + +import ( + "github.com/bufbuild/buf/private/bufpkg/bufconfig" +) + +// ControllingWorkspace is the information for the ControllingWorkspace. +type ControllingWorkspace interface { + // Path of the controlling workspace. This is a normalized path, relative to the root of + // the bucket the controlling workspace was found in. Semantically, this is where the v1 + // buf.work.yaml or v2 buf.yaml workspace configuration is located. + Path() string + // Returns a buf.work.yaml file that was found for the controlling workspace. + // This is empty if we are returning a buf.yaml. + BufWorkYAMLFile() bufconfig.BufWorkYAMLFile + // Returns a buf.yaml that was found for the controlling workspace. + // This is empty if we are returning a buf.work.yaml. + BufYAMLFile() bufconfig.BufYAMLFile + + isControllingWorkspace() +} + +// NewControllingWorkspace takes a path where the controlling workspace configuration is +// located and a workspace configuration file for the controlling workspace. 
+func NewControllingWorkspace( + path string, + bufWorkYAMLFile bufconfig.BufWorkYAMLFile, + bufYAMLFile bufconfig.BufYAMLFile, +) ControllingWorkspace { + return newControllingWorkspace(path, bufWorkYAMLFile, bufYAMLFile) +} + +// *** PRIVATE *** + +var _ ControllingWorkspace = &controllingWorkspace{} + +type controllingWorkspace struct { + path string + bufWorkYAMLFile bufconfig.BufWorkYAMLFile + bufYAMLFile bufconfig.BufYAMLFile +} + +func newControllingWorkspace( + path string, + bufWorkYAMLFile bufconfig.BufWorkYAMLFile, + bufYAMLFile bufconfig.BufYAMLFile, +) ControllingWorkspace { + return &controllingWorkspace{ + path: path, + bufWorkYAMLFile: bufWorkYAMLFile, + bufYAMLFile: bufYAMLFile, + } +} + +func (c *controllingWorkspace) Path() string { + return c.path +} + +func (c *controllingWorkspace) BufWorkYAMLFile() bufconfig.BufWorkYAMLFile { + return c.bufWorkYAMLFile +} + +func (c *controllingWorkspace) BufYAMLFile() bufconfig.BufYAMLFile { + return c.bufYAMLFile +} + +func (c *controllingWorkspace) isControllingWorkspace() {} diff --git a/private/buf/buftarget/terminate.go b/private/buf/buftarget/terminate.go new file mode 100644 index 0000000000..cc95f1a796 --- /dev/null +++ b/private/buf/buftarget/terminate.go @@ -0,0 +1,123 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package buftarget + +import ( + "context" + "errors" + "fmt" + "io/fs" + + "github.com/bufbuild/buf/private/bufpkg/bufconfig" + "github.com/bufbuild/buf/private/pkg/normalpath" + "github.com/bufbuild/buf/private/pkg/storage" +) + +// TerminateFunc is a termination function. +type TerminateFunc func( + ctx context.Context, + bucket storage.ReadBucket, + prefix string, + originalInputPath string, +) (ControllingWorkspace, error) + +// TerminateAtControllingWorkspace implements a TerminateFunc and returns controlling workspace +// if one is found at the given prefix. +func TerminateAtControllingWorkspace( + ctx context.Context, + bucket storage.ReadBucket, + prefix string, + originalInputPath string, +) (ControllingWorkspace, error) { + return terminateAtControllingWorkspace(ctx, bucket, prefix, originalInputPath) +} + +// TerminateAtV1Module is a special terminate func that returns a controlling workspace with +// a v1 module configuration if found at the given prefix. +func TerminateAtV1Module( + ctx context.Context, + bucket storage.ReadBucket, + prefix string, + originalInputPath string, +) (ControllingWorkspace, error) { + return terminateAtV1Module(ctx, bucket, prefix, originalInputPath) +} + +// *** PRIVATE *** + +func terminateAtControllingWorkspace( + ctx context.Context, + bucket storage.ReadBucket, + prefix string, + originalInputPath string, +) (ControllingWorkspace, error) { + bufWorkYAMLFile, err := bufconfig.GetBufWorkYAMLFileForPrefix(ctx, bucket, prefix) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return nil, err + } + bufWorkYAMLExists := err == nil + bufYAMLFile, err := bufconfig.GetBufYAMLFileForPrefix(ctx, bucket, prefix) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return nil, err + } + bufYAMLExists := err == nil + if bufWorkYAMLExists && bufYAMLExists { + // This isn't actually the external directory path, but we do the best we can here for now. 
+ return nil, fmt.Errorf("cannot have a buf.work.yaml and buf.yaml in the same directory %q", prefix) + } + relDirPath, err := normalpath.Rel(prefix, originalInputPath) + if err != nil { + return nil, err + } + if bufYAMLExists && bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 { + if prefix == originalInputPath { + return newControllingWorkspace(prefix, nil, bufYAMLFile), nil + } + for _, moduleConfig := range bufYAMLFile.ModuleConfigs() { + if normalpath.EqualsOrContainsPath(moduleConfig.DirPath(), relDirPath, normalpath.Relative) { + return newControllingWorkspace(prefix, nil, bufYAMLFile), nil + } + } + } + if bufWorkYAMLExists { + // For v1 workspaces, we ensure that the module path list actually contains the original + // input paths. + if prefix == originalInputPath { + return newControllingWorkspace(prefix, bufWorkYAMLFile, nil), nil + } + for _, dirPath := range bufWorkYAMLFile.DirPaths() { + if normalpath.EqualsOrContainsPath(dirPath, relDirPath, normalpath.Relative) { + return newControllingWorkspace(prefix, bufWorkYAMLFile, nil), nil + } + } + } + return nil, nil +} + +func terminateAtV1Module( + ctx context.Context, + bucket storage.ReadBucket, + prefix string, + originalInputPath string, +) (ControllingWorkspace, error) { + bufYAMLFile, err := bufconfig.GetBufYAMLFileForPrefix(ctx, bucket, prefix) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return nil, err + } + if err == nil && bufYAMLFile.FileVersion() == bufconfig.FileVersionV1 { + return newControllingWorkspace(prefix, nil, bufYAMLFile), nil + } + return nil, nil +} diff --git a/private/buf/buftarget/usage.gen.go b/private/buf/buftarget/usage.gen.go new file mode 100644 index 0000000000..ad6e3b6834 --- /dev/null +++ b/private/buf/buftarget/usage.gen.go @@ -0,0 +1,19 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Generated. DO NOT EDIT. + +package buftarget + +import _ "github.com/bufbuild/buf/private/usage" diff --git a/private/buf/bufworkspace/module_targeting.go b/private/buf/bufworkspace/module_targeting.go index 9034b9b803..bf686bb972 100644 --- a/private/buf/bufworkspace/module_targeting.go +++ b/private/buf/bufworkspace/module_targeting.go @@ -17,18 +17,22 @@ package bufworkspace import ( "fmt" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/slicesext" "github.com/bufbuild/buf/private/pkg/stringutil" + "github.com/bufbuild/buf/private/pkg/syserror" ) type moduleTargeting struct { // Whether this module is really a target module. // // False if this was not specified as a target module by the caller. - // Also false if there were config.targetPaths or config.protoFileTargetPath, but + // Also false if there were bucketTargeting.TargetPaths() or bucketTargeting.protoFileTargetPath, but // these paths did not match anything in the module. 
isTargetModule bool + // moduleDirPath is the directory path of the module + moduleDirPath string // relative to the actual moduleDirPath and the roots parsed from the buf.yaml moduleTargetPaths []string // relative to the actual moduleDirPath and the roots parsed from the buf.yaml @@ -41,86 +45,104 @@ type moduleTargeting struct { func newModuleTargeting( moduleDirPath string, roots []string, + bucketTargeting buftarget.BucketTargeting, config *workspaceBucketConfig, isTentativelyTargetModule bool, ) (*moduleTargeting, error) { if !isTentativelyTargetModule { // If this is not a target Module, we do not want to target anything, as targeting // paths for non-target Modules is an error. - return &moduleTargeting{}, nil + return &moduleTargeting{moduleDirPath: moduleDirPath}, nil } // If we have no target paths, then we always match the value of isTargetModule. // Otherwise, we need to see that at least one path matches the moduleDirPath for us // to consider this module a target. - isTargetModule := len(config.targetPaths) == 0 && config.protoFileTargetPath == "" + isTargetModule := len(bucketTargeting.TargetPaths()) == 0 && config.protoFileTargetPath == "" + var moduleTargetPaths []string var moduleTargetExcludePaths []string - for _, targetPath := range config.targetPaths { - if targetPath == moduleDirPath { - // We're just going to be realists in our error messages here. - // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, - // if we were really clever, we'd go back and just add this as a module path. - return nil, fmt.Errorf("module %q was specified with --path - specify this module path directly as an input", targetPath) + var moduleProtoFileTargetPath string + var includePackageFiles bool + if config.protoFileTargetPath != "" { + var err error + // We are currently returning the mapped proto file path as a target path in bucketTargeting. 
+ // At the user level, we do not allow setting target paths or exclude paths for proto file + // references, so we do this check here. + if len(bucketTargeting.TargetPaths()) != 1 { + return nil, syserror.New("ProtoFileTargetPath not properly returned with bucketTargeting") } - if normalpath.ContainsPath(moduleDirPath, targetPath, normalpath.Relative) { + protoFileTargetPath := bucketTargeting.TargetPaths()[0] + if normalpath.ContainsPath(moduleDirPath, protoFileTargetPath, normalpath.Relative) { isTargetModule = true - moduleTargetPath, err := normalpath.Rel(moduleDirPath, targetPath) + moduleProtoFileTargetPath, err = normalpath.Rel(moduleDirPath, protoFileTargetPath) if err != nil { return nil, err } - moduleTargetPaths = append(moduleTargetPaths, moduleTargetPath) - } - } - for _, targetExcludePath := range config.targetExcludePaths { - if targetExcludePath == moduleDirPath { - // We're just going to be realists in our error messages here. - // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, - // if we were really clever, we'd go back and just remove this as a module path if it was specified. - // This really should be allowed - how else do you exclude from a workspace? 
- return nil, fmt.Errorf("module %q was specified with --exclude-path - this flag cannot be used to specify module directories", targetExcludePath) - } - if normalpath.ContainsPath(moduleDirPath, targetExcludePath, normalpath.Relative) { - moduleTargetExcludePath, err := normalpath.Rel(moduleDirPath, targetExcludePath) + moduleProtoFileTargetPath, err = applyRootsToTargetPath(roots, moduleProtoFileTargetPath, normalpath.Relative) if err != nil { return nil, err } - moduleTargetExcludePaths = append(moduleTargetExcludePaths, moduleTargetExcludePath) + includePackageFiles = config.includePackageFiles } - } - moduleTargetPaths, err := slicesext.MapError( - moduleTargetPaths, - func(moduleTargetPath string) (string, error) { - return applyRootsToTargetPath(roots, moduleTargetPath, normalpath.Relative) - }, - ) - if err != nil { - return nil, err - } - moduleTargetExcludePaths, err = slicesext.MapError( - moduleTargetExcludePaths, - func(moduleTargetExcludePath string) (string, error) { - return applyRootsToTargetPath(roots, moduleTargetExcludePath, normalpath.Relative) - }, - ) - if err != nil { - return nil, err - } - var moduleProtoFileTargetPath string - var includePackageFiles bool - if config.protoFileTargetPath != "" && - normalpath.ContainsPath(moduleDirPath, config.protoFileTargetPath, normalpath.Relative) { - isTargetModule = true - moduleProtoFileTargetPath, err = normalpath.Rel(moduleDirPath, config.protoFileTargetPath) + } else { + var err error + // We use the bucketTargeting.TargetPaths() instead of the workspace config target paths + // since those are stripped of the path to the module. + for _, targetPath := range bucketTargeting.TargetPaths() { + if targetPath == moduleDirPath { + // We're just going to be realists in our error messages here. + // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, + // if we were really clever, we'd go back and just add this as a module path. 
+ return nil, fmt.Errorf("module %q was specified with --path, specify this module path directly as an input", targetPath) + } + if normalpath.ContainsPath(moduleDirPath, targetPath, normalpath.Relative) { + isTargetModule = true + moduleTargetPath, err := normalpath.Rel(moduleDirPath, targetPath) + if err != nil { + return nil, err + } + moduleTargetPaths = append(moduleTargetPaths, moduleTargetPath) + } + } + // We use the bucketTargeting.TargetExcludePaths() instead of the workspace config target + // exclude paths since those are stripped of the path to the module. + for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { + if targetExcludePath == moduleDirPath { + // We're just going to be realists in our error messages here. + // TODO FUTURE: Do we error here currently? If so, this error remains. For extra credit in the future, + // if we were really clever, we'd go back and just remove this as a module path if it was specified. + // This really should be allowed - how else do you exclude from a workspace? 
+ return nil, fmt.Errorf("module %q was specified with --exclude-path, this flag cannot be used to specify module directories", targetExcludePath) + } + if normalpath.ContainsPath(moduleDirPath, targetExcludePath, normalpath.Relative) { + moduleTargetExcludePath, err := normalpath.Rel(moduleDirPath, targetExcludePath) + if err != nil { + return nil, err + } + moduleTargetExcludePaths = append(moduleTargetExcludePaths, moduleTargetExcludePath) + } + } + moduleTargetPaths, err = slicesext.MapError( + moduleTargetPaths, + func(moduleTargetPath string) (string, error) { + return applyRootsToTargetPath(roots, moduleTargetPath, normalpath.Relative) + }, + ) if err != nil { return nil, err } - moduleProtoFileTargetPath, err = applyRootsToTargetPath(roots, moduleProtoFileTargetPath, normalpath.Relative) + moduleTargetExcludePaths, err = slicesext.MapError( + moduleTargetExcludePaths, + func(moduleTargetExcludePath string) (string, error) { + return applyRootsToTargetPath(roots, moduleTargetExcludePath, normalpath.Relative) + }, + ) if err != nil { return nil, err } - includePackageFiles = config.includePackageFiles } return &moduleTargeting{ + moduleDirPath: moduleDirPath, isTargetModule: isTargetModule, moduleTargetPaths: moduleTargetPaths, moduleTargetExcludePaths: moduleTargetExcludePaths, @@ -151,7 +173,7 @@ func applyRootsToTargetPath(roots []string, path string, pathType normalpath.Pat return "", err } // just in case - return normalpath.NormalizeAndValidate(targetPath) + return normalpath.Normalize(targetPath), nil default: // this should never happen return "", fmt.Errorf("%q is contained in multiple roots %s", path, stringutil.SliceToHumanStringQuoted(roots)) diff --git a/private/buf/bufworkspace/option.go b/private/buf/bufworkspace/option.go index 734218d024..0e34caa224 100644 --- a/private/buf/bufworkspace/option.go +++ b/private/buf/bufworkspace/option.go @@ -15,13 +15,8 @@ package bufworkspace import ( - "errors" - "fmt" - "path/filepath" - 
"github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/slicesext" - "github.com/bufbuild/buf/private/pkg/syserror" ) // WorkspaceBucketOption is an option for a new Workspace created by a Bucket. @@ -29,42 +24,17 @@ type WorkspaceBucketOption interface { applyToWorkspaceBucketConfig(*workspaceBucketConfig) } -// WorkspaceDepManagerOption is an option for a new WorkspaceDepManager. -type WorkspaceDepManagerOption interface { - applyToWorkspaceDepManagerConfig(*workspaceDepManagerConfig) -} - // WorkspaceModuleKeyOption is an option for a new Workspace created by a ModuleKey. type WorkspaceModuleKeyOption interface { applyToWorkspaceModuleKeyConfig(*workspaceModuleKeyConfig) } -// WorkspaceBucketOrDepManagerOption is an option that applies to either creating -// a Workspace by Bucket, or creating a WorkspaceDepManager. -type WorkspaceBucketAndDepManagerOption interface { - WorkspaceBucketOption - WorkspaceDepManagerOption -} - // WorkspaceOption is an option for a new Workspace created by either a Bucket or ModuleKey. type WorkspaceBucketAndModuleKeyOption interface { WorkspaceBucketOption WorkspaceModuleKeyOption } -// This selects the specific directory within the Workspace bucket to target. -// -// Example: We have modules at foo/bar, foo/baz. "." will result in both -// modules being selected, so will "foo", but "foo/bar" will result in only -// the foo/bar module. -// -// A TargetSubDirPath of "." is equivalent of not setting this option. -func WithTargetSubDirPath(targetSubDirPath string) WorkspaceBucketAndDepManagerOption { - return &workspaceTargetSubDirPathOption{ - targetSubDirPath: targetSubDirPath, - } -} - // WithProtoFileTargetPath returns a new WorkspaceBucketOption that specifically targets // a single .proto file, and optionally targets all other .proto files that are in the same package. // @@ -107,7 +77,7 @@ func WithIgnoreAndDisallowV1BufWorkYAMLs() WorkspaceBucketOption { // need to be built as non-targeted. 
// // These paths have to be within the subDirPath, if it exists. -func WithTargetPaths(targetPaths []string, targetExcludePaths []string) WorkspaceBucketAndModuleKeyOption { +func WithTargetPaths(targetPaths []string, targetExcludePaths []string) WorkspaceModuleKeyOption { return &workspaceTargetPathsOption{ targetPaths: targetPaths, targetExcludePaths: targetExcludePaths, @@ -140,28 +110,11 @@ func WithConfigOverride(configOverride string) WorkspaceBucketAndModuleKeyOption // *** PRIVATE *** -type workspaceTargetSubDirPathOption struct { - targetSubDirPath string -} - -func (s *workspaceTargetSubDirPathOption) applyToWorkspaceBucketConfig(config *workspaceBucketConfig) { - config.targetSubDirPath = s.targetSubDirPath -} - -func (s *workspaceTargetSubDirPathOption) applyToWorkspaceDepManagerConfig(config *workspaceDepManagerConfig) { - config.targetSubDirPath = s.targetSubDirPath -} - type workspaceTargetPathsOption struct { targetPaths []string targetExcludePaths []string } -func (t *workspaceTargetPathsOption) applyToWorkspaceBucketConfig(config *workspaceBucketConfig) { - config.targetPaths = t.targetPaths - config.targetExcludePaths = t.targetExcludePaths -} - func (t *workspaceTargetPathsOption) applyToWorkspaceModuleKeyConfig(config *workspaceModuleKeyConfig) { config.targetPaths = t.targetPaths config.targetExcludePaths = t.targetExcludePaths @@ -196,9 +149,6 @@ func (c *workspaceIgnoreAndDisallowV1BufWorkYAMLsOption) applyToWorkspaceBucketC } type workspaceBucketConfig struct { - targetSubDirPath string - targetPaths []string - targetExcludePaths []string protoFileTargetPath string includePackageFiles bool configOverride string @@ -210,90 +160,8 @@ func newWorkspaceBucketConfig(options []WorkspaceBucketOption) (*workspaceBucket for _, option := range options { option.applyToWorkspaceBucketConfig(config) } - var err error - config.targetSubDirPath, err = normalpath.NormalizeAndValidate(config.targetSubDirPath) - if err != nil { - return nil, err - } - 
config.targetPaths, err = slicesext.MapError( - config.targetPaths, - normalpath.NormalizeAndValidate, - ) - if err != nil { - return nil, err - } - config.targetExcludePaths, err = slicesext.MapError( - config.targetExcludePaths, - normalpath.NormalizeAndValidate, - ) - if err != nil { - return nil, err - } if config.protoFileTargetPath != "" { - config.protoFileTargetPath, err = normalpath.NormalizeAndValidate(config.protoFileTargetPath) - if err != nil { - return nil, err - } - } - if len(config.targetPaths) > 0 || len(config.targetExcludePaths) > 0 { - if config.protoFileTargetPath != "" { - // This is just a system error. We messed up and called both exclusive options. - return nil, syserror.New("cannot set targetPaths/targetExcludePaths with protoFileTargetPaths") - } - // These are actual user errors. This is us verifying --path and --exclude-path. - // An argument could be made this should be at a higher level for user errors, and then - // if it gets to this point, this should be a system error. - // - // We don't use --path, --exclude-path here because these paths have had ExternalPathToPath - // applied to them. Which is another argument to do this at a higher level. - for _, targetPath := range config.targetPaths { - if targetPath == config.targetSubDirPath { - return nil, errors.New("given input is equal to a value of --path - this has no effect and is disallowed") - } - // We want this to be deterministic. We don't have that many paths in almost all cases. - // This being n^2 shouldn't be a huge issue unless someone has a diabolical wrapping shell script. - // If this becomes a problem, there's optimizations we can do by turning targetExcludePaths into - // a map but keeping the index in config.targetExcludePaths around to prioritize what error - // message to print. 
- for _, targetExcludePath := range config.targetExcludePaths { - if targetPath == targetExcludePath { - unnormalizedTargetPath := filepath.Clean(normalpath.Unnormalize(targetPath)) - return nil, fmt.Errorf("cannot set the same path for both --path and --exclude-path: %s", unnormalizedTargetPath) - } - // This is new post-refactor. Before, we gave precedence to --path. While a change, - // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. - if normalpath.EqualsOrContainsPath(targetExcludePath, targetPath, normalpath.Relative) { - // We clean and unnormalize the target paths to show in the error message - unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) - unnormalizedTargetPath := filepath.Clean(normalpath.Unnormalize(targetPath)) - return nil, fmt.Errorf(`excluded path "%s" contains targeted path "%s", which means all paths in "%s" will be excluded`, unnormalizedTargetExcludePath, unnormalizedTargetPath, unnormalizedTargetPath) - } - } - } - for _, targetExcludePath := range config.targetExcludePaths { - if targetExcludePath == config.targetSubDirPath { - unnormalizedTargetSubDirPath := filepath.Clean(normalpath.Unnormalize(config.targetSubDirPath)) - unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) - return nil, fmt.Errorf("given input %s is equal to a value of --exclude-path %s - this would exclude everything", unnormalizedTargetSubDirPath, unnormalizedTargetExcludePath) - } - } - } - return config, nil -} - -type workspaceDepManagerConfig struct { - targetSubDirPath string -} - -func newWorkspaceDepManagerConfig(options []WorkspaceDepManagerOption) (*workspaceDepManagerConfig, error) { - config := &workspaceDepManagerConfig{} - for _, option := range options { - option.applyToWorkspaceDepManagerConfig(config) - } - var err error - config.targetSubDirPath, err = normalpath.NormalizeAndValidate(config.targetSubDirPath) - if err != 
nil { - return nil, err + config.protoFileTargetPath = normalpath.Normalize(config.protoFileTargetPath) } return config, nil } diff --git a/private/buf/bufworkspace/workspace_dep_manager_provider.go b/private/buf/bufworkspace/workspace_dep_manager_provider.go index f89b731bf0..25846f7302 100644 --- a/private/buf/bufworkspace/workspace_dep_manager_provider.go +++ b/private/buf/bufworkspace/workspace_dep_manager_provider.go @@ -17,6 +17,8 @@ package bufworkspace import ( "context" + "github.com/bufbuild/buf/private/buf/buftarget" + "github.com/bufbuild/buf/private/bufpkg/bufconfig" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/syserror" "github.com/bufbuild/buf/private/pkg/tracing" @@ -42,7 +44,7 @@ type WorkspaceDepManagerProvider interface { GetWorkspaceDepManager( ctx context.Context, bucket storage.ReadWriteBucket, - options ...WorkspaceDepManagerOption, + bucketTargeting buftarget.BucketTargeting, ) (WorkspaceDepManager, error) } @@ -77,29 +79,15 @@ func newWorkspaceDepManagerProvider( func (w *workspaceDepManagerProvider) GetWorkspaceDepManager( ctx context.Context, bucket storage.ReadWriteBucket, - options ...WorkspaceDepManagerOption, + bucketTargeting buftarget.BucketTargeting, ) (_ WorkspaceDepManager, retErr error) { - config, err := newWorkspaceDepManagerConfig(options) - if err != nil { - return nil, err + controllingWorkspace := bucketTargeting.ControllingWorkspace() + if controllingWorkspace == nil || controllingWorkspace.BufWorkYAMLFile() != nil { + return nil, syserror.New("no supported workspace found") } - workspaceTargeting, err := newWorkspaceTargeting( - ctx, - w.logger, - bucket, - config.targetSubDirPath, - nil, - true, // Disallow buf.work.yamls when doing management of deps for v1 - ) - if err != nil { - return nil, err - } - if workspaceTargeting.isV2() { - return newWorkspaceDepManager(bucket, workspaceTargeting.v2DirPath, true), nil - } - if len(workspaceTargeting.v1DirPaths) != 1 { - // This is 
because of disallowing buf.work.yamls - return nil, syserror.Newf("expected a single v1 dir path from workspace targeting but got %v", workspaceTargeting.v1DirPaths) + bufYAMLFile := controllingWorkspace.BufYAMLFile() + if bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 { + return newWorkspaceDepManager(bucket, controllingWorkspace.Path(), true), nil } - return newWorkspaceDepManager(bucket, workspaceTargeting.v1DirPaths[0], false), nil + return newWorkspaceDepManager(bucket, bucketTargeting.InputDirPath(), false), nil } diff --git a/private/buf/bufworkspace/workspace_provider.go b/private/buf/bufworkspace/workspace_provider.go index b2c607ffba..059c1fb9c7 100644 --- a/private/buf/bufworkspace/workspace_provider.go +++ b/private/buf/bufworkspace/workspace_provider.go @@ -20,10 +20,9 @@ import ( "fmt" "io/fs" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufconfig" "github.com/bufbuild/buf/private/bufpkg/bufmodule" - "github.com/bufbuild/buf/private/pkg/normalpath" - "github.com/bufbuild/buf/private/pkg/slicesext" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/syserror" "github.com/bufbuild/buf/private/pkg/tracing" @@ -52,6 +51,7 @@ type WorkspaceProvider interface { GetWorkspaceForBucket( ctx context.Context, bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, options ...WorkspaceBucketOption, ) (Workspace, error) @@ -211,11 +211,11 @@ func (w *workspaceProvider) GetWorkspaceForModuleKey( func (w *workspaceProvider) GetWorkspaceForBucket( ctx context.Context, bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, options ...WorkspaceBucketOption, ) (_ Workspace, retErr error) { ctx, span := w.tracer.Start(ctx, tracing.WithErr(&retErr)) defer span.End() - config, err := newWorkspaceBucketConfig(options) if err != nil { return nil, err @@ -230,95 +230,42 @@ func (w *workspaceProvider) GetWorkspaceForBucket( workspaceTargeting, err := 
newWorkspaceTargeting( ctx, w.logger, + config, bucket, - config.targetSubDirPath, + bucketTargeting, overrideBufYAMLFile, config.ignoreAndDisallowV1BufWorkYAMLs, ) if err != nil { return nil, err } - if workspaceTargeting.isV2() { + if workspaceTargeting.v2 != nil { return w.getWorkspaceForBucketBufYAMLV2( ctx, bucket, - config, - workspaceTargeting.v2DirPath, - overrideBufYAMLFile, + workspaceTargeting.v2, ) } return w.getWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1( ctx, bucket, - config, - workspaceTargeting.v1DirPaths, - overrideBufYAMLFile, + workspaceTargeting.v1, ) } func (w *workspaceProvider) getWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1( ctx context.Context, bucket storage.ReadBucket, - config *workspaceBucketConfig, - moduleDirPaths []string, - // This can be nil, this is only set if config.configOverride was set, which we - // deal with outside of this function. - overrideBufYAMLFile bufconfig.BufYAMLFile, + v1WorkspaceTargeting *v1Targeting, ) (*workspace, error) { - // config.targetSubDirPath is the input subDirPath. We only want to target modules that are inside - // this subDirPath. Example: bufWorkYAMLDirPath is "foo", subDirPath is "foo/bar", - // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" - // and "foo/bar/bat". - // - // This is new behavior - before, we required that you input an exact match for the module directory path, - // but now, we will take all the modules underneath this workspace. - isTargetFunc := func(moduleDirPath string) bool { - return normalpath.EqualsOrContainsPath(config.targetSubDirPath, moduleDirPath, normalpath.Relative) - } moduleSetBuilder := bufmodule.NewModuleSetBuilder(ctx, w.tracer, w.moduleDataProvider, w.commitProvider) - bucketIDToModuleConfig := make(map[string]bufconfig.ModuleConfig) - // We use this to detect different refs across different files. 
- moduleFullNameStringToConfiguredDepModuleRefString := make(map[string]string) - var allConfiguredDepModuleRefs []bufmodule.ModuleRef - // We keep track of if any module was tentatively targeted, and then actually targeted via - // the paths flags. We use this pre-building of the ModuleSet to see if the --path and - // --exclude-path flags resulted in no targeted modules. This condition is represented - // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false - // - // If hadIsTentativelyTargetModule is false, this means that our input subDirPath was not - // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml - // directories, and this is a system error - this should be verified before we reach this function. - var hadIsTentativelyTargetModule bool - var hadIsTargetModule bool - for _, moduleDirPath := range moduleDirPaths { - moduleConfig, configuredDepModuleRefs, err := getModuleConfigAndConfiguredDepModuleRefsV1Beta1OrV1( - ctx, - bucket, - moduleDirPath, - overrideBufYAMLFile, - ) - if err != nil { - return nil, err - } - for _, configuredDepModuleRef := range configuredDepModuleRefs { - moduleFullNameString := configuredDepModuleRef.ModuleFullName().String() - configuredDepModuleRefString := configuredDepModuleRef.String() - existingConfiguredDepModuleRefString, ok := moduleFullNameStringToConfiguredDepModuleRefString[moduleFullNameString] - if !ok { - // We haven't encountered a ModuleRef with this ModuleFullName yet, add it. - allConfiguredDepModuleRefs = append(allConfiguredDepModuleRefs, configuredDepModuleRef) - moduleFullNameStringToConfiguredDepModuleRefString[moduleFullNameString] = configuredDepModuleRefString - } else if configuredDepModuleRefString != existingConfiguredDepModuleRefString { - // We encountered the same ModuleRef by ModuleFullName, but with a different Ref. 
- return nil, fmt.Errorf("found different refs for the same module within buf.yaml deps in the workspace: %s %s", configuredDepModuleRefString, existingConfiguredDepModuleRefString) - } - } - bucketIDToModuleConfig[moduleDirPath] = moduleConfig + for _, moduleBucketAndTargeting := range v1WorkspaceTargeting.moduleBucketsAndTargeting { + mappedModuleBucket := moduleBucketAndTargeting.bucket + moduleTargeting := moduleBucketAndTargeting.moduleTargeting bufLockFile, err := bufconfig.GetBufLockFileForPrefix( ctx, - bucket, - // buf.lock files live at the module root - moduleDirPath, + bucket, // Need to use the non-mapped bucket since the mapped bucket excludes the buf.lock + moduleTargeting.moduleDirPath, bufconfig.BufLockFileWithDigestResolver( func(ctx context.Context, remote string, commitID uuid.UUID) (bufmodule.Digest, error) { commitKey, err := bufmodule.NewCommitKey(remote, commitID, bufmodule.DigestTypeB4) @@ -354,40 +301,27 @@ func (w *workspaceProvider) getWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1( ) } } - // We figure out based on the paths if this is really a target module in moduleTargeting. 
- isTentativelyTargetModule := isTargetFunc(moduleDirPath) - if isTentativelyTargetModule { - hadIsTentativelyTargetModule = true - } - mappedModuleBucket, moduleTargeting, err := getMappedModuleBucketAndModuleTargeting( - ctx, - bucket, - config, - moduleDirPath, - moduleConfig, - isTentativelyTargetModule, - ) - if err != nil { - return nil, err - } - if moduleTargeting.isTargetModule { - hadIsTargetModule = true - } - v1BufYAMLObjectData, err := bufconfig.GetBufYAMLV1Beta1OrV1ObjectDataForPrefix(ctx, bucket, moduleDirPath) + v1BufYAMLObjectData, err := bufconfig.GetBufYAMLV1Beta1OrV1ObjectDataForPrefix(ctx, bucket, moduleTargeting.moduleDirPath) if err != nil { if !errors.Is(err, fs.ErrNotExist) { return nil, err } } - v1BufLockObjectData, err := bufconfig.GetBufLockV1Beta1OrV1ObjectDataForPrefix(ctx, bucket, moduleDirPath) + v1BufLockObjectData, err := bufconfig.GetBufLockV1Beta1OrV1ObjectDataForPrefix(ctx, bucket, moduleTargeting.moduleDirPath) if err != nil { if !errors.Is(err, fs.ErrNotExist) { return nil, err } } + moduleConfig, ok := v1WorkspaceTargeting.bucketIDToModuleConfig[moduleTargeting.moduleDirPath] + if !ok { + // This should not happen since moduleBucketAndTargeting is derived from the module + // configs, however, we return this error as a safety check + return nil, fmt.Errorf("no module config found for module at: %q", moduleTargeting.moduleDirPath) + } moduleSetBuilder.AddLocalModule( mappedModuleBucket, - moduleDirPath, + moduleTargeting.moduleDirPath, // bucket ID moduleTargeting.isTargetModule, bufmodule.LocalModuleWithModuleFullName(moduleConfig.ModuleFullName()), bufmodule.LocalModuleWithTargetPaths( @@ -402,21 +336,14 @@ func (w *workspaceProvider) getWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1( bufmodule.LocalModuleWithV1Beta1OrV1BufLockObjectData(v1BufLockObjectData), ) } - if !hadIsTentativelyTargetModule { - return nil, syserror.Newf("subDirPath %q did not result in any target modules from moduleDirPaths %v", 
config.targetSubDirPath, moduleDirPaths) - } - if !hadIsTargetModule { - // It would be nice to have a better error message than this in the long term. - return nil, bufmodule.ErrNoTargetProtoFiles - } moduleSet, err := moduleSetBuilder.Build() if err != nil { return nil, err } return w.getWorkspaceForBucketModuleSet( moduleSet, - bucketIDToModuleConfig, - allConfiguredDepModuleRefs, + v1WorkspaceTargeting.bucketIDToModuleConfig, + v1WorkspaceTargeting.allConfiguredDepModuleRefs, false, ) } @@ -424,41 +351,9 @@ func (w *workspaceProvider) getWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1( func (w *workspaceProvider) getWorkspaceForBucketBufYAMLV2( ctx context.Context, bucket storage.ReadBucket, - config *workspaceBucketConfig, - dirPath string, - // This can be nil, this is only set if config.configOverride was set, which we - // deal with outside of this function. - overrideBufYAMLFile bufconfig.BufYAMLFile, + v2Targeting *v2Targeting, ) (*workspace, error) { - bucket = storage.MapReadBucket(bucket, storage.MapOnPrefix(dirPath)) - - var bufYAMLFile bufconfig.BufYAMLFile - var err error - if overrideBufYAMLFile != nil { - bufYAMLFile = overrideBufYAMLFile - } else { - bufYAMLFile, err = bufconfig.GetBufYAMLFileForPrefix(ctx, bucket, ".") - if err != nil { - // This should be apparent from above functions. - return nil, syserror.Newf("error getting v2 buf.yaml: %w", err) - } - } - if bufYAMLFile.FileVersion() != bufconfig.FileVersionV2 { - return nil, syserror.Newf("expected v2 buf.yaml but got %v", bufYAMLFile.FileVersion()) - } - - // config.targetSubDirPath is the input targetSubDirPath. We only want to target modules that are inside - // this targetSubDirPath. Example: bufWorkYAMLDirPath is "foo", targetSubDirPath is "foo/bar", - // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" - // and "foo/bar/bat". 
- // - // This is new behavior - before, we required that you input an exact match for the module directory path, - // but now, we will take all the modules underneath this workspace. - isTargetFunc := func(moduleDirPath string) bool { - return normalpath.EqualsOrContainsPath(config.targetSubDirPath, moduleDirPath, normalpath.Relative) - } moduleSetBuilder := bufmodule.NewModuleSetBuilder(ctx, w.tracer, w.moduleDataProvider, w.commitProvider) - bufLockFile, err := bufconfig.GetBufLockFileForPrefix( ctx, bucket, @@ -488,45 +383,18 @@ func (w *workspaceProvider) getWorkspaceForBucketBufYAMLV2( ) } } - - bucketIDToModuleConfig := make(map[string]bufconfig.ModuleConfig) - // We keep track of if any module was tentatively targeted, and then actually targeted via - // the paths flags. We use this pre-building of the ModuleSet to see if the --path and - // --exclude-path flags resulted in no targeted modules. This condition is represented - // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false - // - // If hadIsTentativelyTargetModule is false, this means that our input subDirPath was not - // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml - // directories, and this is a system error - this should be verified before we reach this function. - var hadIsTentativelyTargetModule bool - var hadIsTargetModule bool - var moduleDirPaths []string - for _, moduleConfig := range bufYAMLFile.ModuleConfigs() { - moduleDirPath := moduleConfig.DirPath() - moduleDirPaths = append(moduleDirPaths, moduleDirPath) - bucketIDToModuleConfig[moduleDirPath] = moduleConfig - // We figure out based on the paths if this is really a target module in moduleTargeting. 
- isTentativelyTargetModule := isTargetFunc(moduleDirPath) - if isTentativelyTargetModule { - hadIsTentativelyTargetModule = true - } - mappedModuleBucket, moduleTargeting, err := getMappedModuleBucketAndModuleTargeting( - ctx, - bucket, - config, - moduleDirPath, - moduleConfig, - isTentativelyTargetModule, - ) - if err != nil { - return nil, err - } - if moduleTargeting.isTargetModule { - hadIsTargetModule = true + for _, moduleBucketAndTargeting := range v2Targeting.moduleBucketsAndTargeting { + mappedModuleBucket := moduleBucketAndTargeting.bucket + moduleTargeting := moduleBucketAndTargeting.moduleTargeting + moduleConfig, ok := v2Targeting.bucketIDToModuleConfig[moduleTargeting.moduleDirPath] + if !ok { + // This should not happen since moduleBucketAndTargeting is derived from the module + // configs, however, we return this error as a safety check + return nil, fmt.Errorf("no module config found for module at: %q", moduleTargeting.moduleDirPath) } moduleSetBuilder.AddLocalModule( mappedModuleBucket, - moduleDirPath, + moduleTargeting.moduleDirPath, moduleTargeting.isTargetModule, bufmodule.LocalModuleWithModuleFullName(moduleConfig.ModuleFullName()), bufmodule.LocalModuleWithTargetPaths( @@ -539,22 +407,14 @@ func (w *workspaceProvider) getWorkspaceForBucketBufYAMLV2( ), ) } - if !hadIsTentativelyTargetModule { - return nil, syserror.Newf("targetSubDirPath %q did not result in any target modules from moduleDirPaths %v", config.targetSubDirPath, moduleDirPaths) - } - if !hadIsTargetModule { - // It would be nice to have a better error message than this in the long term. - return nil, bufmodule.ErrNoTargetProtoFiles - } moduleSet, err := moduleSetBuilder.Build() if err != nil { return nil, err } - // bufYAMLFile.ConfiguredDepModuleRefs() is unique by ModuleFullName. 
return w.getWorkspaceForBucketModuleSet( moduleSet, - bucketIDToModuleConfig, - bufYAMLFile.ConfiguredDepModuleRefs(), + v2Targeting.bucketIDToModuleConfig, + v2Targeting.bufYAMLFile.ConfiguredDepModuleRefs(), true, ) } @@ -591,101 +451,3 @@ func (w *workspaceProvider) getWorkspaceForBucketModuleSet( isV2, ), nil } - -func getModuleConfigAndConfiguredDepModuleRefsV1Beta1OrV1( - ctx context.Context, - bucket storage.ReadBucket, - moduleDirPath string, - overrideBufYAMLFile bufconfig.BufYAMLFile, -) (bufconfig.ModuleConfig, []bufmodule.ModuleRef, error) { - var bufYAMLFile bufconfig.BufYAMLFile - var err error - if overrideBufYAMLFile != nil { - bufYAMLFile = overrideBufYAMLFile - } else { - bufYAMLFile, err = bufconfig.GetBufYAMLFileForPrefix(ctx, bucket, moduleDirPath) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - // If we do not have a buf.yaml, we use the default config. - // This is a v1 config. - return bufconfig.DefaultModuleConfigV1, nil, nil - } - return nil, nil, err - } - } - // Just a sanity check. This should have already been validated, but let's make sure. - if bufYAMLFile.FileVersion() != bufconfig.FileVersionV1Beta1 && bufYAMLFile.FileVersion() != bufconfig.FileVersionV1 { - return nil, nil, syserror.Newf("buf.yaml at %s did not have version v1beta1 or v1", moduleDirPath) - } - moduleConfigs := bufYAMLFile.ModuleConfigs() - if len(moduleConfigs) != 1 { - // This is a system error. This should never happen. 
- return nil, nil, syserror.Newf("received %d ModuleConfigs from a v1beta1 or v1 BufYAMLFile", len(moduleConfigs)) - } - return moduleConfigs[0], bufYAMLFile.ConfiguredDepModuleRefs(), nil -} - -func getMappedModuleBucketAndModuleTargeting( - ctx context.Context, - bucket storage.ReadBucket, - config *workspaceBucketConfig, - moduleDirPath string, - moduleConfig bufconfig.ModuleConfig, - isTargetModule bool, -) (storage.ReadBucket, *moduleTargeting, error) { - moduleBucket := storage.MapReadBucket( - bucket, - storage.MapOnPrefix(moduleDirPath), - ) - rootToExcludes := moduleConfig.RootToExcludes() - var rootBuckets []storage.ReadBucket - for root, excludes := range rootToExcludes { - // Roots only applies to .proto files. - mappers := []storage.Mapper{ - // need to do match extension here - // https://github.com/bufbuild/buf/issues/113 - storage.MatchPathExt(".proto"), - storage.MapOnPrefix(root), - } - if len(excludes) != 0 { - var notOrMatchers []storage.Matcher - for _, exclude := range excludes { - notOrMatchers = append( - notOrMatchers, - storage.MatchPathContained(exclude), - ) - } - mappers = append( - mappers, - storage.MatchNot( - storage.MatchOr( - notOrMatchers..., - ), - ), - ) - } - rootBuckets = append( - rootBuckets, - storage.MapReadBucket( - moduleBucket, - mappers..., - ), - ) - } - rootBuckets = append( - rootBuckets, - bufmodule.GetDocStorageReadBucket(ctx, moduleBucket), - bufmodule.GetLicenseStorageReadBucket(moduleBucket), - ) - mappedModuleBucket := storage.MultiReadBucket(rootBuckets...) 
- moduleTargeting, err := newModuleTargeting( - moduleDirPath, - slicesext.MapKeysToSlice(rootToExcludes), - config, - isTargetModule, - ) - if err != nil { - return nil, nil, err - } - return mappedModuleBucket, moduleTargeting, nil -} diff --git a/private/buf/bufworkspace/workspace_targeting.go b/private/buf/bufworkspace/workspace_targeting.go index 2898e5dc45..80e4b63c03 100644 --- a/private/buf/bufworkspace/workspace_targeting.go +++ b/private/buf/bufworkspace/workspace_targeting.go @@ -17,72 +17,105 @@ package bufworkspace import ( "context" "errors" + "fmt" + "io/fs" + "path/filepath" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufconfig" + "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/pkg/normalpath" + "github.com/bufbuild/buf/private/pkg/slicesext" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/syserror" "go.uber.org/zap" ) -// workspaceTargeting figures out what directories to target and at what version for both -// the WorkpaceProvider and WorkspaceDepManagerProvider. +// workspaceTargeting figures out if we are working with a v1 or v2 workspace based on +// buftarget.BucketTargeting information and provides the workspace targeting information +// to WorkspaceProvider and WorkspaceDepManagerProvider. +// +// For v1 workspaces, this provides the bucket IDs to module configs, mapped module buckets and +// module targeting information, and all configured dependency module refs. +// +// # For v2 workspaces, this provides the bucket IDs to module configs, module dir paths, +// and mapped module buckets and module targeting. +// +// In the case where no controlling workspace is found, we default to a v1 workspace with +// a single module directory path, which is equivalent to the input dir. +// +// We only ever return one of v1 or v2. 
type workspaceTargeting struct { - // v1DirPaths are the target v1 directory paths that should contain Modules. - // - // This is set if we are within a buf.work.yaml, if we directly targeted a v1 buf.yaml, or - // if no configuration was found (in which case we default to v1). - v1DirPaths []string - // v2DirPath is the path to the v2 buf.yaml. - v2DirPath string + v1 *v1Targeting + v2 *v2Targeting +} + +type v1Targeting struct { + bucketIDToModuleConfig map[string]bufconfig.ModuleConfig + moduleBucketsAndTargeting []*moduleBucketAndModuleTargeting + allConfiguredDepModuleRefs []bufmodule.ModuleRef +} + +type v2Targeting struct { + bufYAMLFile bufconfig.BufYAMLFile + bucketIDToModuleConfig map[string]bufconfig.ModuleConfig + moduleBucketsAndTargeting []*moduleBucketAndModuleTargeting +} + +type moduleBucketAndModuleTargeting struct { + bucket storage.ReadBucket + moduleTargeting *moduleTargeting } func newWorkspaceTargeting( ctx context.Context, logger *zap.Logger, + config *workspaceBucketConfig, bucket storage.ReadBucket, - targetSubDirPath string, + bucketTargeting buftarget.BucketTargeting, overrideBufYAMLFile bufconfig.BufYAMLFile, ignoreAndDisallowV1BufWorkYAMLs bool, ) (*workspaceTargeting, error) { - targetSubDirPath, err := normalpath.NormalizeAndValidate(targetSubDirPath) - if err != nil { + if err := validateBucketTargeting(bucketTargeting, config.protoFileTargetPath); err != nil { return nil, err } if overrideBufYAMLFile != nil { logger.Debug( "targeting workspace with config override", - zap.String("targetSubDirPath", targetSubDirPath), + zap.String("input dir", bucketTargeting.InputDirPath()), ) switch fileVersion := overrideBufYAMLFile.FileVersion(); fileVersion { case bufconfig.FileVersionV1Beta1, bufconfig.FileVersionV1: - // Operate as if there was no buf.work.yaml, only a v1 buf.yaml at the specified - // targetSubDirPath, specifying a single module. 
- return &workspaceTargeting{ - v1DirPaths: []string{targetSubDirPath}, - }, nil + return v1WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + []string{bucketTargeting.InputDirPath()}, + overrideBufYAMLFile, + ) case bufconfig.FileVersionV2: - // Operate as if there was a v2 buf.yaml at the target sub directory path. - return &workspaceTargeting{ - v2DirPath: targetSubDirPath, - }, nil + return v2WorkspaceTargeting(ctx, config, bucket, bucketTargeting, overrideBufYAMLFile) default: return nil, syserror.Newf("unknown FileVersion: %v", fileVersion) } } - - findControllingWorkspaceResult, err := bufconfig.FindControllingWorkspace(ctx, bucket, ".", targetSubDirPath) - if err != nil { - return nil, err - } - if findControllingWorkspaceResult.Found() { - // We have a v1 buf.work.yaml, per the documentation on bufconfig.FindControllingWorkspace. - if bufWorkYAMLDirPaths := findControllingWorkspaceResult.BufWorkYAMLDirPaths(); len(bufWorkYAMLDirPaths) > 0 { + if controllingWorkspace := bucketTargeting.ControllingWorkspace(); controllingWorkspace != nil { + // This is a v2 workspace. + if controllingWorkspace.BufYAMLFile() != nil { + logger.Debug( + "targeting workspace based on v2 buf.yaml", + zap.String("input dir", bucketTargeting.InputDirPath()), + ) + return v2WorkspaceTargeting(ctx, config, bucket, bucketTargeting, controllingWorkspace.BufYAMLFile()) + } + // This is a v1 workspace. + if bufWorkYAMLFile := controllingWorkspace.BufWorkYAMLFile(); bufWorkYAMLFile != nil { if ignoreAndDisallowV1BufWorkYAMLs { - // targetSubDirPath is normalized, so if it was empty, it will be ".". - if targetSubDirPath == "." { - // If config.targetSubDirPath is ".", this means we targeted a buf.work.yaml, not an individual module within the buf.work.yaml - // This is disallowed. + // This means that we attempted to target a v1 workspace at the buf.work.yaml, not + // an individual module within the v1 workspace defined in buf.work.yaml. + // This is disallowed. 
+ if bucketTargeting.InputDirPath() == "." { return nil, errors.New(`Workspaces defined with buf.work.yaml cannot be updated or pushed, only the individual modules within a workspace can be updated or pushed. Workspaces defined with a v2 buf.yaml can be updated, see the migration documentation for more details.`) @@ -91,42 +124,568 @@ defined with a v2 buf.yaml can be updated, see the migration documentation for m // the workspace entirely, and just act as if the buf.work.yaml did not exist. logger.Debug( "targeting workspace, ignoring v1 buf.work.yaml, just building on module at target", - zap.String("targetSubDirPath", targetSubDirPath), + zap.String("input dir", bucketTargeting.InputDirPath()), + ) + return v1WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + []string{bucketTargeting.InputDirPath()}, // Assume we are targeting only the module at the input dir + nil, ) - return &workspaceTargeting{ - v1DirPaths: []string{targetSubDirPath}, - }, nil } - logger.Debug( - "targeting workspace based on v1 buf.work.yaml", - zap.String("targetSubDirPath", targetSubDirPath), - zap.Strings("bufWorkYAMLDirPaths", bufWorkYAMLDirPaths), + return v1WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + bufWorkYAMLFile.DirPaths(), + nil, ) - return &workspaceTargeting{ - v1DirPaths: bufWorkYAMLDirPaths, - }, nil } - logger.Debug( - "targeting workspace based on v2 buf.yaml", - zap.String("targetSubDirPath", targetSubDirPath), - ) - // We have a v2 buf.yaml. - return &workspaceTargeting{ - v2DirPath: ".", - }, nil } - logger.Debug( "targeting workspace with no found buf.work.yaml or buf.yaml", - zap.String("targetSubDirPath", targetSubDirPath), + zap.String("input dir", bucketTargeting.InputDirPath()), + ) + // We did not find any buf.work.yaml or buf.yaml, we invoke fallback logic. 
+ return fallbackWorkspaceTargeting( + ctx, + logger, + config, + bucket, + bucketTargeting, ) - // We did not find any buf.work.yaml or buf.yaml, operate as if a - // default v1 buf.yaml was at config.targetSubDirPath. +} + +func v2WorkspaceTargeting( + ctx context.Context, + config *workspaceBucketConfig, + bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, + bufYAMLFile bufconfig.BufYAMLFile, +) (*workspaceTargeting, error) { + // We keep track of if any module was tentatively targeted, and then actually targeted via + // the paths flags. We use this pre-building of the ModuleSet to see if the --path and + // --exclude-path flags resulted in no targeted modules. This condition is represented + // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false + // + // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputDirPath() was not + // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml + // directories, and this is a system error - this should be verified before we reach this function. + var hadIsTentativelyTargetModule bool + var hadIsTargetModule bool + moduleDirPaths := make([]string, 0, len(bufYAMLFile.ModuleConfigs())) + bucketIDToModuleConfig := make(map[string]bufconfig.ModuleConfig) + moduleBucketsAndTargeting := make([]*moduleBucketAndModuleTargeting, 0, len(bufYAMLFile.ModuleConfigs())) + for _, moduleConfig := range bufYAMLFile.ModuleConfigs() { + moduleDirPath := moduleConfig.DirPath() + moduleDirPaths = append(moduleDirPaths, moduleDirPath) + bucketIDToModuleConfig[moduleDirPath] = moduleConfig + // bucketTargeting.InputDirPath() is the input targetSubDirPath. We only want to target modules that are inside + // this targetSubDirPath. Example: bufWorkYAMLDirPath is "foo", targetSubDirPath is "foo/bar", + // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" + // and "foo/bar/bat". 
+ // + // This is new behavior - before, we required that you input an exact match for the module directory path, + // but now, we will take all the modules underneath this workspace. + isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputDirPath(), moduleDirPath, normalpath.Relative) + // We ignore this check for proto file refs, since the input is considered the directory + // of the proto file reference, which is unlikely to contain a module in its entirety. + // In the future, it would be nice to handle this more elegantly. + if config.protoFileTargetPath != "" { + isTentativelyTargetModule = true + } + if isTentativelyTargetModule { + hadIsTentativelyTargetModule = true + } + mappedModuleBucket, moduleTargeting, err := getMappedModuleBucketAndModuleTargeting( + ctx, + config, + bucket, + bucketTargeting, + moduleDirPath, + moduleConfig, + isTentativelyTargetModule, + ) + if err != nil { + return nil, err + } + if moduleTargeting.isTargetModule { + hadIsTargetModule = true + } + moduleBucketsAndTargeting = append(moduleBucketsAndTargeting, &moduleBucketAndModuleTargeting{ + bucket: mappedModuleBucket, + moduleTargeting: moduleTargeting, + }) + } + if !hadIsTentativelyTargetModule { + // Check if the input is overlapping within a module dir path. If so, return a nicer + // error. In the future, we want to remove special treatment for input dir, and it + // should be treated just like any target path. + return nil, checkForOverlap(bucketTargeting.InputDirPath(), moduleDirPaths) + } + if !hadIsTargetModule { + // It would be nice to have a better error message than this in the long term. 
+ return nil, bufmodule.ErrNoTargetProtoFiles + } + return &workspaceTargeting{ + v2: &v2Targeting{ + bufYAMLFile: bufYAMLFile, + bucketIDToModuleConfig: bucketIDToModuleConfig, + moduleBucketsAndTargeting: moduleBucketsAndTargeting, + }, + }, nil +} + +func v1WorkspaceTargeting( + ctx context.Context, + config *workspaceBucketConfig, + bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, + moduleDirPaths []string, + overrideBufYAMLFile bufconfig.BufYAMLFile, +) (*workspaceTargeting, error) { + // We keep track of if any module was tentatively targeted, and then actually targeted via + // the paths flags. We use this pre-building of the ModuleSet to see if the --path and + // --exclude-path flags resulted in no targeted modules. This condition is represented + // by hadIsTentativelyTargetModule == true && hadIsTargetModule = false + // + // If hadIsTentativelyTargetModule is false, this means that our input bucketTargeting.InputDirPath() was not + // actually representative of any module that we detected in buf.work.yaml or v2 buf.yaml + // directories, and this is a system error - this should be verified before we reach this function. + var hadIsTentativelyTargetModule bool + var hadIsTargetModule bool + var allConfiguredDepModuleRefs []bufmodule.ModuleRef + bucketIDToModuleConfig := make(map[string]bufconfig.ModuleConfig) + // We use this to detect different refs across different files. 
+ moduleFullNameStringToConfiguredDepModuleRefString := make(map[string]string) + moduleBucketsAndTargeting := make([]*moduleBucketAndModuleTargeting, 0, len(moduleDirPaths)) + for _, moduleDirPath := range moduleDirPaths { + moduleConfig, configuredDepModuleRefs, err := getModuleConfigAndConfiguredDepModuleRefsV1Beta1OrV1( + ctx, + bucket, + moduleDirPath, + overrideBufYAMLFile, + ) + if err != nil { + return nil, err + } + for _, configuredDepModuleRef := range configuredDepModuleRefs { + moduleFullNameString := configuredDepModuleRef.ModuleFullName().String() + configuredDepModuleRefString := configuredDepModuleRef.String() + existingConfiguredDepModuleRefString, ok := moduleFullNameStringToConfiguredDepModuleRefString[moduleFullNameString] + if !ok { + // We haven't encountered a ModuleRef with this ModuleFullName yet, add it. + allConfiguredDepModuleRefs = append(allConfiguredDepModuleRefs, configuredDepModuleRef) + moduleFullNameStringToConfiguredDepModuleRefString[moduleFullNameString] = configuredDepModuleRefString + } else if configuredDepModuleRefString != existingConfiguredDepModuleRefString { + // We encountered the same ModuleRef by ModuleFullName, but with a different Ref. + return nil, fmt.Errorf("found different refs for the same module within buf.yaml deps in the workspace: %s %s", configuredDepModuleRefString, existingConfiguredDepModuleRefString) + } + } + bucketIDToModuleConfig[moduleDirPath] = moduleConfig + // We only want to target modules that are inside the bucketTargeting.InputDirPath(). + // Example: bufWorkYAMLDirPath is "foo", bucketTargeting.InputDirPath() is "foo/bar", + // listed directories are "bar/baz", "bar/bat", "other". We want to include "foo/bar/baz" + // and "foo/bar/bat". + // + // This is new behavior - before, we required that you input an exact match for the module directory path, + // but now, we will take all the modules underneath this workspace. 
+ isTentativelyTargetModule := normalpath.EqualsOrContainsPath(bucketTargeting.InputDirPath(), moduleDirPath, normalpath.Relative) + // We ignore this check for proto file refs, since the input is considered the directory + // of the proto file reference, which is unlikely to contain a module in its entirety. + // In the future, it would be nice to handle this more elegantly. + if config.protoFileTargetPath != "" { + isTentativelyTargetModule = true + } + if isTentativelyTargetModule { + hadIsTentativelyTargetModule = true + } + mappedModuleBucket, moduleTargeting, err := getMappedModuleBucketAndModuleTargeting( + ctx, + config, + bucket, + bucketTargeting, + moduleDirPath, + moduleConfig, + isTentativelyTargetModule, + ) + if err != nil { + return nil, err + } + if moduleTargeting.isTargetModule { + hadIsTargetModule = true + } + moduleBucketsAndTargeting = append(moduleBucketsAndTargeting, &moduleBucketAndModuleTargeting{ + bucket: mappedModuleBucket, + moduleTargeting: moduleTargeting, + }) + } + if !hadIsTentativelyTargetModule { + // Check if the input is overlapping within a module dir path. If so, return a nicer + // error. In the future, we want to remove special treatment for input dir, and it + // should be treated just like any target path. + return nil, checkForOverlap(bucketTargeting.InputDirPath(), moduleDirPaths) + } + if !hadIsTargetModule { + // It would be nice to have a better error message than this in the long term. 
+ return nil, bufmodule.ErrNoTargetProtoFiles + } return &workspaceTargeting{ - v1DirPaths: []string{targetSubDirPath}, + v1: &v1Targeting{ + bucketIDToModuleConfig: bucketIDToModuleConfig, + moduleBucketsAndTargeting: moduleBucketsAndTargeting, + allConfiguredDepModuleRefs: allConfiguredDepModuleRefs, + }, }, nil } -func (w *workspaceTargeting) isV2() bool { - return w.v2DirPath != "" +// fallbackWorkspaceTargeting is the fallback logic when there is no config override or +// controlling workspace discovered through bucket targeting. +// +// 1. We check if the input is in a v1 module. If yes, then we simply use that. +// 2. If no v1 module was found for the input, we check the target paths, if there are any. +// For each target path, we check if it is part of a workspace or v1 module. +// +// a. If we find a v1 or v2 workspace, we ensure that all them resolve to the same workspace. +// b. If we find no workspace, we keep track of any v1 modules we find along the way. We +// then build those v1 modules as a v1 workspace. +// +// 3. In the case where we find nothing, we set the input as a v1 module in a v1 workspace. +func fallbackWorkspaceTargeting( + ctx context.Context, + logger *zap.Logger, + config *workspaceBucketConfig, + bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, +) (*workspaceTargeting, error) { + var v1ModulePaths []string + inputDirV1Module, err := checkForControllingWorkspaceOrV1Module( + ctx, + logger, + bucket, + bucketTargeting.InputDirPath(), + true, + ) + if err != nil { + return nil, err + } + if inputDirV1Module != nil { + v1ModulePaths = append(v1ModulePaths, inputDirV1Module.Path()) + } else if config.protoFileTargetPath == "" { + // No v1 module found for the input dir, if the input was not a protoFileRef, then + // check the target paths to see if a workspace or v1 module exists. 
+ var v1BufWorkYAML bufconfig.BufWorkYAMLFile + var v2BufYAMLFile bufconfig.BufYAMLFile + var controllingWorkspacePath string + for _, targetPath := range bucketTargeting.TargetPaths() { + controllingWorkspaceOrModule, err := checkForControllingWorkspaceOrV1Module( + ctx, + logger, + bucket, + targetPath, + false, + ) + if err != nil { + return nil, err + } + if controllingWorkspaceOrModule != nil { + // v1 workspace found + if bufWorkYAMLFile := controllingWorkspaceOrModule.BufWorkYAMLFile(); bufWorkYAMLFile != nil { + if controllingWorkspacePath != "" && controllingWorkspaceOrModule.Path() != controllingWorkspacePath { + return nil, fmt.Errorf("different controlling workspaces found: %q, %q", controllingWorkspacePath, controllingWorkspaceOrModule.Path()) + } + controllingWorkspacePath = controllingWorkspaceOrModule.Path() + v1BufWorkYAML = bufWorkYAMLFile + continue + } + // v2 workspace or v1 module found + if bufYAMLFile := controllingWorkspaceOrModule.BufYAMLFile(); bufYAMLFile != nil { + if bufYAMLFile.FileVersion() == bufconfig.FileVersionV2 { + if controllingWorkspacePath != "" && controllingWorkspaceOrModule.Path() != controllingWorkspacePath { + return nil, fmt.Errorf("different controlling workspaces found: %q, %q", controllingWorkspacePath, controllingWorkspaceOrModule.Path()) + } + controllingWorkspacePath = controllingWorkspaceOrModule.Path() + v2BufYAMLFile = bufYAMLFile + continue + } + if bufYAMLFile.FileVersion() == bufconfig.FileVersionV1 { + v1ModulePaths = append(v1ModulePaths, controllingWorkspaceOrModule.Path()) + } + } + } + } + // If multiple workspaces were found, we return an error since we don't support building + // multiple workspaces. + if v1BufWorkYAML != nil && v2BufYAMLFile != nil { + return nil, fmt.Errorf("multiple workspaces found") + } + // If we found a workspace and v1 modules that were not contained in the workspace, we + // do not support building multiple workspaces. 
+ if controllingWorkspacePath != "" && len(v1ModulePaths) > 0 { + return nil, fmt.Errorf("found a workspace %q that does not contain all found modules: %v", controllingWorkspacePath, v1ModulePaths) + } + if v2BufYAMLFile != nil { + return v2WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + v2BufYAMLFile, + ) + } + if v1BufWorkYAML != nil { + v1ModulePaths = v1BufWorkYAML.DirPaths() + } + } + // If we still have no v1 module paths, then we go to the final fallback and set a v1 + // module at the input dir. + if len(v1ModulePaths) == 0 { + v1ModulePaths = append(v1ModulePaths, bucketTargeting.InputDirPath()) + } + return v1WorkspaceTargeting( + ctx, + config, + bucket, + bucketTargeting, + v1ModulePaths, + nil, + ) +} + +func validateBucketTargeting( + bucketTargeting buftarget.BucketTargeting, + protoFilePath string, +) error { + if protoFilePath != "" { + // We set the proto file path as a target path, which we handle in module targeting, + // so we expect len(bucketTargeting.TargetPaths()) to be exactly 1. + if len(bucketTargeting.TargetPaths()) != 1 || len(bucketTargeting.TargetExcludePaths()) > 0 { + // This is just a system error. We messed up and called both exclusive options. + return syserror.New("cannot set targetPaths/targetExcludePaths with protoFileTargetPaths") + } + } + // These are actual user errors. This is us verifying --path and --exclude-path. + // An argument could be made this should be at a higher level for user errors, and then + // if it gets to this point, this should be a system error. + // + // We don't use --path, --exclude-path here because these paths have had ExternalPathToPath + // applied to them. Which is another argument to do this at a higher level. + for _, targetPath := range bucketTargeting.TargetPaths() { + if targetPath == bucketTargeting.InputDirPath() { + // The targetPath/InputDirPath may not equal something on the command line as we have done + // targeting via workspaces by now, so do not print them. 
+ return errors.New("given input is equal to a value of --path, this has no effect and is disallowed") + } + // We want this to be deterministic. We don't have that many paths in almost all cases. + // This being n^2 shouldn't be a huge issue unless someone has a diabolical wrapping shell script. + // If this becomes a problem, there's optimizations we can do by turning targetExcludePaths into + // a map but keeping the index in targetExcludePaths around to prioritize what error + // message to print. + for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { + if targetPath == targetExcludePath { + unnormalizedTargetPath := filepath.Clean(normalpath.Unnormalize(targetPath)) + return fmt.Errorf(`cannot set the same path for both --path and --exclude-path: "%s"`, unnormalizedTargetPath) + } + // This is new post-refactor. Before, we gave precedence to --path. While a change, + // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
+ if normalpath.EqualsOrContainsPath(targetExcludePath, targetPath, normalpath.Relative) { + // We clean and unnormalize the target paths to show in the error message + unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) + unnormalizedTargetPath := filepath.Clean(normalpath.Unnormalize(targetPath)) + return fmt.Errorf(`excluded path "%s" contains targeted path "%s", which means all paths in "%s" will be excluded`, unnormalizedTargetExcludePath, unnormalizedTargetPath, unnormalizedTargetPath) + } + } + } + for _, targetExcludePath := range bucketTargeting.TargetExcludePaths() { + if targetExcludePath == bucketTargeting.InputDirPath() { + unnormalizedTargetSubDirPath := filepath.Clean(normalpath.Unnormalize(bucketTargeting.InputDirPath())) + unnormalizedTargetExcludePath := filepath.Clean(normalpath.Unnormalize(targetExcludePath)) + return fmt.Errorf(`given input "%s" is equal to a value of --exclude-path "%s", this would exclude everything`, unnormalizedTargetSubDirPath, unnormalizedTargetExcludePath) + } + } + return nil +} + +func getMappedModuleBucketAndModuleTargeting( + ctx context.Context, + config *workspaceBucketConfig, + bucket storage.ReadBucket, + bucketTargeting buftarget.BucketTargeting, + moduleDirPath string, + moduleConfig bufconfig.ModuleConfig, + isTargetModule bool, +) (storage.ReadBucket, *moduleTargeting, error) { + moduleBucket := storage.MapReadBucket( + bucket, + storage.MapOnPrefix(moduleDirPath), + ) + rootToExcludes := moduleConfig.RootToExcludes() + var rootBuckets []storage.ReadBucket + for root, excludes := range rootToExcludes { + // Roots only applies to .proto files. 
+ mappers := []storage.Mapper{ + // need to do match extension here + // https://github.com/bufbuild/buf/issues/113 + storage.MatchPathExt(".proto"), + storage.MapOnPrefix(root), + } + if len(excludes) != 0 { + var notOrMatchers []storage.Matcher + for _, exclude := range excludes { + notOrMatchers = append( + notOrMatchers, + storage.MatchPathContained(exclude), + ) + } + mappers = append( + mappers, + storage.MatchNot( + storage.MatchOr( + notOrMatchers..., + ), + ), + ) + } + rootBuckets = append( + rootBuckets, + storage.MapReadBucket( + moduleBucket, + mappers..., + ), + ) + } + rootBuckets = append( + rootBuckets, + bufmodule.GetDocStorageReadBucket(ctx, moduleBucket), + bufmodule.GetLicenseStorageReadBucket(moduleBucket), + ) + mappedModuleBucket := storage.MultiReadBucket(rootBuckets...) + moduleTargeting, err := newModuleTargeting( + moduleDirPath, + slicesext.MapKeysToSlice(rootToExcludes), + bucketTargeting, + config, + isTargetModule, + ) + if err != nil { + return nil, nil, err + } + return mappedModuleBucket, moduleTargeting, nil +} + +func getModuleConfigAndConfiguredDepModuleRefsV1Beta1OrV1( + ctx context.Context, + bucket storage.ReadBucket, + moduleDirPath string, + overrideBufYAMLFile bufconfig.BufYAMLFile, +) (bufconfig.ModuleConfig, []bufmodule.ModuleRef, error) { + var bufYAMLFile bufconfig.BufYAMLFile + var err error + if overrideBufYAMLFile != nil { + bufYAMLFile = overrideBufYAMLFile + } else { + bufYAMLFile, err = bufconfig.GetBufYAMLFileForPrefix(ctx, bucket, moduleDirPath) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // If we do not have a buf.yaml, we use the default config. + // This is a v1 config. + return bufconfig.DefaultModuleConfigV1, nil, nil + } + return nil, nil, err + } + } + // Just a sanity check. This should have already been validated, but let's make sure. 
+ if bufYAMLFile.FileVersion() != bufconfig.FileVersionV1Beta1 && bufYAMLFile.FileVersion() != bufconfig.FileVersionV1 { + return nil, nil, syserror.Newf("buf.yaml at %s did not have version v1beta1 or v1", moduleDirPath) + } + moduleConfigs := bufYAMLFile.ModuleConfigs() + if len(moduleConfigs) != 1 { + // This is a system error. This should never happen. + return nil, nil, syserror.Newf("received %d ModuleConfigs from a v1beta1 or v1 BufYAMLFile", len(moduleConfigs)) + } + return moduleConfigs[0], bufYAMLFile.ConfiguredDepModuleRefs(), nil +} + +// checkForControllingWorkspaceOrV1Module take a bucket and path, and walks up the bucket +// from the base of the path, checking for a controlling workspace or v1 module. +// +// If ignoreWorkspaceCheck is set to true, then we only look for v1 modules. This is done +// for the input, since we already did the workspace check in the initial bucketTargeting. +// Note that this is something that we could build into the initial bucketTargeting, +// however, moving forward, we want to encourage users to move to v2 workspaces, so it is +// nice to be able to isolate this as fallback logic here. +func checkForControllingWorkspaceOrV1Module( + ctx context.Context, + logger *zap.Logger, + bucket storage.ReadBucket, + path string, + ignoreWorkspaceCheck bool, +) (buftarget.ControllingWorkspace, error) { + path = normalpath.Normalize(path) + // Keep track of any v1 module found along the way. If we find a v1 or v2 workspace, we + // return that over the v1 module, but we return this as the fallback. + var fallbackV1Module buftarget.ControllingWorkspace + // Similar to the mapping loop in buftarget for buftarget.BucketTargeting, we can't do + // this in a traditional loop like this: + // + // for curDirPath := path; curDirPath != "."; curDirPath = normalpath.Dir(curDirPath) { + // + // If we do that, then we don't run terminateFunc for ".", which we want to so that we get + // the correct value for the terminate bool. 
+ // + // Instead, we effectively do a do-while loop. + curDirPath := path + for { + if !ignoreWorkspaceCheck { + controllingWorkspace, err := buftarget.TerminateAtControllingWorkspace( + ctx, + bucket, + curDirPath, + path, + ) + if err != nil { + return nil, err + } + if controllingWorkspace != nil { + logger.Debug( + "buffetch termination found", + zap.String("curDirPath", curDirPath), + zap.String("path", path), + ) + return controllingWorkspace, nil + } + } + // Then check for a v1 module + v1Module, err := buftarget.TerminateAtV1Module(ctx, bucket, curDirPath, path) + if err != nil { + return nil, err + } + if v1Module != nil { + if fallbackV1Module != nil { + return nil, fmt.Errorf("nested modules %q and %q are not allowed", fallbackV1Module.Path(), v1Module.Path()) + } + fallbackV1Module = v1Module + } + if curDirPath == "." { + break + } + curDirPath = normalpath.Dir(curDirPath) + } + logger.Debug( + "buffetch no termination found", + zap.String("path", path), + ) + return fallbackV1Module, nil +} + +func checkForOverlap(inputPath string, moduleDirPaths []string) error { + for _, moduleDirPath := range moduleDirPaths { + if normalpath.ContainsPath(moduleDirPath, inputPath, normalpath.Relative) { + return fmt.Errorf("failed to build input %q because it is contained by module at path %q specified in your configuration, you must provide the workspace or module as the input, and filter to this path using --path", inputPath, moduleDirPath) + } + } + return fmt.Errorf("input %q did not contain modules found in workspace %v", inputPath, moduleDirPaths) } diff --git a/private/buf/bufworkspace/workspace_test.go b/private/buf/bufworkspace/workspace_test.go index 8c899de677..4e6b83f7ae 100644 --- a/private/buf/bufworkspace/workspace_test.go +++ b/private/buf/bufworkspace/workspace_test.go @@ -20,6 +20,7 @@ import ( "io/fs" "testing" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/bufpkg/bufmodule" 
"github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduletesting" "github.com/bufbuild/buf/private/pkg/dag/dagtest" @@ -28,6 +29,7 @@ import ( "github.com/bufbuild/buf/private/pkg/tracing" "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zaptest" ) func TestBasicV1(t *testing.T) { @@ -60,12 +62,24 @@ func testBasic(t *testing.T, subDirPath string) { bucket, err := storageosProvider.NewReadWriteBucket(normalpath.Join("testdata/basic", subDirPath)) require.NoError(t, err) + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + zaptest.NewLogger(t), + bucket, + "finance/portfolio/proto", + nil, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NotNil(t, bucketTargeting.ControllingWorkspace()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "finance/portfolio/proto", bucketTargeting.InputDirPath()) + require.NoError(t, err) + workspace, err := workspaceProvider.GetWorkspaceForBucket( ctx, bucket, - WithTargetSubDirPath( - "finance/portfolio/proto", - ), + bucketTargeting, ) require.NoError(t, err) module := workspace.GetModuleForOpaqueID("buf.testing/acme/bond") @@ -127,23 +141,29 @@ func testBasic(t *testing.T, subDirPath string) { _, err = module.StatFileInfo(ctx, "LICENSE") require.NoError(t, err) - //malformedDeps, err := MalformedDepsForWorkspace(workspace) - //require.NoError(t, err) - //require.Equal(t, 1, len(malformedDeps)) - //malformedDep := malformedDeps[0] - //require.Equal(t, "buf.testing/acme/extension", malformedDep.ModuleFullName().String()) - //require.Equal(t, MalformedDepTypeUndeclared, malformedDep.Type()) + bucketTargeting, err = buftarget.NewBucketTargeting( + ctx, + zaptest.NewLogger(t), + bucket, + "common/money/proto", + []string{"common/money/proto/acme/money/v1/currency_code.proto"}, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NoError(t, err) + require.NotNil(t, bucketTargeting.ControllingWorkspace()) + require.Equal(t, ".", 
bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, "common/money/proto", bucketTargeting.InputDirPath()) + require.Equal( + t, + []string{"common/money/proto/acme/money/v1/currency_code.proto"}, + bucketTargeting.TargetPaths(), + ) workspace, err = workspaceProvider.GetWorkspaceForBucket( ctx, bucket, - WithTargetSubDirPath( - "common/money/proto", - ), - WithTargetPaths( - []string{"common/money/proto/acme/money/v1/currency_code.proto"}, - nil, - ), + bucketTargeting, ) require.NoError(t, err) module = workspace.GetModuleForOpaqueID("buf.testing/acme/money") @@ -177,10 +197,24 @@ func TestUnusedDep(t *testing.T) { storageosProvider := storageos.NewProvider() bucket, err := storageosProvider.NewReadWriteBucket("testdata/basic/workspace_unused_dep") require.NoError(t, err) + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + zaptest.NewLogger(t), + bucket, + ".", + nil, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NoError(t, err) + require.NotNil(t, bucketTargeting.ControllingWorkspace()) + require.Equal(t, ".", bucketTargeting.ControllingWorkspace().Path()) + require.Equal(t, ".", bucketTargeting.InputDirPath()) workspace, err := workspaceProvider.GetWorkspaceForBucket( ctx, bucket, + bucketTargeting, ) require.NoError(t, err) diff --git a/private/buf/cmd/buf/buf_test.go b/private/buf/cmd/buf/buf_test.go index 700e1cfb16..5f662c33cb 100644 --- a/private/buf/cmd/buf/buf_test.go +++ b/private/buf/cmd/buf/buf_test.go @@ -26,12 +26,15 @@ import ( "github.com/bufbuild/buf/private/buf/bufcli" "github.com/bufbuild/buf/private/buf/bufctl" "github.com/bufbuild/buf/private/buf/cmd/buf/internal/internaltesting" + "github.com/bufbuild/buf/private/bufpkg/bufimage" + imagev1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/image/v1" "github.com/bufbuild/buf/private/pkg/app/appcmd" "github.com/bufbuild/buf/private/pkg/app/appcmd/appcmdtesting" "github.com/bufbuild/buf/private/pkg/storage/storageos" 
"github.com/bufbuild/buf/private/pkg/storage/storagetesting" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) var convertTestDataDir = filepath.Join("command", "convert", "testdata", "convert") @@ -241,8 +244,6 @@ func TestFail6(t *testing.T) { func TestFail7(t *testing.T) { t.Parallel() - // TODO failing test - t.Skip() testRunStdout( t, nil, @@ -257,9 +258,10 @@ func TestFail7(t *testing.T) { t, nil, bufctl.ExitCodeFileAnnotation, - filepath.FromSlash(`testdata/fail/buf/buf.proto:3:1:Files with package "other" must be within a directory "other" relative to root but were in directory "fail/buf". -testdata/fail/buf/buf.proto:6:9:Field name "oneTwo" should be lower_snake_case, such as "one_two".`), - "", // stderr should be empty + "", // stdout is empty + // This is new behavior we introduced. When setting a config override, we no longer do + // a search for the controlling workspace. See bufctl/option.go for additional details. + filepath.FromSlash(`Failure: testdata/export/other/proto/unimported.proto: import "another.proto": file does not exist`), "lint", "--path", filepath.Join("testdata", "fail", "buf", "buf.proto"), @@ -275,7 +277,7 @@ testdata/fail/buf/buf.proto:6:9:Field name "oneTwo" should be lower_snake_case, // during the refactor. This is actually more correct - pre-refactor, the CLI was acting // as if the buf.yaml at testdata/fail/buf.yaml mattered in some way. In fact, it doesn't - // you've said that you have overridden it entirely. - filepath.FromSlash(`testdata/fail/buf/buf.proto:3:1:Files with package "other" must be within a directory "other" relative to root but were in directory "testdata/fail/buf". + filepath.FromSlash(`testdata/fail/buf/buf.proto:3:1:Files with package "other" must be within a directory "other" relative to root but were in directory ".". 
testdata/fail/buf/buf.proto:6:9:Field name "oneTwo" should be lower_snake_case, such as "one_two".`), "", // stderr should be empty "lint", @@ -336,8 +338,6 @@ func TestFail10(t *testing.T) { func TestFail11(t *testing.T) { t.Parallel() - // TODO failing test - t.Skip() testRunStdout( t, nil, @@ -1197,6 +1197,47 @@ breaking: ) } +func TestLsFilesOverlappingPaths(t *testing.T) { + t.Parallel() + // It should be OK to have paths that overlap, and ls-files (and other commands) + // should output the union. + testRunStdout( + t, + nil, + 0, + filepath.FromSlash(`testdata/paths/a/v3/a.proto +testdata/paths/a/v3/foo/bar.proto +testdata/paths/a/v3/foo/foo.proto`), + "ls-files", + filepath.Join("testdata", "paths"), + "--path", + filepath.Join("testdata", "paths", "a", "v3"), + "--path", + filepath.Join("testdata", "paths", "a", "v3", "foo"), + ) +} + +func TestBuildOverlappingPaths(t *testing.T) { + t.Parallel() + // This may differ from LsFilesOverlappingPaths as we do a build of an image here. + // Building of images results in bufmodule.ModuleSetToModuleReadBucketWithOnlyProtoFiles being + // called, which is the original source of the issue that resulted in this test. 
+ testBuildLsFilesFormatImport( + t, + 0, + []string{ + `a/v3/a.proto`, + `a/v3/foo/bar.proto`, + `a/v3/foo/foo.proto`, + }, + filepath.Join("testdata", "paths"), + "--path", + filepath.Join("testdata", "paths", "a", "v3"), + "--path", + filepath.Join("testdata", "paths", "a", "v3", "foo"), + ) +} + func TestExportProto(t *testing.T) { t.Parallel() tempDir := t.TempDir() @@ -1299,8 +1340,6 @@ func TestExportExcludeImports(t *testing.T) { func TestExportPaths(t *testing.T) { t.Parallel() - // TODO failing test - t.Skip() tempDir := t.TempDir() testRunStdout( t, @@ -1326,8 +1365,6 @@ func TestExportPaths(t *testing.T) { func TestExportPathsAndExcludes(t *testing.T) { t.Parallel() - // TODO failing test - t.Skip() tempDir := t.TempDir() testRunStdout( t, @@ -1489,7 +1526,7 @@ func TestBuildWithPaths(t *testing.T) { ``, // This is new post-refactor. Before, we gave precedence to --path. While a change, // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. - filepath.FromSlash(`Failure: excluded path "a/v3" contains targeted path "a/v3/foo", which means all paths in "a/v3/foo" will be excluded`), + filepath.FromSlash(`Failure: excluded path "testdata/paths/a/v3" contains targeted path "testdata/paths/a/v3/foo", which means all paths in "testdata/paths/a/v3/foo" will be excluded`), "build", filepath.Join("testdata", "paths"), "--path", @@ -1521,7 +1558,7 @@ func TestLintWithPaths(t *testing.T) { "", // This is new post-refactor. Before, we gave precedence to --path. While a change, // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
- filepath.FromSlash(`Failure: excluded path "a/v3" contains targeted path "a/v3/foo", which means all paths in "a/v3/foo" will be excluded`), + filepath.FromSlash(`Failure: excluded path "testdata/paths/a/v3" contains targeted path "testdata/paths/a/v3/foo", which means all paths in "testdata/paths/a/v3/foo" will be excluded`), "lint", filepath.Join("testdata", "paths"), "--path", @@ -1985,7 +2022,7 @@ func TestFormatSingleFile(t *testing.T) { "format", filepath.Join("testdata", "format", "simple"), "-o", - filepath.Join(tempDir, "simple.formatted.proto"), + filepath.Join(tempDir, "simple.formatted"), ) testRunStdout( t, @@ -1993,7 +2030,7 @@ func TestFormatSingleFile(t *testing.T) { 0, ``, "format", - filepath.Join(tempDir, "simple.formatted.proto"), + filepath.Join(tempDir, "simple.formatted"), "-d", ) } @@ -2295,6 +2332,48 @@ func TestConvertRoundTrip(t *testing.T) { }) } +func TestProtoFileNoWorkspaceOrModule(t *testing.T) { + t.Parallel() + // We can build a simple proto file re that does not belong to any workspace or module + // based on the directory of the input. + testRunStdout( + t, + nil, + 0, + "", + "build", + filepath.Join("testdata", "protofileref", "noworkspaceormodule", "success", "simple.proto"), + ) + // However, we should fail if there is any complexity (e.g. an import that cannot be + // resolved) since there is no workspace or module config to base this off of. + testRunStdoutStderrNoWarn( + t, + nil, + bufctl.ExitCodeFileAnnotation, + "", // no stdout + filepath.FromSlash(`testdata/protofileref/noworkspaceormodule/fail/import.proto:3:8:import "`)+`google/type/date.proto": file does not exist`, + "build", + filepath.Join("testdata", "protofileref", "noworkspaceormodule", "fail", "import.proto"), + ) +} + +// testBuildLsFilesFormatImport does effectively an ls-files, but via doing a build of an Image, and then +// listing the files from the image as if --format=import was set. 
+func testBuildLsFilesFormatImport(t *testing.T, expectedExitCode int, expectedFiles []string, buildArgs ...string) { + buffer := bytes.NewBuffer(nil) + testRun(t, expectedExitCode, nil, buffer, append([]string{"build", "-o", "-"}, buildArgs...)...) + protoImage := &imagev1.Image{} + err := proto.Unmarshal(buffer.Bytes(), protoImage) + require.NoError(t, err) + image, err := bufimage.NewImageForProto(protoImage) + require.NoError(t, err) + var paths []string + for _, imageFile := range image.Files() { + paths = append(paths, imageFile.Path()) + } + require.Equal(t, expectedFiles, paths) +} + func testModInit(t *testing.T, expectedData string, document bool, name string, deps ...string) { tempDir := t.TempDir() baseArgs := []string{"mod", "init"} diff --git a/private/buf/cmd/buf/command/generate/generate_test.go b/private/buf/cmd/buf/command/generate/generate_test.go index 34c47033e4..d7d4b78451 100644 --- a/private/buf/cmd/buf/command/generate/generate_test.go +++ b/private/buf/cmd/buf/command/generate/generate_test.go @@ -224,7 +224,7 @@ func TestOutputWithPathEqualToExclude(t *testing.T) { nil, 1, ``, - filepath.FromSlash(`Failure: cannot set the same path for both --path and --exclude-path: a/v1/a.proto`), + filepath.FromSlash(`Failure: cannot set the same path for both --path and --exclude-path: "testdata/paths/a/v1/a.proto"`), "--output", tempDirPath, "--template", diff --git a/private/buf/cmd/buf/command/generate/generate_unix_test.go b/private/buf/cmd/buf/command/generate/generate_unix_test.go index 23340739fd..7eab96dccf 100644 --- a/private/buf/cmd/buf/command/generate/generate_unix_test.go +++ b/private/buf/cmd/buf/command/generate/generate_unix_test.go @@ -26,8 +26,6 @@ import ( ) func TestProtoFileRef(t *testing.T) { - // TODO: failing test, un-skip this once --path and --exclude-path are updated - t.Skip() t.Parallel() tempDirPath := t.TempDir() testRunSuccess( @@ -79,12 +77,16 @@ func TestOutputWithExclude(t *testing.T) { } func 
TestOutputWithPathWithinExclude(t *testing.T) { - // TODO: failing test, un-skip this once --path and --exclude-path are updated - t.Skip() t.Parallel() tempDirPath := t.TempDir() - testRunSuccess( + // This is new post-refactor. Before, we gave precedence to --path. While a change, + // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. + testRunStdoutStderr( t, + nil, + 1, + "", + filepath.FromSlash(`Failure: excluded path "testdata/paths/a" contains targeted path "testdata/paths/a/v1/a.proto", which means all paths in "testdata/paths/a/v1/a.proto" will be excluded`), "--output", tempDirPath, "--template", @@ -94,11 +96,6 @@ func TestOutputWithPathWithinExclude(t *testing.T) { "--exclude-path", filepath.Join("testdata", "paths", "a"), ) - - _, err := os.Stat(filepath.Join(tempDirPath, "java", "a", "v1", "A.java")) - require.NoError(t, err) - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v2", "A.java")) - require.Contains(t, err.Error(), "no such file or directory") } func TestOutputWithExcludeWithinPath(t *testing.T) { @@ -128,12 +125,16 @@ func TestOutputWithExcludeWithinPath(t *testing.T) { } func TestOutputWithNestedExcludeAndTargetPaths(t *testing.T) { - // TODO: failing test, un-skip this once --path and --exclude-path are updated - t.Skip() t.Parallel() tempDirPath := t.TempDir() - testRunSuccess( + // This is new post-refactor. Before, we gave precedence to --path. While a change, + // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
+ testRunStdoutStderr( t, + nil, + 1, + "", + filepath.FromSlash(`Failure: excluded path "testdata/paths/a/v3" contains targeted path "testdata/paths/a/v3/foo", which means all paths in "testdata/paths/a/v3/foo" will be excluded`), "--output", tempDirPath, "--template", @@ -146,32 +147,19 @@ func TestOutputWithNestedExcludeAndTargetPaths(t *testing.T) { filepath.Join("testdata", "paths", "a", "v3", "foo"), filepath.Join("testdata", "paths"), ) - _, err := os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "foo", "FooOuterClass.java")) - require.NoError(t, err) - _, err = os.Stat(filepath.Join(tempDirPath, "java", "b", "v1", "B.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v1", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v2", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "foo", "BarOuterClass.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") } func TestWorkspaceGenerateWithExcludeAndTargetPaths(t *testing.T) { - // TODO: failing test, un-skip this once --path and --exclude-path are updated - t.Skip() t.Parallel() tempDirPath := t.TempDir() - testRunSuccess( + // This is new post-refactor. Before, we gave precedence to --path. While a change, + // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
+ testRunStdoutStderr( t, + nil, + 1, + "", + filepath.FromSlash(`Failure: excluded path "a/v3" contains targeted path "a/v3/foo", which means all paths in "a/v3/foo" will be excluded`), "--output", tempDirPath, "--template", @@ -186,21 +174,4 @@ func TestWorkspaceGenerateWithExcludeAndTargetPaths(t *testing.T) { filepath.Join("testdata", "workspace", "b", "v1", "foo.proto"), filepath.Join("testdata", "workspace"), ) - _, err := os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "foo", "FooOuterClass.java")) - require.NoError(t, err) - _, err = os.Stat(filepath.Join(tempDirPath, "java", "b", "v1", "B.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v1", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v2", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "A.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") - _, err = os.Stat(filepath.Join(tempDirPath, "java", "a", "v3", "foo", "BarOuterClass.java")) - require.Error(t, err) - require.Contains(t, err.Error(), "no such file or directory") } diff --git a/private/buf/cmd/buf/command/generate/generate_windows_test.go b/private/buf/cmd/buf/command/generate/generate_windows_test.go index 72701888bc..71198d923e 100644 --- a/private/buf/cmd/buf/command/generate/generate_windows_test.go +++ b/private/buf/cmd/buf/command/generate/generate_windows_test.go @@ -127,7 +127,7 @@ func TestOutputWithNestedExcludeAndTargetPaths(t *testing.T) { ``, // This is new post-refactor. Before, we gave precedence to --path. While a change, // doing --path foo/bar --exclude-path foo seems like a bug rather than expected behavior to maintain. 
- `Failure: excluded path "a\v3" contains targeted path "a\v3\foo", which means all paths in "a\v3\foo" will be excluded`, + `Failure: excluded path "testdata\paths\a\v3" contains targeted path "testdata\paths\a\v3\foo", which means all paths in "testdata\paths\a\v3\foo" will be excluded`, "--output", tempDirPath, "--template", diff --git a/private/buf/cmd/buf/command/lsfiles/lsfiles.go b/private/buf/cmd/buf/command/lsfiles/lsfiles.go index 93b678554b..b3d286f00b 100644 --- a/private/buf/cmd/buf/command/lsfiles/lsfiles.go +++ b/private/buf/cmd/buf/command/lsfiles/lsfiles.go @@ -39,6 +39,8 @@ const ( errorFormatFlagName = "error-format" includeImportsFlagName = "include-imports" includeImportableFlagName = "include-importable" + pathsFlagName = "path" + excludePathsFlagName = "exclude-path" disableSymlinksFlagName = "disable-symlinks" asImportPathsFlagName = "as-import-paths" @@ -76,6 +78,8 @@ type flags struct { Config string IncludeImports bool IncludeImportable bool + Paths []string + ExcludePaths []string DisableSymlinks bool // Deprecated. This flag no longer has any effect as we don't build images anymore. 
ErrorFormat string @@ -91,6 +95,8 @@ func newFlags() *flags { func (f *flags) Bind(flagSet *pflag.FlagSet) { bufcli.BindInputHashtag(flagSet, &f.InputHashtag) + bufcli.BindPaths(flagSet, &f.Paths, pathsFlagName) + bufcli.BindExcludePaths(flagSet, &f.ExcludePaths, excludePathsFlagName) bufcli.BindDisableSymlinks(flagSet, &f.DisableSymlinks, disableSymlinksFlagName) flagSet.StringVar( &f.Config, @@ -166,6 +172,7 @@ func run( imageFileInfos, err := controller.GetImportableImageFileInfos( ctx, input, + bufctl.WithTargetPaths(flags.Paths, flags.ExcludePaths), bufctl.WithConfigOverride(flags.Config), ) if err != nil { diff --git a/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/fail/import.proto b/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/fail/import.proto new file mode 100644 index 0000000000..3fa67867af --- /dev/null +++ b/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/fail/import.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +import "google/type/date.proto"; + +message A { + google.type.Date date = 1; +} diff --git a/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/success/simple.proto b/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/success/simple.proto new file mode 100644 index 0000000000..94ab31a637 --- /dev/null +++ b/private/buf/cmd/buf/testdata/protofileref/noworkspaceormodule/success/simple.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package simple; + +message Foo { + string key = 1; +} diff --git a/private/buf/cmd/buf/workspace_test.go b/private/buf/cmd/buf/workspace_test.go index 56cb4875c8..f0037ba4ae 100644 --- a/private/buf/cmd/buf/workspace_test.go +++ b/private/buf/cmd/buf/workspace_test.go @@ -1121,26 +1121,17 @@ func TestWorkspaceInputOverlapFail(t *testing.T) { // The target input cannot overlap with any of the directories defined // in the workspace. 
t.Parallel() - // TODO failing test - t.Skip() testRunStdoutStderrNoWarn( t, nil, 1, ``, - filepath.FromSlash(`Failure: failed to build input "proto/buf" because it is contained by directory "proto" listed in testdata/workspace/fail/overlap/buf.work.yaml`), + `Failure: failed to build input "proto/buf" because it is contained by module at path "proto" specified in your configuration, you must provide the workspace or module as the input, and filter to this path using --path`, "build", filepath.Join("testdata", "workspace", "fail", "overlap", "proto", "buf"), ) - testRunStdoutStderrNoWarn( - t, - nil, - 1, - ``, - filepath.FromSlash(`Failure: failed to build input "other" because it contains directory "other/proto" listed in testdata/workspace/success/dir/buf.work.yaml`), - "build", - filepath.Join("testdata", "workspace", "success", "dir", "other"), - ) + // This works because of our fallback logic, so we build the workspace at testdata/workspace/success/dir + testRunStdout(t, nil, 0, ``, "build", filepath.Join("testdata", "workspace", "success", "dir", "other")) } func TestWorkspaceNoVersionFail(t *testing.T) { @@ -1196,7 +1187,7 @@ func TestWorkspaceWithWorkspacePathFail(t *testing.T) { nil, 1, ``, - `Failure: given input is equal to a value of --path - this has no effect and is disallowed`, + `Failure: given input is equal to a value of --path, this has no effect and is disallowed`, "lint", filepath.Join("testdata", "workspace", "success", "dir"), "--path", @@ -1207,7 +1198,7 @@ func TestWorkspaceWithWorkspacePathFail(t *testing.T) { nil, 1, ``, - `Failure: given input is equal to a value of --path - this has no effect and is disallowed`, + `Failure: given input is equal to a value of --path, this has no effect and is disallowed`, "lint", filepath.Join("testdata", "workspace", "success", "v2", "dir"), "--path", @@ -1222,7 +1213,7 @@ func TestWorkspaceWithWorkspaceExcludePathFail(t *testing.T) { nil, 1, ``, - `Failure: given input . 
is equal to a value of --exclude-path . - this would exclude everything`, + `Failure: given input "." is equal to a value of --exclude-path ".", this would exclude everything`, "lint", filepath.Join("testdata", "workspace", "success", "dir"), "--exclude-path", @@ -1233,7 +1224,7 @@ func TestWorkspaceWithWorkspaceExcludePathFail(t *testing.T) { nil, 1, ``, - `Failure: given input . is equal to a value of --exclude-path . - this would exclude everything`, + `Failure: given input "." is equal to a value of --exclude-path ".", this would exclude everything`, "lint", filepath.Join("testdata", "workspace", "success", "v2", "dir"), "--exclude-path", @@ -1249,7 +1240,7 @@ func TestWorkspaceWithWorkspaceDirectoryPathFail(t *testing.T) { nil, 1, ``, - `Failure: module "proto" was specified with --path - specify this module path directly as an input`, + `Failure: module "proto" was specified with --path, specify this module path directly as an input`, "lint", filepath.Join("testdata", "workspace", "success", "dir"), "--path", @@ -1260,7 +1251,7 @@ func TestWorkspaceWithWorkspaceDirectoryPathFail(t *testing.T) { nil, 1, ``, - `Failure: module "proto" was specified with --path - specify this module path directly as an input`, + `Failure: module "proto" was specified with --path, specify this module path directly as an input`, "lint", filepath.Join("testdata", "workspace", "success", "v2", "dir"), "--path", @@ -1383,7 +1374,7 @@ func TestWorkspaceWithInvalidArchiveAbsolutePathFail(t *testing.T) { 1, ``, filepath.FromSlash(fmt.Sprintf( - `Failure: %s/proto/rpc.proto: absolute paths cannot be used for this input type`, + `Failure: %s/proto/rpc.proto: expected to be relative`, wd, )), "lint", @@ -1402,7 +1393,7 @@ func TestWorkspaceWithInvalidArchiveAbsolutePathFail(t *testing.T) { 1, ``, filepath.FromSlash(fmt.Sprintf( - `Failure: %s/proto/rpc.proto: absolute paths cannot be used for this input type`, + `Failure: %s/proto/rpc.proto: expected to be relative`, wd, )), "lint", diff 
--git a/private/buf/cmd/buf/workspace_unix_test.go b/private/buf/cmd/buf/workspace_unix_test.go index fc9776d697..8cdfa31200 100644 --- a/private/buf/cmd/buf/workspace_unix_test.go +++ b/private/buf/cmd/buf/workspace_unix_test.go @@ -175,7 +175,7 @@ func TestWorkspaceGit(t *testing.T) { nil, bufctl.ExitCodeFileAnnotation, filepath.FromSlash(`private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Files with package "example" must be within a directory "example" relative to root but were in directory ".". - private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Package name "example" should be suffixed with a correctly formed version, such as "example.v1".`), + private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Package name "example" should be suffixed with a correctly formed version, such as "example.v1".`), "lint", "../../../../.git#ref=HEAD,subdir=private/buf/cmd/buf/testdata/workspace/success/dir/proto", ) @@ -184,7 +184,7 @@ func TestWorkspaceGit(t *testing.T) { nil, bufctl.ExitCodeFileAnnotation, filepath.FromSlash(`private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Files with package "example" must be within a directory "example" relative to root but were in directory ".". 
- private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Package name "example" should be suffixed with a correctly formed version, such as "example.v1".`), + private/buf/cmd/buf/testdata/workspace/success/dir/proto/rpc.proto:3:1:Package name "example" should be suffixed with a correctly formed version, such as "example.v1".`), "lint", "../../../../.git#ref=HEAD,subdir=private/buf/cmd/buf/testdata/workspace/success/dir/proto", "--path", diff --git a/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go b/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go index 77f676298c..a363ac19a0 100644 --- a/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go +++ b/private/buf/cmd/buf/workspacetests/other/proto/workspacetest/workspace_test.go @@ -172,14 +172,12 @@ func TestWorkspaceSubDirectory(t *testing.T) { func TestWorkspaceOverlapSubDirectory(t *testing.T) { // Specify an overlapping input in a sub-directory. 
t.Parallel() - // TODO failing test - t.Skip() testRunStdoutStderr( t, nil, 1, ``, - filepath.FromSlash(`Failure: failed to build input "other/proto/one" because it is contained by directory "other/proto" listed in ../../../buf.work.yaml`), + `Failure: failed to build input "other/proto/one" because it is contained by module at path "other/proto" specified in your configuration, you must provide the workspace or module as the input, and filter to this path using --path`, "build", filepath.Join("..", "one"), ) diff --git a/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go b/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go index 7c795db4d5..8433f6288b 100644 --- a/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go +++ b/private/bufpkg/bufcheck/bufbreaking/bufbreaking_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/buf/bufworkspace" "github.com/bufbuild/buf/private/bufpkg/bufanalysis" "github.com/bufbuild/buf/private/bufpkg/bufanalysis/bufanalysistesting" @@ -32,6 +33,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zaptest" ) func TestRunBreakingEnumNoDelete(t *testing.T) { @@ -753,6 +755,7 @@ func testBreaking( ) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() + logger := zaptest.NewLogger(t) previousDirPath := filepath.Join("testdata_previous", relDirPath) dirPath := filepath.Join("testdata", relDirPath) @@ -763,11 +766,31 @@ func testBreaking( storageos.ReadWriteBucketWithSymlinksIfSupported(), ) require.NoError(t, err) + previousBucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + logger, + previousReadWriteBucket, + ".", // the bucket is rooted at the input + nil, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NoError(t, err) readWriteBucket, err := storageosProvider.NewReadWriteBucket( dirPath, 
storageos.ReadWriteBucketWithSymlinksIfSupported(), ) require.NoError(t, err) + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + logger, + readWriteBucket, + ".", // the bucket is rooted at the input + nil, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NoError(t, err) workspaceProvider := bufworkspace.NewWorkspaceProvider( zap.NewNop(), @@ -779,11 +802,13 @@ func testBreaking( previousWorkspace, err := workspaceProvider.GetWorkspaceForBucket( ctx, previousReadWriteBucket, + previousBucketTargeting, ) require.NoError(t, err) workspace, err := workspaceProvider.GetWorkspaceForBucket( ctx, readWriteBucket, + bucketTargeting, ) require.NoError(t, err) diff --git a/private/bufpkg/bufcheck/buflint/buflint_test.go b/private/bufpkg/bufcheck/buflint/buflint_test.go index 8a48f4fc05..54a661156d 100644 --- a/private/bufpkg/bufcheck/buflint/buflint_test.go +++ b/private/bufpkg/bufcheck/buflint/buflint_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/bufbuild/buf/private/buf/buftarget" "github.com/bufbuild/buf/private/buf/bufworkspace" "github.com/bufbuild/buf/private/bufpkg/bufanalysis" "github.com/bufbuild/buf/private/bufpkg/bufanalysis/bufanalysistesting" @@ -32,6 +33,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zaptest" ) // Hint on how to get these: @@ -1167,6 +1169,16 @@ func testLintWithOptions( storageos.ReadWriteBucketWithSymlinksIfSupported(), ) require.NoError(t, err) + bucketTargeting, err := buftarget.NewBucketTargeting( + ctx, + zaptest.NewLogger(t), + readWriteBucket, + ".", // the bucket is rooted at the input + nil, + nil, + buftarget.TerminateAtControllingWorkspace, + ) + require.NoError(t, err) workspace, err := bufworkspace.NewWorkspaceProvider( zap.NewNop(), tracing.NopTracer, @@ -1176,6 +1188,7 @@ func testLintWithOptions( ).GetWorkspaceForBucket( ctx, readWriteBucket, + bucketTargeting, ) require.NoError(t, err) diff --git 
a/private/bufpkg/bufconfig/terminate.go b/private/bufpkg/bufconfig/terminate.go deleted file mode 100644 index 014f902de5..0000000000 --- a/private/bufpkg/bufconfig/terminate.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2020-2024 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bufconfig - -import ( - "context" - "errors" - "fmt" - "io/fs" - - "github.com/bufbuild/buf/private/pkg/normalpath" - "github.com/bufbuild/buf/private/pkg/slicesext" - "github.com/bufbuild/buf/private/pkg/storage" -) - -// FindControllingWorkspaceResult is a result from FindControllingWorkspace. -type FindControllingWorkspaceResult interface { - // If true, a controlling workspace was found. - Found() bool - // If non-empty, this indicates that the found workspace was a buf.work.yaml, - // and the directory paths were these paths. - // - // These paths include the input prefix, and are the full paths to the directories - // within the input bucket. - // - // If empty, this indicates that the found workspace was a buf.yaml. - BufWorkYAMLDirPaths() []string - - isFindControllingWorkspaceResult() -} - -// FindControllingWorkspace searches for a workspace file at prefix that controls originalSubDirPath. -// -// # A workspace file is either a buf.work.yaml file or a v2 buf.yaml file -// -// The workspace file controls originalSubDirPath if either: -// 1. prefix == originalSubDirPath, that is we're just directly targeting originalSubDirPath. -// 3. 
The workspace file refers to the originalSubDirPath via "directories" in buf.work.yaml or "directory" in buf.yaml. -// -// This is used by both buffetch/internal.Reader via the Prefix functions, and NewWorkspaceForBucket, -// which do their own independent searches and do not depend on each other. -func FindControllingWorkspace( - ctx context.Context, - bucket storage.ReadBucket, - prefix string, - originalSubDirPath string, -) (FindControllingWorkspaceResult, error) { - return findControllingWorkspace(ctx, bucket, prefix, originalSubDirPath, true) -} - -// TerminateForNonProtoFileRef returns true if the bucket contains a workspace file -// that controls originalSubDirPath at the prefix. -// -// See the commentary on FindControllingWorkspace. -// -// This is used by buffetch when searching for the root of the workspace. -// See buffetch/internal.WithGetBucketTerminateFunc for more information. -func TerminateAtControllingWorkspace( - ctx context.Context, - bucket storage.ReadBucket, - prefix string, - originalSubDirPath string, -) (bool, error) { - findControllingWorkspaceResult, err := findControllingWorkspace(ctx, bucket, prefix, originalSubDirPath, true) - if err != nil { - return false, err - } - return findControllingWorkspaceResult.Found(), nil -} - -// TerminateAtEnclosingModuleOrWorkspaceForProtoFileRef returns true if the bucket contains -// either a module file or workspace file at the prefix. -// -// A module file is either a v1 or v1beta1 buf.yaml or buf.mod file, or a v2 buf.yaml file that -// has a module with directory ".". This is configuration for a module rooted at this directory. -// -// A workspace file is either a buf.work.yaml file or a v2 buf.yaml file. -// -// As opposed to TerminateForNonProtoFileRef, this does not require the prefix to point to the -// originalSubDirPath - ProtoFileRefs assume that if you have a workspace file, it controls the ProtoFileRef. 
-// -// This is used by buffetch when searching for the root of the module when dealing with ProtoFileRefs. -// See buffetch/internal.WithGetBucketProtoFileTerminateFunc for more information. -func TerminateAtEnclosingModuleOrWorkspaceForProtoFileRef( - ctx context.Context, - bucket storage.ReadBucket, - prefix string, - originalSubDirPath string, -) (bool, error) { - findControllingWorkspaceResult, err := findControllingWorkspace(ctx, bucket, prefix, originalSubDirPath, false) - if err != nil { - return false, err - } - if findControllingWorkspaceResult.Found() { - return true, nil - } - - bufYAMLFile, err := GetBufYAMLFileForPrefix(ctx, bucket, prefix) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - return false, nil - } - return false, err - } - switch fileVersion := bufYAMLFile.FileVersion(); fileVersion { - case FileVersionV1, FileVersionV1Beta1: - // If we have a v1 or v1beta1 buf.yaml, we automatically know this is a module file. - return true, nil - case FileVersionV2: - // If we have a v2, this is only a "module file" if it contains a module with directory ".". - // Otherwise, we don't want to stop here. Remember, this only wants to stop at module files, - // not workspace files. - for _, moduleConfig := range bufYAMLFile.ModuleConfigs() { - if moduleConfig.DirPath() == "." 
{ - return true, nil - } - } - return false, nil - default: - return false, fmt.Errorf("unknown FileVersion: %v", fileVersion) - } -} - -// *** PRIVATE *** - -type findControllingWorkspaceResult struct { - found bool - bufWorkYAMLDirPaths []string -} - -func newFindControllingWorkspaceResult( - found bool, - bufWorkYAMLDirPaths []string, -) *findControllingWorkspaceResult { - return &findControllingWorkspaceResult{ - found: found, - bufWorkYAMLDirPaths: bufWorkYAMLDirPaths, - } -} - -func (f *findControllingWorkspaceResult) Found() bool { - return f.found -} - -func (f *findControllingWorkspaceResult) BufWorkYAMLDirPaths() []string { - return f.bufWorkYAMLDirPaths -} - -func (*findControllingWorkspaceResult) isFindControllingWorkspaceResult() {} - -// findControllingWorkspace adds the property that the prefix may not be required to point to originalSubDirPath. -// -// We don't require the workspace file to point to originalSubDirPath when finding the enclosing module or -// workspace for a ProtoFileRef. -func findControllingWorkspace( - ctx context.Context, - bucket storage.ReadBucket, - prefix string, - originalSubDirPath string, - requirePrefixWorkspaceToPointToOriginalSubDirPath bool, -) (FindControllingWorkspaceResult, error) { - bufWorkYAMLFile, err := GetBufWorkYAMLFileForPrefix(ctx, bucket, prefix) - if err != nil && !errors.Is(err, fs.ErrNotExist) { - return nil, err - } - bufWorkYAMLExists := err == nil - bufYAMLFile, err := GetBufYAMLFileForPrefix(ctx, bucket, prefix) - if err != nil && !errors.Is(err, fs.ErrNotExist) { - return nil, err - } - bufYAMLExists := err == nil - if bufWorkYAMLExists && bufYAMLExists { - // This isn't actually the external directory path, but we do the best we can here for now. - return nil, fmt.Errorf("cannot have a buf.work.yaml and buf.yaml in the same directory %q", prefix) - } - - // Find the relative path of our original target subDirPath vs where we currently are. 
- // We only stop the loop if a v2 buf.yaml or a buf.work.yaml lists this directory, - // or if the original target subDirPath points ot the workspace file itself. - // - // Example: we inputted foo/bar/baz, we're currently at foo. We want to make sure - // that buf.work.yaml lists bar/baz as a directory. If so, this buf.work.yaml - // relates to our current directory. - // - // Example: we inputted foo/bar/baz, we're at foo/bar/baz. Great. - relDirPath, err := normalpath.Rel(prefix, originalSubDirPath) - if err != nil { - return nil, err - } - if bufYAMLExists && bufYAMLFile.FileVersion() == FileVersionV2 { - if !requirePrefixWorkspaceToPointToOriginalSubDirPath { - // We don't require the workspace to point to the prefix (likely because we're - // finding the controlling workspace for a ProtoFileRef), we're good to go. - return newFindControllingWorkspaceResult(true, nil), nil - } - if prefix == originalSubDirPath { - // We've referred to our workspace file directly, we're good to go. - return newFindControllingWorkspaceResult(true, nil), nil - } - dirPathMap := make(map[string]struct{}) - for _, moduleConfig := range bufYAMLFile.ModuleConfigs() { - dirPathMap[moduleConfig.DirPath()] = struct{}{} - } - if _, ok := dirPathMap[relDirPath]; ok { - // This workspace file refers to curDirPath, we're good to go. - return newFindControllingWorkspaceResult(true, nil), nil - } - } - if bufWorkYAMLExists { - _, refersToCurDirPath := slicesext.ToStructMap(bufWorkYAMLFile.DirPaths())[relDirPath] - if prefix == originalSubDirPath || refersToCurDirPath || !requirePrefixWorkspaceToPointToOriginalSubDirPath { - // We don't actually need to parse the buf.work.yaml again - we have all the information - // we need. Just figure out the actual paths within the bucket of the modules, and go - // right to newWorkspaceForBucketAndModuleDirPathsV1Beta1OrV1. 
- moduleDirPaths := make([]string, len(bufWorkYAMLFile.DirPaths())) - for i, dirPath := range bufWorkYAMLFile.DirPaths() { - moduleDirPaths[i] = normalpath.Join(prefix, dirPath) - } - return newFindControllingWorkspaceResult(true, moduleDirPaths), nil - } - } - return newFindControllingWorkspaceResult(false, nil), nil -} diff --git a/private/bufpkg/bufimage/bufimage.go b/private/bufpkg/bufimage/bufimage.go index 4cd11228a2..03efba9639 100644 --- a/private/bufpkg/bufimage/bufimage.go +++ b/private/bufpkg/bufimage/bufimage.go @@ -17,6 +17,7 @@ package bufimage import ( "context" "fmt" + "io/fs" "sort" "strings" @@ -799,7 +800,7 @@ func imageFileInfosWithOnlyTargetsAndTargetImportsRec( for _, imp := range imports { importImageFileInfo, ok := pathToImageFileInfo[imp] if !ok { - return fmt.Errorf("no ImageFileInfo for import %q", imp) + return fmt.Errorf("%s: import %q: %w", imageFileInfo.ExternalPath(), imp, fs.ErrNotExist) } if err := imageFileInfosWithOnlyTargetsAndTargetImportsRec(importImageFileInfo, pathToImageFileInfo, resultPaths); err != nil { return err diff --git a/private/bufpkg/bufmodule/module_dep.go b/private/bufpkg/bufmodule/module_dep.go index 6b55351278..9af13b475c 100644 --- a/private/bufpkg/bufmodule/module_dep.go +++ b/private/bufpkg/bufmodule/module_dep.go @@ -97,7 +97,6 @@ func getModuleDeps( if err := protoFileTracker.validate(); err != nil { return nil, err } - moduleDeps := make([]ModuleDep, 0, len(depOpaqueIDToModuleDep)) for _, moduleDep := range depOpaqueIDToModuleDep { moduleDeps = append(moduleDeps, moduleDep) diff --git a/private/bufpkg/bufmodule/module_read_bucket.go b/private/bufpkg/bufmodule/module_read_bucket.go index 6c3ce98cb1..a5601fe629 100644 --- a/private/bufpkg/bufmodule/module_read_bucket.go +++ b/private/bufpkg/bufmodule/module_read_bucket.go @@ -331,7 +331,6 @@ func (b *moduleReadBucket) WalkFileInfos( // Note that we must verify that at least one file in this ModuleReadBucket is // a .proto file, per the documentation on 
Module. protoFileTracker := newProtoFileTracker() - protoFileTracker.trackModule(b.module) walkFileInfosOptions := newWalkFileInfosOptions() for _, option := range options { @@ -343,6 +342,10 @@ func (b *moduleReadBucket) WalkFileInfos( } if !walkFileInfosOptions.onlyTargetFiles { + // We only want to call trackModule if we are walking all the files, not just + // the target files. By not calling trackModule outside of this if statement, + // we will not produce NoProtoFilesErrors, per the documention on trackModule. + protoFileTracker.trackModule(b.module) if err := bucket.Walk( ctx, "", @@ -360,6 +363,11 @@ func (b *moduleReadBucket) WalkFileInfos( return protoFileTracker.validate() } + // If we are walking all files, then we track the module if it is the target module + if b.module.IsTarget() { + protoFileTracker.trackModule(b.module) + } + targetFileWalkFunc := func(objectInfo storage.ObjectInfo) error { fileInfo, err := b.getFileInfo(ctx, objectInfo) if err != nil { @@ -381,9 +389,21 @@ func (b *moduleReadBucket) WalkFileInfos( // // Use targetPaths instead of targetPathMap to have a deterministic iteration order at this level. if len(b.targetPaths) > 0 { + // Target paths may have overlapping files, for example if you do --path a --path a/b, + // you get the union of the files. We need to make sure that we only walk a given + // file path once. + seenPaths := make(map[string]struct{}) + multiTargetFileWalkFunc := func(objectInfo storage.ObjectInfo) error { + path := objectInfo.Path() + if _, ok := seenPaths[path]; ok { + return nil + } + seenPaths[path] = struct{}{} + return targetFileWalkFunc(objectInfo) + } for _, targetPath := range b.targetPaths { // Still need to determine IsTargetFile as a file could be excluded with excludeTargetPaths. 
- if err := bucket.Walk(ctx, targetPath, targetFileWalkFunc); err != nil { + if err := bucket.Walk(ctx, targetPath, multiTargetFileWalkFunc); err != nil { return err } } diff --git a/private/bufpkg/bufmodule/module_set_builder.go b/private/bufpkg/bufmodule/module_set_builder.go index 9a55ba4435..5bc4f1be79 100644 --- a/private/bufpkg/bufmodule/module_set_builder.go +++ b/private/bufpkg/bufmodule/module_set_builder.go @@ -408,7 +408,6 @@ func (b *moduleSetBuilder) Build() (_ ModuleSet, retErr error) { modules, err := slicesext.MapError( addedModules, func(addedModule *addedModule) (Module, error) { - // This context ends up getting stored on the Module, is this a problem re: tracing? return addedModule.ToModule(ctx, b.moduleDataProvider) }, )