diff --git a/doc/plugin_server_bundlepublisher_gcp_cloudstorage.md b/doc/plugin_server_bundlepublisher_gcp_cloudstorage.md new file mode 100644 index 0000000000..dfe7d530f1 --- /dev/null +++ b/doc/plugin_server_bundlepublisher_gcp_cloudstorage.md @@ -0,0 +1,69 @@ +# Server plugin: BundlePublisher "gcp_cloudstorage" + +The `gcp_cloudstorage` plugin puts the current trust bundle of the server in a designated +Google Cloud Storage bucket, keeping it updated. + +The plugin accepts the following configuration options: + +| Configuration | Description | Required | Default | +|----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------|----------------------------------------------------------------| +| service_account_file | Path to the service account file used to authenticate with the Cloud Storage API. | No. | Value of `GOOGLE_APPLICATION_CREDENTIALS` environment variable.| +| bucket_name | The Google Cloud Storage bucket name to which the trust bundle is uploaded. | Yes. | | +| object_name | The object name inside the bucket. | Yes. | | +| format | Format in which the trust bundle is stored, <spiffe \| jwks \| pem>. See [Supported bundle formats](#supported-bundle-formats) for more details. | Yes. | | + +## Supported bundle formats + +The following bundle formats are supported: + +### SPIFFE format + +The trust bundle is represented as an RFC 7517 compliant JWK Set, with the specific parameters defined in the [SPIFFE Trust Domain and Bundle specification](https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE_Trust_Domain_and_Bundle.md#4-spiffe-bundle-format). Both the JWT authorities and the X.509 authorities are included. + +### JWKS format + +The trust bundle is encoded as an RFC 7517 compliant JWK Set, omitting SPIFFE-specific parameters. Both the JWT authorities and the X.509 authorities are included. + +### PEM format + +The trust bundle is formatted using PEM encoding. Only the X.509 authorities are included. + +## Required permissions + +The plugin requires the following IAM permissions to be granted to the authenticated service account in the configured bucket: + +```text +storage.objects.create +storage.objects.delete +``` + +The `storage.objects.delete` permission is required to overwrite the object when the bundle is updated. + +## Sample configuration using Application Default Credentials + +The following configuration uploads the local trust bundle contents to the `example.org` object in the `spire-bundle` bucket. Since `service_account_file` is not configured, [Application Default Credentials](https://cloud.google.com/docs/authentication/client-libraries#adc) are used. + +```hcl + BundlePublisher "gcp_cloudstorage" { + plugin_data { + bucket_name = "spire-bundle" + object_name = "example.org" + format = "spiffe" + } + } +``` + +## Sample configuration using service account file + +The following configuration uploads the local trust bundle contents to the `example.org` object in the `spire-bundle` bucket. Since `service_account_file` is configured, authentication to the Cloud Storage API is done with the given service account file.
+ +```hcl + BundlePublisher "gcp_cloudstorage" { + plugin_data { + service_account_file = "/path/to/service/account/file" + bucket_name = "spire-bundle" + object_name = "example.org" + format = "spiffe" + } + } +``` diff --git a/doc/spire_server.md b/doc/spire_server.md index 5b716f4d26..2b94a0397a 100644 --- a/doc/spire_server.md +++ b/doc/spire_server.md @@ -12,36 +12,37 @@ This document is a configuration reference for SPIRE Server. It includes informa | NodeAttestor | Implements validation logic for nodes attempting to assert their identity. Generally paired with an agent plugin of the same type. | | UpstreamAuthority | Allows SPIRE server to integrate with existing PKI systems. | | Notifier | Notified by SPIRE server for certain events that are happening or have happened. For events that are happening, the notifier can advise SPIRE server on the outcome. | -| BundlePublisher | Publishes trust bundles to additional locations. | +| BundlePublisher | Publishes the local trust bundle to a store. | ## Built-in plugins -| Type | Name | Description | -|--------------------|----------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------| -| DataStore | [sql](/doc/plugin_server_datastore_sql.md) | An SQL database storage for SQLite, PostgreSQL and MySQL databases for the SPIRE datastore | -| KeyManager | [aws_kms](/doc/plugin_server_keymanager_aws_kms.md) | A key manager which manages keys in AWS KMS | -| KeyManager | [disk](/doc/plugin_server_keymanager_disk.md) | A key manager which manages keys persisted on disk | -| KeyManager | [memory](/doc/plugin_server_keymanager_memory.md) | A key manager which manages unpersisted keys in memory | -| CredentialComposer | [uniqueid](/doc/plugin_server_credentialcomposer_uniqueid.md) | Adds the x509UniqueIdentifier attribute to workload X509-SVIDs.
| -| NodeAttestor | [aws_iid](/doc/plugin_server_nodeattestor_aws_iid.md) | A node attestor which attests agent identity using an AWS Instance Identity Document | -| NodeAttestor | [azure_msi](/doc/plugin_server_nodeattestor_azure_msi.md) | A node attestor which attests agent identity using an Azure MSI token | -| NodeAttestor | [gcp_iit](/doc/plugin_server_nodeattestor_gcp_iit.md) | A node attestor which attests agent identity using a GCP Instance Identity Token | -| NodeAttestor | [join_token](/doc/plugin_server_nodeattestor_jointoken.md) | A node attestor which validates agents attesting with server-generated join tokens | -| NodeAttestor | [k8s_sat](/doc/plugin_server_nodeattestor_k8s_sat.md) (deprecated) | A node attestor which attests agent identity using a Kubernetes Service Account token | -| NodeAttestor | [k8s_psat](/doc/plugin_server_nodeattestor_k8s_psat.md) | A node attestor which attests agent identity using a Kubernetes Projected Service Account token | -| NodeAttestor | [sshpop](/doc/plugin_server_nodeattestor_sshpop.md) | A node attestor which attests agent identity using an existing ssh certificate | -| NodeAttestor | [tpm_devid](/doc/plugin_server_nodeattestor_tpm_devid.md) | A node attestor which attests agent identity using a TPM that has been provisioned with a DevID certificate | -| NodeAttestor | [x509pop](/doc/plugin_server_nodeattestor_x509pop.md) | A node attestor which attests agent identity using an existing X.509 certificate | -| UpstreamAuthority | [disk](/doc/plugin_server_upstreamauthority_disk.md) | Uses a CA loaded from disk to sign SPIRE server intermediate certificates. | -| UpstreamAuthority | [aws_pca](/doc/plugin_server_upstreamauthority_aws_pca.md) | Uses a Private Certificate Authority from AWS Certificate Manager to sign SPIRE server intermediate certificates. | -| UpstreamAuthority | [awssecret](/doc/plugin_server_upstreamauthority_awssecret.md) | Uses a CA loaded from AWS SecretsManager to sign SPIRE server intermediate certificates. | -| UpstreamAuthority | [gcp_cas](/doc/plugin_server_upstreamauthority_gcp_cas.md) | Uses a Private Certificate Authority from GCP Certificate Authority Service to sign SPIRE Server intermediate certificates. | -| UpstreamAuthority | [vault](/doc/plugin_server_upstreamauthority_vault.md) | Uses a PKI Secret Engine from HashiCorp Vault to sign SPIRE server intermediate certificates. | -| UpstreamAuthority | [spire](/doc/plugin_server_upstreamauthority_spire.md) | Uses an upstream SPIRE server in the same trust domain to obtain intermediate signing certificates for SPIRE server. | -| UpstreamAuthority | [cert-manager](/doc/plugin_server_upstreamauthority_cert_manager.md) | Uses a referenced cert-manager Issuer to request intermediate signing certificates. | -| Notifier | [gcs_bundle](/doc/plugin_server_notifier_gcs_bundle.md) | A notifier that pushes the latest trust bundle contents into an object in Google Cloud Storage. | -| Notifier | [k8sbundle](/doc/plugin_server_notifier_k8sbundle.md) | A notifier that pushes the latest trust bundle contents into a Kubernetes ConfigMap. | -| BundlePublisher | [aws_s3](/doc/plugin_server_bundlepublisher_aws_s3.md) | Publishes trust bundles to an Amazon S3 bucket. 
| +| Type | Name | Description | +|--------------------|----------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------| +| DataStore | [sql](/doc/plugin_server_datastore_sql.md) | An SQL database storage for SQLite, PostgreSQL and MySQL databases for the SPIRE datastore | +| KeyManager | [aws_kms](/doc/plugin_server_keymanager_aws_kms.md) | A key manager which manages keys in AWS KMS | +| KeyManager | [disk](/doc/plugin_server_keymanager_disk.md) | A key manager which manages keys persisted on disk | +| KeyManager | [memory](/doc/plugin_server_keymanager_memory.md) | A key manager which manages unpersisted keys in memory | +| CredentialComposer | [uniqueid](/doc/plugin_server_credentialcomposer_uniqueid.md) | Adds the x509UniqueIdentifier attribute to workload X509-SVIDs. | +| NodeAttestor | [aws_iid](/doc/plugin_server_nodeattestor_aws_iid.md) | A node attestor which attests agent identity using an AWS Instance Identity Document | +| NodeAttestor | [azure_msi](/doc/plugin_server_nodeattestor_azure_msi.md) | A node attestor which attests agent identity using an Azure MSI token | +| NodeAttestor | [gcp_iit](/doc/plugin_server_nodeattestor_gcp_iit.md) | A node attestor which attests agent identity using a GCP Instance Identity Token | +| NodeAttestor | [join_token](/doc/plugin_server_nodeattestor_jointoken.md) | A node attestor which validates agents attesting with server-generated join tokens | +| NodeAttestor | [k8s_sat](/doc/plugin_server_nodeattestor_k8s_sat.md) (deprecated) | A node attestor which attests agent identity using a Kubernetes Service Account token | +| NodeAttestor | [k8s_psat](/doc/plugin_server_nodeattestor_k8s_psat.md) | A node attestor which attests agent identity using a Kubernetes Projected Service Account token | +| NodeAttestor | [sshpop](/doc/plugin_server_nodeattestor_sshpop.md) | A node attestor which attests agent identity using an existing ssh certificate | +| NodeAttestor | [tpm_devid](/doc/plugin_server_nodeattestor_tpm_devid.md) | A node attestor which attests agent identity using a TPM that has been provisioned with a DevID certificate | +| NodeAttestor | [x509pop](/doc/plugin_server_nodeattestor_x509pop.md) | A node attestor which attests agent identity using an existing X.509 certificate | +| UpstreamAuthority | [disk](/doc/plugin_server_upstreamauthority_disk.md) | Uses a CA loaded from disk to sign SPIRE server intermediate certificates. | +| UpstreamAuthority | [aws_pca](/doc/plugin_server_upstreamauthority_aws_pca.md) | Uses a Private Certificate Authority from AWS Certificate Manager to sign SPIRE server intermediate certificates. | +| UpstreamAuthority | [awssecret](/doc/plugin_server_upstreamauthority_awssecret.md) | Uses a CA loaded from AWS SecretsManager to sign SPIRE server intermediate certificates. | +| UpstreamAuthority | [gcp_cas](/doc/plugin_server_upstreamauthority_gcp_cas.md) | Uses a Private Certificate Authority from GCP Certificate Authority Service to sign SPIRE Server intermediate certificates. | +| UpstreamAuthority | [vault](/doc/plugin_server_upstreamauthority_vault.md) | Uses a PKI Secret Engine from HashiCorp Vault to sign SPIRE server intermediate certificates. | +| UpstreamAuthority | [spire](/doc/plugin_server_upstreamauthority_spire.md) | Uses an upstream SPIRE server in the same trust domain to obtain intermediate signing certificates for SPIRE server. 
| +| UpstreamAuthority | [cert-manager](/doc/plugin_server_upstreamauthority_cert_manager.md) | Uses a referenced cert-manager Issuer to request intermediate signing certificates. | +| Notifier | [gcs_bundle](/doc/plugin_server_notifier_gcs_bundle.md) | A notifier that pushes the latest trust bundle contents into an object in Google Cloud Storage. | +| Notifier | [k8sbundle](/doc/plugin_server_notifier_k8sbundle.md) | A notifier that pushes the latest trust bundle contents into a Kubernetes ConfigMap. | +| BundlePublisher | [aws_s3](/doc/plugin_server_bundlepublisher_aws_s3.md) | Publishes the trust bundle to an Amazon S3 bucket. | +| BundlePublisher | [gcp_cloudstorage](/doc/plugin_server_bundlepublisher_gcp_cloudstorage.md) | Publishes the trust bundle to a Google Cloud Storage bucket. | ## Server configuration file diff --git a/pkg/server/catalog/bundlepublisher.go b/pkg/server/catalog/bundlepublisher.go index 1cdfcd44bd..fd425749f7 100644 --- a/pkg/server/catalog/bundlepublisher.go +++ b/pkg/server/catalog/bundlepublisher.go @@ -4,6 +4,7 @@ import ( "github.com/spiffe/spire/pkg/common/catalog" "github.com/spiffe/spire/pkg/server/plugin/bundlepublisher" "github.com/spiffe/spire/pkg/server/plugin/bundlepublisher/awss3" + "github.com/spiffe/spire/pkg/server/plugin/bundlepublisher/gcpcloudstorage" ) type bundlePublisherRepository struct { @@ -25,6 +26,7 @@ func (repo *bundlePublisherRepository) Versions() []catalog.Version { func (repo *bundlePublisherRepository) BuiltIns() []catalog.BuiltIn { return []catalog.BuiltIn{ awss3.BuiltIn(), + gcpcloudstorage.BuiltIn(), } } diff --git a/pkg/server/plugin/bundlepublisher/gcpcloudstorage/client.go b/pkg/server/plugin/bundlepublisher/gcpcloudstorage/client.go new file mode 100644 index 0000000000..bb647fbce4 --- /dev/null +++ b/pkg/server/plugin/bundlepublisher/gcpcloudstorage/client.go @@ -0,0 +1,22 @@ +package gcpcloudstorage + +import ( + "context" + "io" + + "cloud.google.com/go/storage" + "google.golang.org/api/option" +) + +type gcsService interface { + Bucket(name string) *storage.BucketHandle + Close() error +} + +func newGCSClient(ctx context.Context, opts ...option.ClientOption) (gcsService, error) { + return storage.NewClient(ctx, opts...) 
+} + +func newStorageWriter(ctx context.Context, o *storage.ObjectHandle) io.WriteCloser { + return o.NewWriter(ctx) +} diff --git a/pkg/server/plugin/bundlepublisher/gcpcloudstorage/gcpcloudstorage.go b/pkg/server/plugin/bundlepublisher/gcpcloudstorage/gcpcloudstorage.go new file mode 100644 index 0000000000..ea0c304e8f --- /dev/null +++ b/pkg/server/plugin/bundlepublisher/gcpcloudstorage/gcpcloudstorage.go @@ -0,0 +1,263 @@ +package gcpcloudstorage + +import ( + "context" + "io" + "sync" + + "cloud.google.com/go/storage" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/hcl" + "github.com/spiffe/spire-plugin-sdk/pluginsdk/support/bundleformat" + bundlepublisherv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/server/bundlepublisher/v1" + "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/types" + configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1" + "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/telemetry" + "google.golang.org/api/option" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +const ( + pluginName = "gcp_cloudstorage" +) + +type pluginHooks struct { + newGCSClientFunc func(ctx context.Context, opts ...option.ClientOption) (gcsService, error) + newStorageWriterFunc func(ctx context.Context, o *storage.ObjectHandle) io.WriteCloser + wroteObjectFunc func() // Test hook called when an object was written. +} + +func BuiltIn() catalog.BuiltIn { + return builtin(New()) +} + +func New() *Plugin { + return newPlugin(newGCSClient, newStorageWriter) +} + +// Config holds the configuration of the plugin. +type Config struct { + BucketName string `hcl:"bucket_name" json:"bucket_name"` + ObjectName string `hcl:"object_name" json:"object_name"` + Format string `hcl:"format" json:"format"` + ServiceAccountFile string `hcl:"service_account_file" json:"service_account_file"` + + // bundleFormat is used to store the content of Format, parsed + // as bundleformat.Format. + bundleFormat bundleformat.Format +} + +// Plugin is the main representation of this bundle publisher plugin. +type Plugin struct { + bundlepublisherv1.UnsafeBundlePublisherServer + configv1.UnsafeConfigServer + + config *Config + configMtx sync.RWMutex + + bundle *types.Bundle + bundleMtx sync.RWMutex + + hooks pluginHooks + gcsClient gcsService + log hclog.Logger +} + +// SetLogger sets a logger in the plugin. +func (p *Plugin) SetLogger(log hclog.Logger) { + p.log = log +} + +// Configure configures the plugin. +func (p *Plugin) Configure(ctx context.Context, req *configv1.ConfigureRequest) (*configv1.ConfigureResponse, error) { + config, err := parseAndValidateConfig(req.HclConfiguration) + if err != nil { + return nil, err + } + + var opts []option.ClientOption + if config.ServiceAccountFile != "" { + opts = append(opts, option.WithCredentialsFile(config.ServiceAccountFile)) + } + + gcsClient, err := p.hooks.newGCSClientFunc(ctx, opts...) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to create client: %v", err) + } + p.gcsClient = gcsClient + + p.setConfig(config) + p.setBundle(nil) + return &configv1.ConfigureResponse{}, nil +} + +// PublishBundle puts the bundle in the configured GCS bucket and object name. 
+func (p *Plugin) PublishBundle(ctx context.Context, req *bundlepublisherv1.PublishBundleRequest) (*bundlepublisherv1.PublishBundleResponse, error) { + config, err := p.getConfig() + if err != nil { + return nil, err + } + + if req.Bundle == nil { + return nil, status.Error(codes.InvalidArgument, "missing bundle in request") + } + + currentBundle := p.getBundle() + if proto.Equal(req.Bundle, currentBundle) { + // Bundle not changed. No need to publish. + return &bundlepublisherv1.PublishBundleResponse{}, nil + } + + formatter := bundleformat.NewFormatter(req.Bundle) + bundleBytes, err := formatter.Format(config.bundleFormat) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not format bundle: %v", err.Error()) + } + + bucketHandle := p.gcsClient.Bucket(config.BucketName) + if bucketHandle == nil { // Purely defensive, the Bucket function implemented in GCS always returns a BucketHandle. + return nil, status.Error(codes.Internal, "could not get bucket handle") + } + + objectHandle := bucketHandle.Object(config.ObjectName) + if objectHandle == nil { // Purely defensive, the Object function implemented in GCS always returns an ObjectHandle. + return nil, status.Error(codes.Internal, "could not get object handle") + } + + storageWriter := p.hooks.newStorageWriterFunc(ctx, objectHandle) + if storageWriter == nil { // Purely defensive, the NewWriter function implemented in GCS always returns a storage writer. + return nil, status.Error(codes.Internal, "could not initialize storage writer") + } + + log := p.log.With( + "bucket_name", config.BucketName, + "object_name", config.ObjectName) + + _, err = storageWriter.Write(bundleBytes) + // The number of bytes written can be safely ignored. To determine if an + // object was successfully uploaded, we need to look at the error returned + // from storageWriter.Close(). + if err != nil { + // Close the storage writer before returning. + if closeErr := storageWriter.Close(); closeErr != nil { + log.With(telemetry.Error, closeErr).Error("Failed to close storage writer") + } + return nil, status.Errorf(codes.Internal, "failed to write bundle: %v", err) + } + + if err := storageWriter.Close(); err != nil { + return nil, status.Errorf(codes.Internal, "failed to close storage writer: %v", err) + } + + if p.hooks.wroteObjectFunc != nil { + p.hooks.wroteObjectFunc() + } + + p.setBundle(req.Bundle) + log.Debug("Bundle published") + return &bundlepublisherv1.PublishBundleResponse{}, nil +} + +// Close is called when the plugin is unloaded. Closes the client. +func (p *Plugin) Close() error { + if p.gcsClient == nil { + return nil + } + p.log.Debug("Closing the connection to the Cloud Storage API service") + return p.gcsClient.Close() +} + +// getBundle gets the latest bundle that the plugin has. +func (p *Plugin) getBundle() *types.Bundle { + p.bundleMtx.RLock() + defer p.bundleMtx.RUnlock() + + return p.bundle +} + +// getConfig gets the configuration of the plugin. +func (p *Plugin) getConfig() (*Config, error) { + p.configMtx.RLock() + defer p.configMtx.RUnlock() + + if p.config == nil { + return nil, status.Error(codes.FailedPrecondition, "not configured") + } + return p.config, nil +} + +// setBundle updates the current bundle in the plugin with the provided bundle. +func (p *Plugin) setBundle(bundle *types.Bundle) { + p.bundleMtx.Lock() + defer p.bundleMtx.Unlock() + + p.bundle = bundle +} + +// setConfig sets the configuration for the plugin.
+func (p *Plugin) setConfig(config *Config) { + p.configMtx.Lock() + defer p.configMtx.Unlock() + + p.config = config +} + +// builtin creates a new BundlePublisher built-in plugin. +func builtin(p *Plugin) catalog.BuiltIn { + return catalog.MakeBuiltIn(pluginName, + bundlepublisherv1.BundlePublisherPluginServer(p), + configv1.ConfigServiceServer(p), + ) +} + +// newPlugin returns a new plugin instance. +func newPlugin(newGCSClientFunc func(ctx context.Context, opts ...option.ClientOption) (gcsService, error), + newStorageWriterFunc func(ctx context.Context, o *storage.ObjectHandle) io.WriteCloser) *Plugin { + return &Plugin{ + hooks: pluginHooks{ + newGCSClientFunc: newGCSClientFunc, + newStorageWriterFunc: newStorageWriterFunc, + }, + } +} + +// parseAndValidateConfig returns an error if any configuration provided does +// not meet acceptable criteria +func parseAndValidateConfig(c string) (*Config, error) { + config := new(Config) + + if err := hcl.Decode(config, c); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "unable to decode configuration: %v", err) + } + + if config.BucketName == "" { + return nil, status.Error(codes.InvalidArgument, "configuration is missing the bucket name") + } + + if config.ObjectName == "" { + return nil, status.Error(codes.InvalidArgument, "configuration is missing the object name") + } + + if config.Format == "" { + return nil, status.Error(codes.InvalidArgument, "configuration is missing the bundle format") + } + bundleFormat, err := bundleformat.FromString(config.Format) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "could not parse bundle format from configuration: %v", err) + } + // The bundleformat package may support formats that this plugin does not + // support. Validate that the format is a supported format in this plugin. 
+ switch bundleFormat { + case bundleformat.JWKS: + case bundleformat.SPIFFE: + case bundleformat.PEM: + default: + return nil, status.Errorf(codes.InvalidArgument, "format not supported %q", config.Format) + } + + config.bundleFormat = bundleFormat + return config, nil +} diff --git a/pkg/server/plugin/bundlepublisher/gcpcloudstorage/gcpcloudstorage_test.go b/pkg/server/plugin/bundlepublisher/gcpcloudstorage/gcpcloudstorage_test.go new file mode 100644 index 0000000000..a8233217ab --- /dev/null +++ b/pkg/server/plugin/bundlepublisher/gcpcloudstorage/gcpcloudstorage_test.go @@ -0,0 +1,406 @@ +package gcpcloudstorage + +import ( + "context" + "crypto/x509" + "errors" + "io" + "testing" + + "cloud.google.com/go/storage" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/spire-plugin-sdk/pluginsdk/support/bundleformat" + bundlepublisherv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/server/bundlepublisher/v1" + "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/types" + configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1" + "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/test/plugintest" + "github.com/spiffe/spire/test/spiretest" + "github.com/spiffe/spire/test/util" + "github.com/stretchr/testify/require" + "google.golang.org/api/option" + "google.golang.org/grpc/codes" +) + +func TestConfigure(t *testing.T) { + for _, tt := range []struct { + name string + + configureRequest *configv1.ConfigureRequest + newClientErr error + expectCode codes.Code + expectMsg string + config *Config + }{ + { + name: "success", + config: &Config{ + ServiceAccountFile: "service-account-file", + BucketName: "bucket-name", + ObjectName: "object-name", + Format: "spiffe", + }, + }, + { + name: "no bucket", + config: &Config{ + ObjectName: "object-name", + Format: "spiffe", + }, + expectCode: codes.InvalidArgument, + expectMsg: "configuration is missing the bucket name", + }, + { + name: "no object name", + config: &Config{ + BucketName: "bucket-name", + Format: "spiffe", + }, + expectCode: codes.InvalidArgument, + expectMsg: "configuration is missing the object name", + }, + { + name: "no bundle format", + config: &Config{ + ObjectName: "object-name", + BucketName: "bucket-name", + }, + expectCode: codes.InvalidArgument, + expectMsg: "configuration is missing the bundle format", + }, + { + name: "client error", + config: &Config{ + ServiceAccountFile: "service-account-file", + BucketName: "bucket-name", + ObjectName: "object-name", + Format: "spiffe", + }, + expectCode: codes.Internal, + expectMsg: "failed to create client: client creation error", + newClientErr: errors.New("client creation error"), + }, + { + name: "invalid format", + config: &Config{ + BucketName: "bucket-name", + ObjectName: "object-name", + Format: "invalid-format", + }, + expectCode: codes.InvalidArgument, + expectMsg: "could not parse bundle format from configuration: unknown bundle format: \"invalid-format\"", + }, + } { + t.Run(tt.name, func(t *testing.T) { + var err error + options := []plugintest.Option{ + plugintest.CaptureConfigureError(&err), + plugintest.CoreConfig(catalog.CoreConfig{ + TrustDomain: spiffeid.RequireTrustDomainFromString("example.org"), + }), + plugintest.ConfigureJSON(tt.config), + } + + newClient := func(ctx context.Context, opts ...option.ClientOption) (gcsService, error) { + if tt.newClientErr != nil { + return nil, tt.newClientErr + } + return &fakeClient{ + clientOptions: opts, + }, nil + } + + newStorageWriter := func(ctx 
context.Context, o *storage.ObjectHandle) io.WriteCloser { + return &fakeStorageWriter{} + } + p := newPlugin(newClient, newStorageWriter) + + plugintest.Load(t, builtin(p), nil, options...) + spiretest.RequireGRPCStatusHasPrefix(t, err, tt.expectCode, tt.expectMsg) + + if tt.expectMsg != "" { + require.Nil(t, p.config) + return + } + + // Check that the plugin has the expected configuration. + tt.config.bundleFormat, err = bundleformat.FromString(tt.config.Format) + require.NoError(t, err) + require.Equal(t, tt.config, p.config) + + client, ok := p.gcsClient.(*fakeClient) + require.True(t, ok) + + // It's important to check that the configuration has been wired + // up to the gcs config, that needs to have the specified service + // account file. + require.Equal(t, []option.ClientOption{option.WithCredentialsFile(tt.config.ServiceAccountFile)}, client.clientOptions) + }) + } +} + +func TestPublishBundle(t *testing.T) { + testBundle := getTestBundle(t) + config := &Config{ + BucketName: "bucket-name", + ObjectName: "object-name", + Format: "spiffe", + } + + for _, tt := range []struct { + name string + + newClientErr error + expectCode codes.Code + expectMsg string + noConfig bool + bundle *types.Bundle + writeErr error + closeErr error + }{ + { + name: "success", + bundle: testBundle, + }, + { + name: "multiple times", + bundle: testBundle, + }, + { + name: "write failure", + bundle: testBundle, + writeErr: errors.New("write error"), + expectCode: codes.Internal, + expectMsg: "failed to write bundle: write error", + }, + { + name: "close failure", + bundle: testBundle, + closeErr: errors.New("close error"), + expectCode: codes.Internal, + expectMsg: "failed to close storage writer: close error", + }, + { + name: "not configured", + noConfig: true, + expectCode: codes.FailedPrecondition, + expectMsg: "not configured", + }, + { + name: "missing bundle", + expectCode: codes.InvalidArgument, + expectMsg: "missing bundle in request", + }, + } { + t.Run(tt.name, func(t *testing.T) { + var err error + options := []plugintest.Option{ + plugintest.CaptureConfigureError(&err), + plugintest.CoreConfig(catalog.CoreConfig{ + TrustDomain: spiffeid.RequireTrustDomainFromString("example.org"), + }), + plugintest.ConfigureJSON(config), + } + + newClient := func(ctx context.Context, opts ...option.ClientOption) (gcsService, error) { + return &fakeClient{ + clientOptions: opts, + }, nil + } + + newStorageWriter := func(ctx context.Context, o *storage.ObjectHandle) io.WriteCloser { + return &fakeStorageWriter{ + writeErr: tt.writeErr, + closeErr: tt.closeErr, + } + } + p := newPlugin(newClient, newStorageWriter) + + if !tt.noConfig { + plugintest.Load(t, builtin(p), nil, options...) 
+ require.NoError(t, err) + } + + resp, err := p.PublishBundle(context.Background(), &bundlepublisherv1.PublishBundleRequest{ + Bundle: tt.bundle, + }) + + if tt.expectMsg != "" { + spiretest.RequireGRPCStatusContains(t, err, tt.expectCode, tt.expectMsg) + return + } + require.NoError(t, err) + require.NotNil(t, resp) + }) + } +} + +func TestPublishMultiple(t *testing.T) { + config := &Config{ + BucketName: "bucket-name", + ObjectName: "object-name", + Format: "spiffe", + } + + var err error + options := []plugintest.Option{ + plugintest.CaptureConfigureError(&err), + plugintest.CoreConfig(catalog.CoreConfig{ + TrustDomain: spiffeid.RequireTrustDomainFromString("example.org"), + }), + plugintest.ConfigureJSON(config), + } + + newClient := func(ctx context.Context, opts ...option.ClientOption) (gcsService, error) { + return &fakeClient{ + clientOptions: opts, + }, nil + } + newStorageWriter := getFakeNewStorageWriterFunc(nil, nil) + p := newPlugin(newClient, newStorageWriter) + + var testWriteObjectCount int + p.hooks.wroteObjectFunc = func() { testWriteObjectCount++ } + plugintest.Load(t, builtin(p), nil, options...) + require.NoError(t, err) + + // Test multiple write operations, and check that only a call to Write is + // done when there is a modified bundle that was not successfully published + // before. + + // Have an initial bundle with SequenceNumber = 1. + bundle := getTestBundle(t) + bundle.SequenceNumber = 1 + + // Reset the testWriteObjectCount counter. + testWriteObjectCount = 0 + resp, err := p.PublishBundle(context.Background(), &bundlepublisherv1.PublishBundleRequest{ + Bundle: bundle, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.Equal(t, 1, testWriteObjectCount) + + // Call PublishBundle with the same bundle. + resp, err = p.PublishBundle(context.Background(), &bundlepublisherv1.PublishBundleRequest{ + Bundle: bundle, + }) + require.NoError(t, err) + require.NotNil(t, resp) + + // The same bundle was used, the testWriteObjectCount counter should be still 1. + require.Equal(t, 1, testWriteObjectCount) + + // Have a new bundle and call PublishBundle. + bundle = getTestBundle(t) + bundle.SequenceNumber = 2 + resp, err = p.PublishBundle(context.Background(), &bundlepublisherv1.PublishBundleRequest{ + Bundle: bundle, + }) + require.NoError(t, err) + require.NotNil(t, resp) + + // PublishBundle was called with a different bundle, testWriteObjectCount should + // be incremented to be 2. + require.Equal(t, 2, testWriteObjectCount) + + // Simulate that there is an error writing to the storage. + p.hooks.newStorageWriterFunc = getFakeNewStorageWriterFunc(errors.New("write error"), nil) + + resp, err = p.PublishBundle(context.Background(), &bundlepublisherv1.PublishBundleRequest{ + Bundle: bundle, + }) + // Since there is no change in the bundle, Write should not be called + // and there should be no error. + require.NoError(t, err) + require.NotNil(t, resp) + + // The same bundle was used, the testWriteObjectCount counter should be still 2. + require.Equal(t, 2, testWriteObjectCount) + + // Have a new bundle and call PublishBundle. Write should be called this + // time and return an error. + bundle = getTestBundle(t) + bundle.SequenceNumber = 3 + resp, err = p.PublishBundle(context.Background(), &bundlepublisherv1.PublishBundleRequest{ + Bundle: bundle, + }) + require.Error(t, err) + require.Nil(t, resp) + + // Since the bundle could not be published, testWriteObjectCount should be + // still 2. 
+ require.Equal(t, 2, testWriteObjectCount) + + // Clear the Write error and call PublishBundle. + p.hooks.newStorageWriterFunc = getFakeNewStorageWriterFunc(nil, nil) + resp, err = p.PublishBundle(context.Background(), &bundlepublisherv1.PublishBundleRequest{ + Bundle: bundle, + }) + + // No error should happen this time. + require.NoError(t, err) + require.NotNil(t, resp) + + // The testWriteObjectCount counter should be incremented to 3, since the bundle + // should have been published successfully. + require.Equal(t, 3, testWriteObjectCount) +} + +type fakeClient struct { + clientOptions []option.ClientOption +} + +func (c *fakeClient) Bucket(string) *storage.BucketHandle { + return &storage.BucketHandle{} +} + +func (c *fakeClient) Close() error { + return nil +} + +type fakeStorageWriter struct { + writeErr error + closeErr error +} + +func (s *fakeStorageWriter) Write(p []byte) (n int, err error) { + if s.writeErr == nil { + return len(p), nil + } + return 0, s.writeErr +} + +func (s *fakeStorageWriter) Close() error { + return s.closeErr +} + +func getFakeNewStorageWriterFunc(writeErr, closeErr error) func(ctx context.Context, o *storage.ObjectHandle) io.WriteCloser { + return func(ctx context.Context, o *storage.ObjectHandle) io.WriteCloser { + return &fakeStorageWriter{ + writeErr: writeErr, + closeErr: closeErr, + } + } +} + +func getTestBundle(t *testing.T) *types.Bundle { + cert, _, err := util.LoadCAFixture() + require.NoError(t, err) + + keyPkix, err := x509.MarshalPKIXPublicKey(cert.PublicKey) + require.NoError(t, err) + + return &types.Bundle{ + TrustDomain: "example.org", + X509Authorities: []*types.X509Certificate{{Asn1: cert.Raw}}, + JwtAuthorities: []*types.JWTKey{ + { + KeyId: "KID", + PublicKey: keyPkix, + }, + }, + RefreshHint: 1440, + SequenceNumber: 100, + } +}
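The samples in the new plugin document show only the `BundlePublisher` block. For readers wiring the plugin into a server, here is a minimal sketch of how that block sits inside the `plugins` section of a SPIRE Server configuration file; the values are taken from the samples above, and the surrounding `server` block and other plugins are omitted for brevity.

```hcl
plugins {
    BundlePublisher "gcp_cloudstorage" {
        plugin_data {
            # Bucket and object that hold the published trust bundle.
            bucket_name = "spire-bundle"
            object_name = "example.org"

            # One of: spiffe, jwks, pem.
            format = "spiffe"
        }
    }
}
```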
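Because the plugin writes the bundle as a regular Cloud Storage object, consumers outside SPIRE can fetch and parse it with standard libraries. The sketch below is illustrative only and not part of this change: it assumes the `spiffe` format and the sample `spire-bundle` bucket and `example.org` object, authenticates with Application Default Credentials, and uses the `cloud.google.com/go/storage` client together with the go-spiffe v2 `spiffebundle` package to read the published bundle back.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
	"github.com/spiffe/go-spiffe/v2/bundle/spiffebundle"
	"github.com/spiffe/go-spiffe/v2/spiffeid"
)

func main() {
	ctx := context.Background()

	// Authenticate with Application Default Credentials, mirroring the
	// plugin's behavior when service_account_file is not set.
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatalf("failed to create storage client: %v", err)
	}
	defer client.Close()

	// Read the object that the bundle publisher keeps up to date.
	r, err := client.Bucket("spire-bundle").Object("example.org").NewReader(ctx)
	if err != nil {
		log.Fatalf("failed to open bundle object: %v", err)
	}
	defer r.Close()

	bundleBytes, err := io.ReadAll(r)
	if err != nil {
		log.Fatalf("failed to read bundle object: %v", err)
	}

	// Parse the SPIFFE-format bundle (a JWK Set with SPIFFE parameters).
	td := spiffeid.RequireTrustDomainFromString("example.org")
	bundle, err := spiffebundle.Parse(td, bundleBytes)
	if err != nil {
		log.Fatalf("failed to parse bundle: %v", err)
	}

	fmt.Printf("X.509 authorities: %d, JWT authorities: %d\n",
		len(bundle.X509Authorities()), len(bundle.JWTAuthorities()))
}
```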