diff --git a/.changeset/red-cats-attend.md b/.changeset/red-cats-attend.md new file mode 100644 index 00000000000..b66b9305f09 --- /dev/null +++ b/.changeset/red-cats-attend.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +#internal Refactor system tests diff --git a/core/scripts/cre/environment/environment/environment.go b/core/scripts/cre/environment/environment/environment.go index 6f39a288bcc..7a50b7f055a 100644 --- a/core/scripts/cre/environment/environment/environment.go +++ b/core/scripts/cre/environment/environment/environment.go @@ -424,7 +424,7 @@ func startCmd() *cobra.Command { // environment without full setup. Then persist absolute paths to the // generated artifacts (env artifact JSON and the cached CTF config) in // `artifact_paths.json`. System tests use these to reload environment -// state across runs (see `system-tests/tests/smoke/cre/capabilities_test.go`), +// state across runs (see `system-tests/tests/smoke/cre/cre_suite_test.go`), // where the cached config and env artifact are consumed to reconstruct // the in-memory CLDF environment without re-provisioning. // diff --git a/core/scripts/cre/environment/environment/workflow.go b/core/scripts/cre/environment/environment/workflow.go index 43cff2196ea..d417aa61a14 100644 --- a/core/scripts/cre/environment/environment/workflow.go +++ b/core/scripts/cre/environment/environment/workflow.go @@ -380,7 +380,7 @@ func compileWorkflow(workflowFilePathFlag, workflowNameFlag string) (string, err } func deployWorkflow(ctx context.Context, wasmWorkflowFilePathFlag, workflowNameFlag, workflowOwnerAddressFlag, workflowRegistryAddressFlag, capabilitiesRegistryAddressFlag, containerNamePatternFlag, containerTargetDirFlag, configFilePathFlag, secretsFilePathFlag, rpcURLFlag string, donIDFlag uint32, deleteWorkflowFile bool) error { - copyErr := creworkflow.CopyArtifactToDockerContainers(wasmWorkflowFilePathFlag, containerNamePatternFlag, containerTargetDirFlag) + copyErr := creworkflow.CopyArtifactsToDockerContainers(containerTargetDirFlag, containerNamePatternFlag, wasmWorkflowFilePathFlag) if copyErr != nil { return errors.Wrap(copyErr, "❌ failed to copy workflow to Docker container") } @@ -409,7 +409,7 @@ func deployWorkflow(ctx context.Context, wasmWorkflowFilePathFlag, workflowNameF return errors.Wrap(configPathAbsErr, "failed to get absolute path of the config file") } - configCopyErr := creworkflow.CopyArtifactToDockerContainers(configFilePathFlag, containerNamePatternFlag, containerTargetDirFlag) + configCopyErr := creworkflow.CopyArtifactsToDockerContainers(containerTargetDirFlag, containerNamePatternFlag, configFilePathFlag) if configCopyErr != nil { return errors.Wrap(configCopyErr, "❌ failed to copy config file to Docker container") } @@ -436,7 +436,7 @@ func deployWorkflow(ctx context.Context, wasmWorkflowFilePathFlag, workflowNameF fmt.Printf("\n✅ Encrypted workflow secrets file prepared\n\n") fmt.Printf("\n⚙️ Copying encrypted secrets file to Docker container\n") - secretsCopyErr := creworkflow.CopyArtifactToDockerContainers(secretPathAbs, containerNamePatternFlag, containerTargetDirFlag) + secretsCopyErr := creworkflow.CopyArtifactsToDockerContainers(containerTargetDirFlag, containerNamePatternFlag, secretPathAbs) if secretsCopyErr != nil { return errors.Wrap(secretsCopyErr, "❌ failed to copy encrypted secrets file to Docker container") }
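With the refactor, `CopyArtifactsToDockerContainers` takes the container target directory first and accepts a variadic list of files (see `system-tests/lib/cre/workflow/docker.go` below), so the three copies above could also be batched into one call. A minimal sketch, assuming the same flags as `deployWorkflow` (the batching itself is not part of this PR):

```go
// Sketch: batching workflow artifacts in one variadic copy call.
copyErr := creworkflow.CopyArtifactsToDockerContainers(
	containerTargetDirFlag,   // directory inside the containers
	containerNamePatternFlag, // pattern used to match container names
	wasmWorkflowFilePathFlag,
	configFilePathFlag,
)
if copyErr != nil {
	return errors.Wrap(copyErr, "❌ failed to copy workflow artifacts to Docker container")
}
```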
diff --git a/system-tests/lib/cre/capabilities/vault/vault.go b/system-tests/lib/cre/capabilities/vault/vault.go index 528a00d851d..d84401ee370 100644 --- a/system-tests/lib/cre/capabilities/vault/vault.go +++ b/system-tests/lib/cre/capabilities/vault/vault.go @@ -56,6 +56,27 @@ func New(chainID uint64) (*capabilities.Capability, error) { ) } +func EncryptSecret(secret string) (string, error) { + masterPublicKey := tdh2easy.PublicKey{} + masterPublicKeyBytes, err := hex.DecodeString(MasterPublicKeyStr) + if err != nil { + return "", errors.Wrap(err, "failed to decode master public key") + } + err = masterPublicKey.Unmarshal(masterPublicKeyBytes) + if err != nil { + return "", errors.Wrap(err, "failed to unmarshal master public key") + } + cipher, err := tdh2easy.Encrypt(&masterPublicKey, []byte(secret)) + if err != nil { + return "", errors.Wrap(err, "failed to encrypt secret") + } + cipherBytes, err := cipher.Marshal() + if err != nil { + return "", errors.Wrap(err, "failed to marshal encrypted secret to bytes") + } + return hex.EncodeToString(cipherBytes), nil +} + func jobSpec(chainID uint64) cre.JobSpecFn { return func(input *cre.JobSpecInput) (cre.DonsToJobSpecs, error) { if input.DonTopology == nil {
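The exported `EncryptSecret` lets suite tests encrypt values under the vault DON's master public key without importing `tdh2easy` directly, which is also why `go/tdh2` drops to an indirect dependency in `system-tests/tests/go.mod` later in this diff. A sketch of the expected test-side usage, assuming the `crevault` import alias and the `vaultcommon` types used elsewhere in this PR:

```go
// Sketch: encrypting a value for a vault secrets-create request.
ciphertextHex, err := crevault.EncryptSecret("my secret value")
require.NoError(t, err, "failed to encrypt secret")

// The hex-encoded TDH2 ciphertext goes into the request payload.
encrypted := &vaultcommon.EncryptedSecret{
	Id:             &vaultcommon.SecretIdentifier{Key: secretID, Owner: owner},
	EncryptedValue: ciphertextHex,
}
```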
diff --git a/system-tests/lib/cre/contracts/contracts.go b/system-tests/lib/cre/contracts/contracts.go index 95dcc815f1a..3467b22e050 100644 --- a/system-tests/lib/cre/contracts/contracts.go +++ b/system-tests/lib/cre/contracts/contracts.go @@ -409,6 +409,29 @@ func MustFindAddressesForChain(addressBook cldf.AddressBook, chainSelector uint6 return addr } +// MergeAllDataStores merges all DataStores (after contract deployments) +func MergeAllDataStores(fullCldEnvOutput *cre.FullCLDEnvironmentOutput, changesetOutputs ...cldf.ChangesetOutput) { + fmt.Print("Merging DataStores (after contract deployments)...") + minChangesets := 2 + if len(changesetOutputs) < minChangesets { + panic(fmt.Errorf("DataStores merging failed: at least %d changesets required", minChangesets)) + } + + // Start with the first changeset's data store + baseDataStore := changesetOutputs[0].DataStore + + // Merge all subsequent changesets into the base data store + for i := 1; i < len(changesetOutputs); i++ { + otherDataStore := changesetOutputs[i].DataStore + mergeErr := baseDataStore.Merge(otherDataStore.Seal()) + if mergeErr != nil { + panic(errors.Wrap(mergeErr, "DataStores merging failed")) + } + } + + fullCldEnvOutput.Environment.DataStore = baseDataStore.Seal() +} + func ConfigureWorkflowRegistry(testLogger zerolog.Logger, input *cre.WorkflowRegistryInput) (*cre.WorkflowRegistryOutput, error) { if input == nil { return nil, errors.New("input is nil") @@ -519,3 +542,61 @@ func ConfigureDataFeedsCache(testLogger zerolog.Logger, input *cre.ConfigureData return out, nil } + +func DeployDataFeedsCacheContract(testLogger zerolog.Logger, chainSelector uint64, fullCldEnvOutput *cre.FullCLDEnvironmentOutput) (common.Address, cldf.ChangesetOutput, error) { + testLogger.Info().Msg("Deploying Data Feeds Cache contract...") + deployDfConfig := df_changeset_types.DeployConfig{ + ChainsToDeploy: []uint64{chainSelector}, + Labels: []string{"data-feeds"}, // label required by the changeset + } + + dfOutput, dfErr := commonchangeset.RunChangeset(df_changeset.DeployCacheChangeset, *fullCldEnvOutput.Environment, deployDfConfig) + if dfErr != nil { + return common.Address{}, cldf.ChangesetOutput{}, errors.Wrapf(dfErr, "failed to deploy Data Feeds Cache contract on chain %d", chainSelector) + } + + mergeErr := fullCldEnvOutput.Environment.ExistingAddresses.Merge(dfOutput.AddressBook) //nolint:staticcheck // won't migrate now + if mergeErr != nil { + return common.Address{}, cldf.ChangesetOutput{}, errors.Wrap(mergeErr, "failed to merge address book of Data Feeds Cache contract") + } + testLogger.Info().Msgf("Data Feeds Cache contract deployed to chain %d", chainSelector) + + dataFeedsCacheAddress, dataFeedsCacheErr := FindAddressesForChain( + fullCldEnvOutput.Environment.ExistingAddresses, //nolint:staticcheck // won't migrate now + chainSelector, + df_changeset.DataFeedsCache.String(), + ) + if dataFeedsCacheErr != nil { + return common.Address{}, cldf.ChangesetOutput{}, errors.Wrapf(dataFeedsCacheErr, "failed to find Data Feeds Cache contract address on chain %d", chainSelector) + } + testLogger.Info().Msgf("Data Feeds Cache contract found on chain %d at address %s", chainSelector, dataFeedsCacheAddress) + + return dataFeedsCacheAddress, dfOutput, nil +} + +func DeployReadBalancesContract(testLogger zerolog.Logger, chainSelector uint64, fullCldEnvOutput *cre.FullCLDEnvironmentOutput) (common.Address, cldf.ChangesetOutput, error) { + testLogger.Info().Msg("Deploying Read Balances contract...") + deployReadBalanceRequest := &keystone_changeset.DeployRequestV2{ChainSel: chainSelector} + rbOutput, rbErr := keystone_changeset.DeployBalanceReaderV2(*fullCldEnvOutput.Environment, deployReadBalanceRequest) + if rbErr != nil { + return common.Address{}, cldf.ChangesetOutput{}, errors.Wrap(rbErr, "failed to deploy Read Balances contract") + } + + mergeErr2 := fullCldEnvOutput.Environment.ExistingAddresses.Merge(rbOutput.AddressBook) //nolint:staticcheck // won't migrate now + if mergeErr2 != nil { + return common.Address{}, cldf.ChangesetOutput{}, errors.Wrap(mergeErr2, "failed to merge address book of Read Balances contract") + } + testLogger.Info().Msgf("Read Balances contract deployed to chain %d", chainSelector) + + readBalancesAddress, readContractErr := FindAddressesForChain( + fullCldEnvOutput.Environment.ExistingAddresses, //nolint:staticcheck // won't migrate now + chainSelector, + keystone_changeset.BalanceReader.String(), + ) + if readContractErr != nil { + return common.Address{}, cldf.ChangesetOutput{}, errors.Wrap(readContractErr, "failed to find Read Balances contract address") + } + testLogger.Info().Msgf("Read Balances contract found on chain %d at address %s", chainSelector, readBalancesAddress) + + return readBalancesAddress, rbOutput, nil +}
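The two deploy helpers return their `cldf.ChangesetOutput` values precisely so that callers can finish with a single `MergeAllDataStores` call instead of merging data stores by hand, as the deleted test below did. A sketch of the intended composition, assuming a per-chain loop like the old test's (logger and selector variables are illustrative):

```go
// Sketch: deploy both contracts on a chain, then merge the changeset
// outputs into the environment's sealed DataStore in one step.
dfAddr, dfOut, dfErr := crecontracts.DeployDataFeedsCacheContract(testLogger, chainSelector, fullCldEnvOutput)
require.NoError(t, dfErr, "failed to deploy Data Feeds Cache contract")

rbAddr, rbOut, rbErr := crecontracts.DeployReadBalancesContract(testLogger, chainSelector, fullCldEnvOutput)
require.NoError(t, rbErr, "failed to deploy Read Balances contract")

// MergeAllDataStores panics unless at least two changeset outputs are given.
crecontracts.MergeAllDataStores(fullCldEnvOutput, dfOut, rbOut)
testLogger.Info().Msgf("contracts deployed at %s and %s", dfAddr, rbAddr)
```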
diff --git a/system-tests/lib/cre/workflow/docker.go b/system-tests/lib/cre/workflow/docker.go index 9843a877e05..0bcb8c9d244 100644 --- a/system-tests/lib/cre/workflow/docker.go +++ b/system-tests/lib/cre/workflow/docker.go @@ -43,7 +43,23 @@ func findAllDockerContainerNames(pattern string) ([]string, error) { return containerNames, nil } -func CopyArtifactToDockerContainers(filePath string, containerNamePattern string, targetDir string) error { +func CopyArtifactsToDockerContainers(containerTargetDir string, containerNamePattern string, filesToCopy ...string) error { + for _, file := range filesToCopy { + if _, err := os.Stat(file); err != nil { + fmt.Fprintf(os.Stderr, "Warning: file '%s' does not exist. Skipping copy to Docker containers\n", file) + continue + } + + copyErr := copyArtifactToDockerContainers(file, containerNamePattern, containerTargetDir) + if copyErr != nil { + return errors.Wrapf(copyErr, "failed to copy file %s to Docker containers", file) + } + } + return nil +} + +func copyArtifactToDockerContainers(filePath string, containerNamePattern string, targetDir string) error { + fmt.Printf("Copying file '%s' to Docker containers.\n", filePath) containerNames, containerNamesErr := findAllDockerContainerNames(containerNamePattern) if containerNamesErr != nil { return errors.Wrap(containerNamesErr, "failed to find Docker containers") diff --git a/system-tests/lib/cre/workflow/workflow.go b/system-tests/lib/cre/workflow/workflow.go index cb225823d21..ecf382f09c2 100644 --- a/system-tests/lib/cre/workflow/workflow.go +++ b/system-tests/lib/cre/workflow/workflow.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "math/big" + "os" "path/filepath" "strings" @@ -38,7 +39,7 @@ func RegisterWithContract(ctx context.Context, sc *seth.Client, workflowRegistry var configData []byte var configErr error configURLToUse := "" - if configURL != nil { + if configURL != nil && *configURL != "" { configData, configErr = libnet.Download(ctx, *configURL) if configErr != nil { return "", errors.Wrap(configErr, "failed to download workflow config") } @@ -52,7 +53,7 @@ } secretsURLToUse := "" - if secretsURL != nil { + if secretsURL != nil && *secretsURL != "" { if artifactsDirInContainer != nil { secretsURLToUse = fmt.Sprintf("file://%s/%s", *artifactsDirInContainer, filepath.Base(*secretsURL)) } else { @@ -157,6 +158,20 @@ func DeleteWithContract(ctx context.Context, sc *seth.Client, workflowRegistryAd return nil } +func RemoveWorkflowArtifactsFromLocalEnv(workflowArtifactsLocations ...string) error { + for _, artifactLocation := range workflowArtifactsLocations { + if artifactLocation == "" { + continue + } + + err := os.Remove(artifactLocation) + if err != nil { + return errors.Wrapf(err, "failed to remove workflow artifact located at %s", artifactLocation) + } + } + return nil +} + func generateWorkflowIDFromStrings(owner string, name string, workflow []byte, config []byte, secretsURL string) (string, error) { ownerWithoutPrefix := owner if strings.HasPrefix(owner, "0x") { diff --git a/system-tests/lib/crypto/evm.go b/system-tests/lib/crypto/evm.go index 60d505fe14b..bf212577c09 100644 --- a/system-tests/lib/crypto/evm.go +++ b/system-tests/lib/crypto/evm.go @@ -1,7 +1,12 @@ package crypto import ( + "crypto/ecdsa" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + + "github.com/pkg/errors" "github.com/smartcontractkit/chainlink-testing-framework/framework/clclient" ) @@ -27,3 +32,18 @@ func GenerateEVMKeys(password string, n int) (*EVMKeys, error) { } return result, nil } + +// GenerateNewKeyPair generates a new private and public key pair and returns +// the public address together with the private key. +func GenerateNewKeyPair() (common.Address, *ecdsa.PrivateKey, error) { + privateKey, pkErr := crypto.GenerateKey() + if pkErr != nil { + return common.Address{}, nil, errors.Wrap(pkErr, "failed to generate a new private key (EOA)") + } + + publicKeyAddr := crypto.PubkeyToAddress(privateKey.PublicKey) + return publicKeyAddr, privateKey, nil +}
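`RemoveWorkflowArtifactsFromLocalEnv` silently skips empty locations, so a test can register a single cleanup for every artifact it may or may not have produced, replacing the per-file `os.Remove` blocks in the deleted test further down. A sketch, with illustrative variable names:

```go
// Sketch: one cleanup for all locally generated workflow artifacts;
// empty locations are skipped by the helper.
t.Cleanup(func() {
	removeErr := creworkflow.RemoveWorkflowArtifactsFromLocalEnv(
		compressedWorkflowWasmPath,
		workflowConfigFilePath, // may be "" when the workflow has no config
	)
	if removeErr != nil {
		framework.L.Warn().Msgf("failed to remove workflow artifacts: %s", removeErr.Error())
	}
})
```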
diff --git a/system-tests/tests/go.mod b/system-tests/tests/go.mod index 380d13a0ccc..6e2f610c83b 100644 --- a/system-tests/tests/go.mod +++ b/system-tests/tests/go.mod @@ -42,7 +42,6 @@ require ( github.com/smartcontractkit/chainlink/deployment v0.0.0-20250826151008-ae5ec0ee6f2c github.com/smartcontractkit/chainlink/system-tests/lib v0.0.0-20250826151008-ae5ec0ee6f2c github.com/smartcontractkit/libocr v0.0.0-20250707144819-babe0ec4e358 - github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20250624150019-e49f7e125e6b github.com/spf13/cobra v1.9.1 github.com/stretchr/testify v1.10.0 golang.org/x/sync v0.16.0 @@ -537,6 +536,7 @@ require ( github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 // indirect github.com/smartcontractkit/mcms v0.21.1 // indirect github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20241009055228-33d0c0bf38de // indirect + github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20250624150019-e49f7e125e6b // indirect github.com/smartcontractkit/wsrpc v0.8.5-0.20250502134807-c57d3d995945 // indirect github.com/sony/gobreaker/v2 v2.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect diff --git a/system-tests/tests/smoke/cre/README.md b/system-tests/tests/smoke/cre/README.md index 79a064686a0..467e45dadc3 100644 --- a/system-tests/tests/smoke/cre/README.md +++ b/system-tests/tests/smoke/cre/README.md @@ -138,6 +138,7 @@ Only if you want to run the tests on non-default topology you need to set follow - `CTF_CONFIGS` -- either `configs/workflow-gateway-don.toml` or `configs/workflow-gateway-capabilities-don.toml` - `CRE_TOPOLOGY` -- either `workflow-gateway` or `workflow-gateway-capabilities` +- `CTF_LOG_LEVEL=debug` -- to display debug-level test logs --- @@ -173,7 +174,7 @@ Example `launch.json` entry: "request": "launch", "mode": "test", "program": "${workspaceFolder}/system-tests/tests/smoke/cre", - "args": ["-test.run", "Test_CRE_Workflow_Don"] + "args": ["-test.run", "Test_CRE_Suite"] } ``` @@ -578,7 +579,7 @@ After compilation, workflow files must be distributed to the appropriate contain containerTargetDir := "/home/chainlink/workflows" // Copy compiled workflow binary -workflowCopyErr := creworkflow.CopyArtifactToDockerContainers( - compressedWorkflowWasmPath, - "workflow-node", - containerTargetDir, +workflowCopyErr := creworkflow.CopyArtifactsToDockerContainers( + containerTargetDir, + "workflow-node", + compressedWorkflowWasmPath, ) require.NoError(t, workflowCopyErr, "failed to copy workflow to docker containers") // Copy configuration file -configCopyErr := creworkflow.CopyArtifactToDockerContainers( - workflowConfigFilePath, - "workflow-node", - containerTargetDir, +configCopyErr := creworkflow.CopyArtifactsToDockerContainers( + containerTargetDir, + "workflow-node", + workflowConfigFilePath, ) @@ -681,11 +682,11 @@ func setupWorkflow(t *testing.T, workflowSourcePath, workflowName string, config // 3. Copy files to containers containerTargetDir := "/home/chainlink/workflows" - err := creworkflow.CopyArtifactToDockerContainers(compressedWorkflowWasmPath, "workflow-node", containerTargetDir) + err := creworkflow.CopyArtifactsToDockerContainers(containerTargetDir, "workflow-node", compressedWorkflowWasmPath) require.NoError(t, err, "failed to copy workflow binary") if configFilePath != "" { - err = creworkflow.CopyArtifactToDockerContainers(configFilePath, "workflow-node", containerTargetDir) + err = creworkflow.CopyArtifactsToDockerContainers(containerTargetDir, "workflow-node", configFilePath) require.NoError(t, err, "failed to copy config file") }
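Since the refactored helper is variadic, the README's two-step copy above could equally be shown as a single call; a possible simplification, not part of this diff:

```go
// Sketch: binary and config copied in one variadic call.
err := creworkflow.CopyArtifactsToDockerContainers(
	containerTargetDir,
	"workflow-node",
	compressedWorkflowWasmPath,
	workflowConfigFilePath,
)
require.NoError(t, err, "failed to copy workflow artifacts")
```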
@@ -777,7 +778,7 @@ encryptedSecretsPath, err := creworkflow.PrepareSecrets( require.NoError(t, err, "failed to prepare secrets") // 3. Copy encrypted secrets to containers -err = creworkflow.CopyArtifactToDockerContainers( - encryptedSecretsPath, - "workflow-node", - "/home/chainlink/workflows", +err = creworkflow.CopyArtifactsToDockerContainers( + "/home/chainlink/workflows", + "workflow-node", + encryptedSecretsPath, ) @@ -869,10 +870,10 @@ func setupWorkflowWithSecrets(t *testing.T, workflowSourcePath, workflowName, se // Copy files to containers containerTargetDir := "/home/chainlink/workflows" - err = creworkflow.CopyArtifactToDockerContainers(compressedWorkflowWasmPath, "workflow-node", containerTargetDir) + err = creworkflow.CopyArtifactsToDockerContainers(containerTargetDir, "workflow-node", compressedWorkflowWasmPath) require.NoError(t, err, "failed to copy workflow") - err = creworkflow.CopyArtifactToDockerContainers(encryptedSecretsPath, "workflow-node", containerTargetDir) + err = creworkflow.CopyArtifactsToDockerContainers(containerTargetDir, "workflow-node", encryptedSecretsPath) require.NoError(t, err, "failed to copy secrets") // Register workflow with secrets diff --git a/system-tests/tests/smoke/cre/before_suite.go b/system-tests/tests/smoke/cre/before_suite.go new file mode 100644 index 00000000000..1a492a9ee09 --- /dev/null +++ b/system-tests/tests/smoke/cre/before_suite.go @@ -0,0 +1,140 @@ +package cre + +import ( + "encoding/json" + "os" + "os/exec" + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-testing-framework/framework" + "github.com/smartcontractkit/chainlink-testing-framework/framework/components/blockchain" + + cldlogger "github.com/smartcontractkit/chainlink/deployment/logger" + + "github.com/smartcontractkit/chainlink/system-tests/lib/cre" + "github.com/smartcontractkit/chainlink/system-tests/lib/cre/environment" + envconfig "github.com/smartcontractkit/chainlink/system-tests/lib/cre/environment/config" +) + +const ( + DefaultConfigPath = "../../../../core/scripts/cre/environment/configs/workflow-don.toml" + DefaultEnvironmentDir = "../../../../core/scripts/cre/environment" + DefaultEnvArtifactFile = "../../../../core/scripts/cre/environment/env_artifact/env_artifact.json" +) + +type TestEnvironment struct { + Config *envconfig.Config + EnvArtifact environment.EnvArtifact + Logger zerolog.Logger + FullCldEnvOutput *cre.FullCLDEnvironmentOutput + WrappedBlockchainOutputs []*cre.WrappedBlockchainOutput +} + +// SetupTestEnvironment initializes the common test environment +func SetupTestEnvironment(t *testing.T) *TestEnvironment { + t.Helper() + + createEnvironment(t) + in := getEnvironmentConfig(t) + envArtifact := getEnvironmentArtifact(t) + fullCldEnvOutput, wrappedBlockchainOutputs, err := environment.BuildFromSavedState(t.Context(), cldlogger.NewSingleFileLogger(t), in, envArtifact) + require.NoError(t, err, "failed to load environment") + + return &TestEnvironment{ + Config: in, + EnvArtifact: envArtifact, + Logger: framework.L, + FullCldEnvOutput: fullCldEnvOutput, + WrappedBlockchainOutputs: wrappedBlockchainOutputs, + } +} + +func getEnvironmentConfig(t *testing.T) *envconfig.Config { + in, err := framework.Load[envconfig.Config](nil) + require.NoError(t, err, "couldn't load environment state") + return in +} + +func getEnvironmentArtifact(t *testing.T) environment.EnvArtifact { + var envArtifact environment.EnvArtifact + artFile, err := os.ReadFile(os.Getenv("ENV_ARTIFACT_PATH")) + require.NoError(t, err, "failed to read artifact file") + err = json.Unmarshal(artFile, &envArtifact) + require.NoError(t, err, "failed to unmarshal artifact file") + return
envArtifact +} + +func createEnvironment(t *testing.T) { + t.Helper() + + confErr := setConfigurationIfMissing(DefaultConfigPath, DefaultEnvArtifactFile) + require.NoError(t, confErr, "failed to set configuration") + + createErr := createEnvironmentIfNotExists(DefaultEnvironmentDir) + require.NoError(t, createErr, "failed to create environment") + + // transform the config file to the cache file, so that we can use the cached environment + cachedConfigFile, cacheErr := ctfConfigToCacheFile() + require.NoError(t, cacheErr, "failed to get cached config file") + + setErr := os.Setenv("CTF_CONFIGS", cachedConfigFile) + require.NoError(t, setErr, "failed to set CTF_CONFIGS env var") +} + +func setConfigurationIfMissing(configName, envArtifactPath string) error { + if os.Getenv("CTF_CONFIGS") == "" { + err := os.Setenv("CTF_CONFIGS", configName) + if err != nil { + return errors.Wrap(err, "failed to set CTF_CONFIGS env var") + } + } + + if os.Getenv("ENV_ARTIFACT_PATH") == "" { + err := os.Setenv("ENV_ARTIFACT_PATH", envArtifactPath) + if err != nil { + return errors.Wrap(err, "failed to set ENV_ARTIFACT_PATH env var") + } + } + + return environment.SetDefaultPrivateKeyIfEmpty(blockchain.DefaultAnvilPrivateKey) +} + +func createEnvironmentIfNotExists(environmentDir string) error { + cachedConfigFile, cacheErr := ctfConfigToCacheFile() + if cacheErr != nil { + return errors.Wrap(cacheErr, "failed to get cached config file") + } + + if _, err := os.Stat(cachedConfigFile); os.IsNotExist(err) { + framework.L.Info().Str("cached_config_file", cachedConfigFile).Msg("Cached config file does not exist, starting environment...") + cmd := exec.Command("go", "run", ".", "env", "start") + cmd.Dir = environmentDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmdErr := cmd.Run() + if cmdErr != nil { + return errors.Wrap(cmdErr, "failed to start environment") + } + } + + return nil +} + +func ctfConfigToCacheFile() (string, error) { + configFile := os.Getenv("CTF_CONFIGS") + if configFile == "" { + return "", errors.New("CTF_CONFIGS env var is not set") + } + + if strings.HasSuffix(configFile, "-cache.toml") { + return configFile, nil + } + + split := strings.Split(configFile, ",") + return strings.ReplaceAll(split[0], ".toml", "") + "-cache.toml", nil +} diff --git a/system-tests/tests/smoke/cre/beholder_helpers.go b/system-tests/tests/smoke/cre/beholder_helpers.go index ce84d5a7dbc..69c4a745866 100644 --- a/system-tests/tests/smoke/cre/beholder_helpers.go +++ b/system-tests/tests/smoke/cre/beholder_helpers.go @@ -39,6 +39,7 @@ func loadBeholderStackCache() (*config.ChipIngressConfig, error) { func startBeholderStackIfIsNotRunning(stateFile, environmentDir string) error { split := strings.Split(stateFile, ",") if _, err := os.Stat(split[0]); os.IsNotExist(err) { + framework.L.Info().Msg("Beholder has not been found. 
Starting Beholder...") cmd := exec.Command("go", "run", ".", "env", "beholder", "start") cmd.Dir = environmentDir cmd.Stdout = os.Stdout @@ -48,7 +49,7 @@ func startBeholderStackIfIsNotRunning(stateFile, environmentDir string) error { return errors.Wrap(cmdErr, "failed to start Beholder") } } - + framework.L.Info().Msg("Beholder is running.") return nil } diff --git a/system-tests/tests/smoke/cre/capabilities_test.go b/system-tests/tests/smoke/cre/capabilities_test.go deleted file mode 100644 index 9ad5d5e92e0..00000000000 --- a/system-tests/tests/smoke/cre/capabilities_test.go +++ /dev/null @@ -1,1284 +0,0 @@ -package cre - -import ( - "bytes" - "context" - "crypto/ecdsa" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "math/rand" - "net/http" - "net/url" - "os" - "os/exec" - "path/filepath" - "slices" - "strconv" - "strings" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/rs/zerolog" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - "google.golang.org/protobuf/encoding/protojson" - - "google.golang.org/protobuf/proto" - "gopkg.in/yaml.v3" - - "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy" - - common_events "github.com/smartcontractkit/chainlink-protos/workflows/go/common" - workflow_events "github.com/smartcontractkit/chainlink-protos/workflows/go/events" - - vaultcommon "github.com/smartcontractkit/chainlink-common/pkg/capabilities/actions/vault" - jsonrpc "github.com/smartcontractkit/chainlink-common/pkg/jsonrpc2" - "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" - cldf "github.com/smartcontractkit/chainlink-deployments-framework/deployment" - "github.com/smartcontractkit/chainlink/deployment/common/changeset" - df_changeset "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" - df_changeset_types "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset/types" - keystone_changeset "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" - - "github.com/smartcontractkit/chainlink-testing-framework/framework" - "github.com/smartcontractkit/chainlink-testing-framework/framework/components/blockchain" - "github.com/smartcontractkit/chainlink-testing-framework/framework/components/fake" - "github.com/smartcontractkit/chainlink-testing-framework/seth" - - "github.com/smartcontractkit/chainlink-evm/gethwrappers/data-feeds/generated/data_feeds_cache" - - cldlogger "github.com/smartcontractkit/chainlink/deployment/logger" - - vaultapi "github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/vault" - corevm "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" - "github.com/smartcontractkit/chainlink/v2/core/utils" - - gateway_common "github.com/smartcontractkit/chainlink-common/pkg/types/gateway" - - "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/ptr" - - "github.com/smartcontractkit/chainlink/system-tests/lib/cre" - crevault "github.com/smartcontractkit/chainlink/system-tests/lib/cre/capabilities/vault" - crecontracts "github.com/smartcontractkit/chainlink/system-tests/lib/cre/contracts" - "github.com/smartcontractkit/chainlink/system-tests/lib/cre/environment" - envconfig "github.com/smartcontractkit/chainlink/system-tests/lib/cre/environment/config" - "github.com/smartcontractkit/chainlink/system-tests/lib/cre/flags" - creworkflow "github.com/smartcontractkit/chainlink/system-tests/lib/cre/workflow" - - portypes 
"github.com/smartcontractkit/chainlink/core/scripts/cre/environment/examples/workflows/v1/proof-of-reserve/cron-based/types" -) - -const ( - AuthorizationKeySecretName = "AUTH_KEY" - // TODO: use once we can run these tests in CI (https://smartcontract-it.atlassian.net/browse/DX-589) - // AuthorizationKey = "12a-281j&@91.sj1:_}" - AuthorizationKey = "" - - // Test configuration constants - DefaultVerificationTimeout = 5 * time.Minute - ContainerTargetDir = "/home/chainlink/workflows" - WorkflowNodePrefix = "workflow-node" - DefaultConfigPath = "../../../../core/scripts/cre/environment/configs/workflow-don.toml" - DefaultEnvironmentDir = "../../../../core/scripts/cre/environment" - PoRWorkflowLocation = "../../../../core/scripts/cre/environment/examples/workflows/v1/proof-of-reserve/cron-based/main.go" - HTTPWorkflowLocation = "../../../../core/scripts/cre/environment/examples/workflows/v2/http_simple/main.go" - DefaultEnvArtifactPath = "../../../../core/scripts/cre/environment/env_artifact/env_artifact.json" - DefaultEnvArtifactFile = "../../../../core/scripts/cre/environment/env_artifact/env_artifact.json" - RetryInterval = 2 * time.Second - ValidationInterval = 10 * time.Second -) - -type TestEnvironment struct { - Config *envconfig.Config - EnvArtifact environment.EnvArtifact - Logger zerolog.Logger - FullCldEnvOutput *cre.FullCLDEnvironmentOutput - WrappedBlockchainOutputs []*cre.WrappedBlockchainOutput -} - -type WorkflowTestConfig struct { - WorkflowName string - WorkflowLocation string - FeedIDs []string - Timeout time.Duration -} - -/* -To execute on local start the local CRE first with following command: -# inside core/scripts/cre/environment directory -1. ensure necessary capabilities (cron, readcontract) are added (see README in core/scripts/cre/environment for [extra_capabilities]) -2. `go run . env start && ctf obs up && ctf bs up`. -It will start env + observability + blockscout. -*/ -func Test_CRE_Workflow_Don(t *testing.T) { - testEnv := setupTestEnvironment(t) - - // currently we can't run these tests in parallel, because each test rebuilds environment structs and that includes - // logging into CL node with GraphQL API, which allows only 1 session per user at a time. 
- t.Run("cron-based PoR workflow", func(t *testing.T) { - executePoRTest(t, testEnv) - }) - - t.Run("vault DON test", func(t *testing.T) { - executeVaultTest(t, testEnv) - }) - - t.Run("http trigger and action test", func(t *testing.T) { - executeHTTPTriggerActionTest(t, testEnv) - }) - - t.Run("DON Time test", func(t *testing.T) { - // TODO: Implement smoke test - https://smartcontract-it.atlassian.net/browse/CAPPL-1028 - t.Skip() - }) - - t.Run("Beholder test", func(t *testing.T) { - executeBeholderTest(t, testEnv) - }) -} - -// WorkflowRegistrationConfig holds configuration for workflow registration -type WorkflowRegistrationConfig struct { - WorkflowName string - WorkflowLocation string - ConfigFilePath string - CompressedWasmPath string - WorkflowRegistryAddr common.Address - DonID uint64 - ContainerTargetDir string -} - -// setupTestEnvironment initializes the common test environment -func setupTestEnvironment(t *testing.T) *TestEnvironment { - confErr := setConfigurationIfMissing(DefaultConfigPath, DefaultEnvArtifactFile) - require.NoError(t, confErr, "failed to set configuration") - - createErr := createEnvironmentIfNotExists(DefaultEnvironmentDir) - require.NoError(t, createErr, "failed to create environment") - - // transform the config file to the cache file, so that we can use the cached environment - cachedConfigFile, cacheErr := ctfConfigToCacheFile() - require.NoError(t, cacheErr, "failed to get cached config file") - - setErr := os.Setenv("CTF_CONFIGS", cachedConfigFile) - require.NoError(t, setErr, "failed to set CTF_CONFIGS env var") - - /* - LOAD ENVIRONMENT STATE - */ - in, err := framework.Load[envconfig.Config](nil) - require.NoError(t, err, "couldn't load environment state") - - var envArtifact environment.EnvArtifact - artFile, err := os.ReadFile(os.Getenv("ENV_ARTIFACT_PATH")) - require.NoError(t, err, "failed to read artifact file") - err = json.Unmarshal(artFile, &envArtifact) - require.NoError(t, err, "failed to unmarshal artifact file") - - fullCldEnvOutput, wrappedBlockchainOutputs, err := environment.BuildFromSavedState(t.Context(), cldlogger.NewSingleFileLogger(t), in, envArtifact) - require.NoError(t, err, "failed to load environment") - - return &TestEnvironment{ - Config: in, - EnvArtifact: envArtifact, - Logger: framework.L, - FullCldEnvOutput: fullCldEnvOutput, - WrappedBlockchainOutputs: wrappedBlockchainOutputs, - } -} - -// copyWorkflowFilesToContainers copies workflow files to Docker containers -func copyWorkflowFilesToContainers(t *testing.T, wasmPath, configPath, containerTargetDir string) { - workflowCopyErr := creworkflow.CopyArtifactToDockerContainers(wasmPath, WorkflowNodePrefix, containerTargetDir) - require.NoError(t, workflowCopyErr, "failed to copy workflow to docker containers") - - configCopyErr := creworkflow.CopyArtifactToDockerContainers(configPath, WorkflowNodePrefix, containerTargetDir) - require.NoError(t, configCopyErr, "failed to copy workflow config to docker containers") -} - -// registerWorkflow registers a workflow with the contract -func registerWorkflow(ctx context.Context, t *testing.T, config *WorkflowRegistrationConfig, sethClient *seth.Client, testLogger zerolog.Logger) { - workflowID, registerErr := creworkflow.RegisterWithContract( - ctx, - sethClient, - config.WorkflowRegistryAddr, - config.DonID, - config.WorkflowName, - "file://"+config.CompressedWasmPath, - ptr.Ptr("file://"+config.ConfigFilePath), - nil, - &config.ContainerTargetDir, - ) - require.NoError(t, registerErr, "failed to register workflow '%s'", 
config.WorkflowName) - testLogger.Info().Msgf("Workflow registered successfully: '%s'", workflowID) -} - -// validatePoRPrices validates that all feeds receive the expected prices from the price provider -func validatePoRPrices(t *testing.T, testEnv *TestEnvironment, priceProvider PriceProvider, config *WorkflowTestConfig) { - eg := &errgroup.Group{} - - for idx, bcOutput := range testEnv.WrappedBlockchainOutputs { - if bcOutput.BlockchainOutput.Type == blockchain.FamilySolana { - continue - } - - eg.Go(func() error { - feedID := config.FeedIDs[idx] - testEnv.Logger.Info().Msgf("Waiting for feed %s to update...", feedID) - - dataFeedsCacheAddresses, dataFeedsCacheErr := crecontracts.FindAddressesForChain( - testEnv.FullCldEnvOutput.Environment.ExistingAddresses, //nolint:staticcheck // won't migrate now - bcOutput.ChainSelector, - df_changeset.DataFeedsCache.String(), - ) - if dataFeedsCacheErr != nil { - return fmt.Errorf("failed to find data feeds cache address for chain %d: %w", bcOutput.ChainID, dataFeedsCacheErr) - } - - dataFeedsCacheInstance, instanceErr := data_feeds_cache.NewDataFeedsCache(dataFeedsCacheAddresses, bcOutput.SethClient.Client) - if instanceErr != nil { - return fmt.Errorf("failed to create data feeds cache instance: %w", instanceErr) - } - - startTime := time.Now() - require.Eventually(t, func() bool { - elapsed := time.Since(startTime).Round(time.Second) - price, err := dataFeedsCacheInstance.GetLatestAnswer(bcOutput.SethClient.NewCallOpts(), [16]byte(common.Hex2Bytes(feedID))) - if err != nil { - testEnv.Logger.Error().Err(err).Msg("failed to get price from Data Feeds Cache contract") - return false - } - - // if there are no more prices to be found, we can stop waiting - return !priceProvider.NextPrice(feedID, price, elapsed) - }, config.Timeout, ValidationInterval, "feed %s did not update, timeout after: %s", feedID, config.Timeout) - - expected := priceProvider.ExpectedPrices(feedID) - actual := priceProvider.ActualPrices(feedID) - - if len(expected) != len(actual) { - return fmt.Errorf("expected %d prices, got %d", len(expected), len(actual)) - } - - for i := range expected { - if expected[i].Cmp(actual[i]) != 0 { - return fmt.Errorf("expected price %d, got %d", expected[i], actual[i]) - } - } - - testEnv.Logger.Info().Msgf("All prices were found in the feed %s", feedID) - return nil - }) - } - - err := eg.Wait() - require.NoError(t, err, "price validation failed") - - testEnv.Logger.Info().Msgf("All prices were found for all feeds") -} - -func executePoRTest(t *testing.T, testEnv *TestEnvironment) { - feedIDs := []string{"018e16c39e000320000000000000000000000000000000000000000000000000", "018e16c38e000320000000000000000000000000000000000000000000000000"} - priceProvider, err := NewFakePriceProvider(testEnv.Logger, testEnv.Config.Fake, AuthorizationKey, feedIDs) - require.NoError(t, err, "failed to create fake price provider") - - config := &WorkflowTestConfig{ - WorkflowName: "por-workflow", - WorkflowLocation: PoRWorkflowLocation, - FeedIDs: feedIDs, - Timeout: DefaultVerificationTimeout, - } - - executePoRWorkflowTest(t, testEnv, priceProvider, config) -} - -// executePoRWorkflowTest handles the main PoR workflow test logic -func executePoRWorkflowTest(t *testing.T, testEnv *TestEnvironment, priceProvider PriceProvider, config *WorkflowTestConfig) { - homeChainSelector := testEnv.WrappedBlockchainOutputs[0].ChainSelector - writeableChains := []uint64{} - for _, bcOutput := range testEnv.WrappedBlockchainOutputs { - for _, donMetadata := range 
testEnv.FullCldEnvOutput.DonTopology.DonsWithMetadata { - if flags.RequiresForwarderContract(donMetadata.Flags, bcOutput.ChainID) { - if !slices.Contains(writeableChains, bcOutput.ChainID) { - writeableChains = append(writeableChains, bcOutput.ChainID) - } - } - } - } - require.Len(t, config.FeedIDs, len(writeableChains), "number of writeable chains must match number of feed IDs (check what chains 'evm' and 'write-evm' capabilities are enabled for)") - - /* - DEPLOY DATA FEEDS CACHE + READ BALANCES CONTRACTS ON ALL CHAINS (except read-only ones) - Workflow will write price data to the data feeds cache contract - - REGISTER ONE WORKFLOW PER CHAIN (except read-only ones) - */ - for idx, bcOutput := range testEnv.WrappedBlockchainOutputs { - if bcOutput.BlockchainOutput.Type == blockchain.FamilySolana { - continue - } - - // deploy data feeds cache contract only on chains that are writeable - if !slices.Contains(writeableChains, bcOutput.ChainID) { - continue - } - - chainSelector := bcOutput.ChainSelector - fullCldEnvOutput := testEnv.FullCldEnvOutput - testLogger := testEnv.Logger - - testLogger.Info().Msgf("Deploying additional contracts to %d", chainSelector) - testLogger.Info().Msg("Deploying Data Feeds Cache contract...") - deployDfConfig := df_changeset_types.DeployConfig{ - ChainsToDeploy: []uint64{chainSelector}, - Labels: []string{"data-feeds"}, // label required by the changeset - } - dfOutput, dfErr := changeset.RunChangeset(df_changeset.DeployCacheChangeset, *fullCldEnvOutput.Environment, deployDfConfig) - require.NoError(t, dfErr, "failed to deploy Data Feed Cache contract") - mergeErr := fullCldEnvOutput.Environment.ExistingAddresses.Merge(dfOutput.AddressBook) //nolint:staticcheck // won't migrate now - require.NoError(t, mergeErr, "failed to merge address book of Data Feeds Cache contract") - testLogger.Info().Msgf("Data Feeds Cache contract deployed to %d", chainSelector) - - testLogger.Info().Msg("Deploying Read Balances contract...") - deployReadBalanceRequest := &keystone_changeset.DeployRequestV2{ChainSel: chainSelector} - rbOutput, rbErr := keystone_changeset.DeployBalanceReaderV2(*fullCldEnvOutput.Environment, deployReadBalanceRequest) - require.NoError(t, rbErr, "failed to deploy Read Balances contract") - mergeErr2 := fullCldEnvOutput.Environment.ExistingAddresses.Merge(rbOutput.AddressBook) //nolint:staticcheck // won't migrate now - require.NoError(t, mergeErr2, "failed to merge address book of Read Balances contract") - testLogger.Info().Msgf("Read Balances contract deployed to %d", chainSelector) - - mergeErr3 := dfOutput.DataStore.Merge(rbOutput.DataStore.Seal()) - require.NoError(t, mergeErr3, "failed to merge data stores") - fullCldEnvOutput.Environment.DataStore = dfOutput.DataStore.Seal() - - workflowName := "por-workflow-" + bcOutput.BlockchainOutput.ChainID + "-" + uuid.New().String()[0:4] - - dfConfigInput := &configureDataFeedsCacheInput{ - chainSelector: chainSelector, - fullCldEnvironment: fullCldEnvOutput.Environment, - workflowName: workflowName, - feedID: config.FeedIDs[idx], - sethClient: bcOutput.SethClient, - blockchain: bcOutput.BlockchainOutput, - } - dfConfigErr := configureDataFeedsCacheContract(testEnv.Logger, dfConfigInput) - require.NoError(t, dfConfigErr, "failed to configure data feeds cache") - - chainID := bcOutput.ChainID - workflowRegistryAddress, workflowRegistryErr := crecontracts.FindAddressesForChain( - fullCldEnvOutput.Environment.ExistingAddresses, //nolint:staticcheck // won't migrate now - homeChainSelector, // it should live 
only on one chain, it is not deployed to all chains - keystone_changeset.WorkflowRegistry.String(), - ) - require.NoError(t, workflowRegistryErr, "failed to find Workflow Registry address.") - testLogger.Info().Msgf("Workflow Registry contract found at chain selector %d at %s", homeChainSelector, workflowRegistryAddress) - - testLogger.Info().Msgf("Registering PoR workflow on chain %d (%d)", chainID, chainSelector) - testLogger.Info().Msg("Creating workflow config file.") - readBalancesAddress, readContractErr := crecontracts.FindAddressesForChain( - fullCldEnvOutput.Environment.ExistingAddresses, //nolint:staticcheck // won't migrate now - chainSelector, - keystone_changeset.BalanceReader.String(), - ) - require.NoError(t, readContractErr, "failed to find Read Balances contract address for chain %d", chainID) - testLogger.Info().Msgf("Read Balances contract found on chain %d at %s", chainID, readBalancesAddress) - - dataFeedsCacheAddress, dataFeedsCacheErr := crecontracts.FindAddressesForChain( - fullCldEnvOutput.Environment.ExistingAddresses, //nolint:staticcheck // won't migrate now - chainSelector, - df_changeset.DataFeedsCache.String(), - ) - require.NoError(t, dataFeedsCacheErr, "failed to find Data Feeds Cache address for chain %d", chainID) - testLogger.Info().Msgf("Data Feeds Cache contract found on chain %d at %s", chainID, dataFeedsCacheAddress) - - workflowConfigFilePath, configErr := createWorkflowConfigFile(bcOutput, readBalancesAddress, dataFeedsCacheAddress, workflowName, config.FeedIDs[idx], priceProvider.URL(), corevm.GenerateWriteTargetName(chainID)) - require.NoError(t, configErr, "failed to create workflow config file") - testLogger.Info().Msgf("Workflow config file created.") - - compressedWorkflowWasmPath, compileErr := creworkflow.CompileWorkflow(config.WorkflowLocation, workflowName) - require.NoError(t, compileErr, "failed to compile workflow '%s'", config.WorkflowLocation) - testLogger.Info().Msgf("Workflow compiled successfully.") - - require.NoError(t, compileErr, "failed to compile workflow") - - t.Cleanup(func() { - wasmErr := os.Remove(compressedWorkflowWasmPath) - if wasmErr != nil { - framework.L.Warn().Msgf("failed to remove workflow wasm file %s: %s", compressedWorkflowWasmPath, wasmErr.Error()) - } - configErr := os.Remove(workflowConfigFilePath) - if configErr != nil { - framework.L.Warn().Msgf("failed to remove workflow config file %s: %s", workflowConfigFilePath, configErr.Error()) - } - deleteErr := creworkflow.DeleteWithContract(t.Context(), testEnv.WrappedBlockchainOutputs[0].SethClient, workflowRegistryAddress, workflowName) - if deleteErr != nil { - framework.L.Warn().Msgf("failed to delete workflow %s: %s. 
Please delete it manually.", workflowName, deleteErr.Error()) - } - }) - - copyWorkflowFilesToContainers(t, compressedWorkflowWasmPath, workflowConfigFilePath, ContainerTargetDir) - - regConfig := &WorkflowRegistrationConfig{ - WorkflowName: workflowName, - WorkflowLocation: config.WorkflowLocation, - ConfigFilePath: workflowConfigFilePath, - CompressedWasmPath: compressedWorkflowWasmPath, - WorkflowRegistryAddr: workflowRegistryAddress, - DonID: testEnv.FullCldEnvOutput.DonTopology.DonsWithMetadata[0].ID, - ContainerTargetDir: ContainerTargetDir, - } - registerWorkflow(t.Context(), t, regConfig, testEnv.WrappedBlockchainOutputs[0].SethClient, testLogger) - } - /* - START THE VALIDATION PHASE - Check whether each feed has been updated with the expected prices, which workflow fetches from the price provider - */ - validatePoRPrices(t, testEnv, priceProvider, config) -} - -func executeVaultTest(t *testing.T, testEnv *TestEnvironment) { - // Skip till the errors with topology TopologyWorkflowGatewayCapabilities are fixed - // TODO: https://smartcontract-it.atlassian.net/browse/PRIV-160 - t.Skip() - - /* - BUILD ENVIRONMENT FROM SAVED STATE - */ - framework.L.Info().Msg("Getting gateway configuration...") - require.NotEmpty(t, testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations, "expected at least one gateway configuration") - gatewayURL, err := url.Parse(testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Protocol + "://" + testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Host + ":" + strconv.Itoa(testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.ExternalPort) + testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Path) - require.NoError(t, err, "failed to parse gateway URL") - - framework.L.Info().Msgf("Gateway URL: %s", gatewayURL.String()) - - framework.L.Info().Msgf("Sleeping 1 minute to allow the Vault DON to start...") - // TODO: Remove this sleep https://smartcontract-it.atlassian.net/browse/PRIV-154 - time.Sleep(1 * time.Minute) - framework.L.Info().Msgf("Sleep over. 
Executing test now...") - - secretID := strconv.Itoa(rand.Intn(10000)) // generate a random secret ID for testing - owner := "Owner1" - secretValue := "Secret Value to be stored" - - executeVaultSecretsCreateTest(t, secretValue, secretID, owner, gatewayURL.String()) - - framework.L.Info().Msg("------------------------------------------------------") - framework.L.Info().Msg("------------------------------------------------------") - framework.L.Info().Msg("------------------------------------------------------") - framework.L.Info().Msg("------------------------------------------------------") - framework.L.Info().Msg("------------------------------------------------------") - - executeVaultSecretsGetTest(t, secretValue, secretID, owner, gatewayURL.String()) - executeVaultSecretsUpdateTest(t, secretValue, secretID, owner, gatewayURL.String()) -} - -func executeVaultSecretsCreateTest(t *testing.T, secretValue, secretID, owner, gatewayURL string) { - framework.L.Info().Msg("Creating secret...") - uniqueRequestID := uuid.New().String() - - secretsCreateRequest := jsonrpc.Request[vaultcommon.CreateSecretsRequest]{ - Version: jsonrpc.JsonRpcVersion, - ID: uniqueRequestID, - Method: vaultapi.MethodSecretsCreate, - Params: &vaultcommon.CreateSecretsRequest{ - RequestId: uniqueRequestID, - EncryptedSecrets: []*vaultcommon.EncryptedSecret{ - { - Id: &vaultcommon.SecretIdentifier{ - Key: secretID, - Owner: owner, - // Namespace: "main", // Uncomment if you want to use namespaces - }, // Note: Namespace is not used in this test, but can be added if needed - EncryptedValue: encryptSecret(t, secretValue), - }, - }, - }, - } - requestBody, err := json.Marshal(secretsCreateRequest) - require.NoError(t, err, "failed to marshal secrets request") - - httpResponseBody := sendVaultRequestToGateway(t, gatewayURL, requestBody) - framework.L.Info().Msg("Checking jsonResponse structure...") - var jsonResponse jsonrpc.Response[vaultapi.SignedOCRResponse] - err = json.Unmarshal(httpResponseBody, &jsonResponse) - require.NoError(t, err, "failed to unmarshal getResponse") - framework.L.Info().Msgf("JSON Body: %v", jsonResponse) - if jsonResponse.Error != nil { - require.Empty(t, jsonResponse.Error.Error()) - } - require.Equal(t, jsonrpc.JsonRpcVersion, jsonResponse.Version) - require.Equal(t, vaultapi.MethodSecretsCreate, jsonResponse.Method) - - signedOCRResponse := jsonResponse.Result - framework.L.Info().Msgf("Signed OCR Response: %s", signedOCRResponse.String()) - - // TODO: Verify the authenticity of this signed report, by ensuring that the signatures indeed match the payload - - createSecretsResponse := vaultcommon.CreateSecretsResponse{} - err = protojson.Unmarshal(signedOCRResponse.Payload, &createSecretsResponse) - require.NoError(t, err, "failed to decode payload into CreateSecretsResponse proto") - framework.L.Info().Msgf("CreateSecretsResponse decoded as: %s", createSecretsResponse.String()) - - require.Len(t, createSecretsResponse.Responses, 1, "Expected one item in the response") - result0 := createSecretsResponse.GetResponses()[0] - require.Empty(t, result0.GetError()) - require.Equal(t, secretID, result0.GetId().Key) - require.Equal(t, owner, result0.GetId().Owner) - - framework.L.Info().Msg("Secret created successfully") -} - -func executeVaultSecretsUpdateTest(t *testing.T, secretValue, secretID, owner, gatewayURL string) { - framework.L.Info().Msg("Updating secret...") - uniqueRequestID := uuid.New().String() - - secretsUpdateRequest := jsonrpc.Request[vaultcommon.UpdateSecretsRequest]{ - Version: 
jsonrpc.JsonRpcVersion, - ID: uniqueRequestID, - Method: vaultapi.MethodSecretsUpdate, - Params: &vaultcommon.UpdateSecretsRequest{ - RequestId: uniqueRequestID, - EncryptedSecrets: []*vaultcommon.EncryptedSecret{ - { - Id: &vaultcommon.SecretIdentifier{ - Key: secretID, - Owner: owner, - }, - EncryptedValue: encryptSecret(t, secretValue), - }, - { - Id: &vaultcommon.SecretIdentifier{ - Key: "invalid", - Owner: "invalid", - }, - EncryptedValue: encryptSecret(t, secretValue), - }, - }, - }, - } - requestBody, err := json.Marshal(secretsUpdateRequest) - require.NoError(t, err, "failed to marshal secrets request") - - httpResponseBody := sendVaultRequestToGateway(t, gatewayURL, requestBody) - framework.L.Info().Msg("Checking jsonResponse structure...") - var jsonResponse jsonrpc.Response[vaultapi.SignedOCRResponse] - err = json.Unmarshal(httpResponseBody, &jsonResponse) - require.NoError(t, err, "failed to unmarshal getResponse") - framework.L.Info().Msgf("JSON Body: %v", jsonResponse) - if jsonResponse.Error != nil { - require.Empty(t, jsonResponse.Error.Error()) - } - - require.Equal(t, jsonrpc.JsonRpcVersion, jsonResponse.Version) - require.Equal(t, vaultapi.MethodSecretsUpdate, jsonResponse.Method) - - signedOCRResponse := jsonResponse.Result - framework.L.Info().Msgf("Signed OCR Response: %s", signedOCRResponse.String()) - - // TODO: Verify the authenticity of this signed report, by ensuring that the signatures indeed match the payload - - updateSecretsResponse := vaultcommon.UpdateSecretsResponse{} - err = protojson.Unmarshal(signedOCRResponse.Payload, &updateSecretsResponse) - require.NoError(t, err, "failed to decode payload into UpdateSecretsResponse proto") - framework.L.Info().Msgf("UpdateSecretsResponse decoded as: %s", updateSecretsResponse.String()) - - require.Len(t, updateSecretsResponse.Responses, 2, "Expected one item in the response") - result0 := updateSecretsResponse.GetResponses()[0] - require.Empty(t, result0.GetError()) - require.Equal(t, secretID, result0.GetId().Key) - require.Equal(t, owner, result0.GetId().Owner) - - result1 := updateSecretsResponse.GetResponses()[1] - require.Contains(t, result1.Error, "key does not exist") - - framework.L.Info().Msg("Secret updated successfully") -} - -func executeVaultSecretsGetTest(t *testing.T, secretValue, secretID, owner, gatewayURL string) { - uniqueRequestID := uuid.New().String() - framework.L.Info().Msg("Getting secret...") - secretsGetRequest := jsonrpc.Request[vaultcommon.GetSecretsRequest]{ - Version: jsonrpc.JsonRpcVersion, - Method: vaultapi.MethodSecretsGet, - Params: &vaultcommon.GetSecretsRequest{ - Requests: []*vaultcommon.SecretRequest{ - { - Id: &vaultcommon.SecretIdentifier{ - Key: secretID, - Owner: owner, - }, - }, - }, - }, - ID: uniqueRequestID, - } - requestBody, err := json.Marshal(secretsGetRequest) - require.NoError(t, err, "failed to marshal secrets request") - httpResponseBody := sendVaultRequestToGateway(t, gatewayURL, requestBody) - framework.L.Info().Msg("Checking jsonResponse structure...") - var jsonResponse jsonrpc.Response[json.RawMessage] - err = json.Unmarshal(httpResponseBody, &jsonResponse) - require.NoError(t, err, "failed to unmarshal http response body") - framework.L.Info().Msgf("JSON Body: %v", jsonResponse) - if jsonResponse.Error != nil { - require.Empty(t, jsonResponse.Error.Error()) - } - require.Equal(t, jsonrpc.JsonRpcVersion, jsonResponse.Version) - require.Equal(t, vaultapi.MethodSecretsGet, jsonResponse.Method) - - /* - * The json unmarshaling is not compatible with the 
proto oneof in vaultcommon.SecretResponse - * The Data and Error fields are oneof fields in the proto definition, but when unmarshaling to JSON, - * the JSON unmarshaler does not handle oneof fields correctly, leading to issues. - * To work around this, we define custom response types that match the expected structure. - * This allows us to unmarshal the JSON response correctly and access the fields as expected. - */ - type EncryptedShares struct { - Shares []string `protobuf:"bytes,1,rep,name=shares,proto3" json:"shares,omitempty"` - EncryptionKey string `protobuf:"bytes,2,opt,name=encryption_key,json=encryptionKey,proto3" json:"encryption_key,omitempty"` - } - type SecretData struct { - EncryptedValue string `protobuf:"bytes,2,opt,name=encrypted_value,json=encryptedValue,proto3" json:"encrypted_value,omitempty"` - EncryptedDecryptionKeyShares []*EncryptedShares `protobuf:"bytes,3,rep,name=encrypted_decryption_key_shares,json=encryptedDecryptionKeyShares,proto3" json:"encrypted_decryption_key_shares,omitempty"` - } - type SecretResponse struct { - ID *vaultcommon.SecretIdentifier `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Data *SecretData `protobuf:"bytes,2,opt,name=data,proto3"` - Error string `protobuf:"bytes,3,opt,name=error,proto3"` - } - type GetSecretsResponse struct { - Responses []*SecretResponse `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` - } - /* - * - * - * - * - */ - - var getSecretsResponse GetSecretsResponse - err = json.Unmarshal(*jsonResponse.Result, &getSecretsResponse) - require.NoError(t, err, "failed to unmarshal getResponse") - - require.Len(t, getSecretsResponse.Responses, 1, "Expected one secret in the response") - result0 := getSecretsResponse.Responses[0] - require.Empty(t, result0.Error) - require.Equal(t, secretID, result0.ID.Key) - require.Equal(t, owner, result0.ID.Owner) - - framework.L.Info().Msg("Secret get successful") -} - -func sendVaultRequestToGateway(t *testing.T, gatewayURL string, requestBody []byte) []byte { - framework.L.Info().Msgf("Request Body: %s", string(requestBody)) - req, err := http.NewRequestWithContext(context.Background(), "POST", gatewayURL, bytes.NewBuffer(requestBody)) - require.NoError(t, err, "failed to create request") - - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Accept", "application/json") - - client := &http.Client{} - resp, err := client.Do(req) - require.NoError(t, err, "failed to execute request") - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - require.NoError(t, err, "failed to read jsonResponse body") - framework.L.Info().Msgf("Response Body: %s", string(body)) - require.Equal(t, http.StatusOK, resp.StatusCode, "Gateway endpoint should respond with 200 OK") - return body -} - -func encryptSecret(t *testing.T, secret string) string { - masterPublicKey := tdh2easy.PublicKey{} - masterPublicKeyBytes, err := hex.DecodeString(crevault.MasterPublicKeyStr) - require.NoError(t, err) - err = masterPublicKey.Unmarshal(masterPublicKeyBytes) - require.NoError(t, err) - cipher, err := tdh2easy.Encrypt(&masterPublicKey, []byte(secret)) - require.NoError(t, err) - cipherBytes, err := cipher.Marshal() - require.NoError(t, err) - return hex.EncodeToString(cipherBytes) -} - -func executeHTTPTriggerActionTest(t *testing.T, testEnv *TestEnvironment) { - homeChainSelector := testEnv.WrappedBlockchainOutputs[0].ChainSelector - testEnv.Logger.Info().Msg("Starting HTTP trigger and action test...") - - httpConfig := setupHTTPWorkflowTest(t, testEnv) - - 
compressedWorkflowWasmPath, err := creworkflow.CompileWorkflow(httpConfig.WorkflowLocation, httpConfig.WorkflowName) - require.NoError(t, err, "failed to compile workflow") - - workflowRegistryAddress, err := crecontracts.FindAddressesForChain( - testEnv.FullCldEnvOutput.Environment.ExistingAddresses, //nolint:staticcheck // won't migrate now - homeChainSelector, - keystone_changeset.WorkflowRegistry.String(), - ) - require.NoError(t, err, "failed to find workflow registry address for chain %d", homeChainSelector) - - t.Cleanup(func() { - wasmErr := os.Remove(compressedWorkflowWasmPath) - if wasmErr != nil { - framework.L.Warn().Msgf("failed to remove workflow wasm file %s: %s", compressedWorkflowWasmPath, wasmErr.Error()) - } - configErr := os.Remove(httpConfig.ConfigPath) - if configErr != nil { - framework.L.Warn().Msgf("failed to remove workflow config file %s: %s", httpConfig.ConfigPath, configErr.Error()) - } - deleteErr := creworkflow.DeleteWithContract(t.Context(), testEnv.WrappedBlockchainOutputs[0].SethClient, workflowRegistryAddress, httpConfig.WorkflowName) - if deleteErr != nil { - framework.L.Warn().Msgf("failed to delete workflow %s: %s. Please delete it manually.", httpConfig.WorkflowName, deleteErr.Error()) - } - }) - - copyWorkflowFilesToContainers(t, compressedWorkflowWasmPath, httpConfig.ConfigPath, ContainerTargetDir) - - regConfig := &WorkflowRegistrationConfig{ - WorkflowName: httpConfig.WorkflowName, - WorkflowLocation: httpConfig.WorkflowLocation, - ConfigFilePath: httpConfig.ConfigPath, - CompressedWasmPath: compressedWorkflowWasmPath, - WorkflowRegistryAddr: workflowRegistryAddress, - DonID: testEnv.FullCldEnvOutput.DonTopology.DonsWithMetadata[0].ID, - ContainerTargetDir: ContainerTargetDir, - } - registerWorkflow(t.Context(), t, regConfig, testEnv.WrappedBlockchainOutputs[0].SethClient, testEnv.Logger) - - testEnv.Logger.Info().Msg("Getting gateway configuration...") - require.NotEmpty(t, testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations, "expected at least one gateway configuration") - gatewayURL, err := url.Parse(testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Protocol + "://" + testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Host + ":" + strconv.Itoa(testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.ExternalPort) + testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Path) - require.NoError(t, err, "failed to parse gateway URL") - - workflowOwner, err := crypto.HexToECDSA(testEnv.WrappedBlockchainOutputs[0].DeployerPrivateKey) - require.NoError(t, err, "failed to convert private key to ECDSA") - workflowOwnerAddress := strings.ToLower(crypto.PubkeyToAddress(workflowOwner.PublicKey).Hex()) - - testEnv.Logger.Info().Msgf("Workflow owner address: %s", workflowOwnerAddress) - testEnv.Logger.Info().Msgf("Workflow name: %s", httpConfig.WorkflowName) - - executeHTTPTriggerRequest(t, testEnv, gatewayURL, httpConfig, workflowOwnerAddress) - - validateHTTPWorkflowRequest(t, testEnv) - - testEnv.Logger.Info().Msg("HTTP trigger and action test completed successfully") -} - -func createTestWorkflowConfig(t *testing.T, workflowName, mockServerURL string) (string, *ecdsa.PrivateKey) { - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - - publicKeyAddr := crypto.PubkeyToAddress(privateKey.PublicKey) - - parsedURL, err := url.Parse(mockServerURL) - require.NoError(t, err, "failed to 
parse mock server URL") - - url := fmt.Sprintf("%s:%s", framework.HostDockerInternal(), parsedURL.Port()) - framework.L.Info().Msgf("Mock server URL transformed from '%s' to '%s' for Docker access", mockServerURL, url) - - config := map[string]interface{}{ - "authorizedKey": publicKeyAddr.Hex(), - "url": url + "/orders", - } - - configBytes, err := json.Marshal(config) - require.NoError(t, err, "failed to marshal config") - - configFileName := fmt.Sprintf("test_http_workflow_config_%s.json", workflowName) - configPath := filepath.Join(os.TempDir(), configFileName) - - err = os.WriteFile(configPath, configBytes, 0644) //nolint:gosec // this is a test file - require.NoError(t, err, "failed to write config file") - - return configPath, privateKey -} - -func createHTTPTriggerRequestWithKey(t *testing.T, workflowName, workflowOwner string, privateKey *ecdsa.PrivateKey) jsonrpc.Request[json.RawMessage] { - triggerPayload := gateway_common.HTTPTriggerRequest{ - Workflow: gateway_common.WorkflowSelector{ - WorkflowOwner: workflowOwner, - WorkflowName: workflowName, - WorkflowTag: "TEMP_TAG", - }, - Input: []byte(`{ - "customer": "test-customer", - "size": "large", - "toppings": ["cheese", "pepperoni"], - "dedupe": false - }`), - } - - payloadBytes, err := json.Marshal(triggerPayload) - require.NoError(t, err) - rawPayload := json.RawMessage(payloadBytes) - - req := jsonrpc.Request[json.RawMessage]{ - Version: jsonrpc.JsonRpcVersion, - Method: gateway_common.MethodWorkflowExecute, - Params: &rawPayload, - ID: "http-trigger-test-" + uuid.New().String()[0:8], - } - - token, err := utils.CreateRequestJWT(req) - require.NoError(t, err) - - tokenString, err := token.SignedString(privateKey) - require.NoError(t, err) - req.Auth = tokenString - - return req -} - -type HTTPTestConfig struct { - WorkflowName string - FakeServer *fake.Output - SigningKey *ecdsa.PrivateKey - ConfigPath string - WorkflowLocation string -} - -// startTestOrderServer creates a fake HTTP server that records requests and returns proper responses for order endpoints -func startTestOrderServer(t *testing.T, port int) (*fake.Output, error) { - fakeInput := &fake.Input{ - Port: port, - } - - fakeOutput, err := fake.NewFakeDataProvider(fakeInput) - if err != nil { - return nil, err - } - - // Set up the /orders endpoint - response := map[string]interface{}{ - "orderId": "test-order-" + uuid.New().String()[0:8], - "status": "success", - "message": "Order processed successfully", - } - - err = fake.JSON("POST", "/orders", response, 200) - require.NoError(t, err, "failed to set up /orders endpoint") - - framework.L.Info().Msgf("Test order server started on port %d at: %s", port, fakeOutput.BaseURLHost) - return fakeOutput, nil -} - -// setupHTTPWorkflowTest sets up the HTTP workflow test infrastructure -func setupHTTPWorkflowTest(t *testing.T, testEnv *TestEnvironment) *HTTPTestConfig { - fakeServer, err := startTestOrderServer(t, testEnv.Config.Fake.Port) - require.NoError(t, err, "failed to start fake HTTP server") - - workflowName := "http-trigger-action-test-" + uuid.New().String()[0:8] - configPath, signingKey := createTestWorkflowConfig(t, workflowName, fakeServer.BaseURLHost) - - return &HTTPTestConfig{ - WorkflowName: workflowName, - FakeServer: fakeServer, - SigningKey: signingKey, - ConfigPath: configPath, - WorkflowLocation: HTTPWorkflowLocation, - } -} - -// executeHTTPTriggerRequest executes an HTTP trigger request and waits for successful response -func executeHTTPTriggerRequest(t *testing.T, testEnv *TestEnvironment, 
gatewayURL *url.URL, httpConfig *HTTPTestConfig, workflowOwnerAddress string) { - var finalResponse jsonrpc.Response[json.RawMessage] - var triggerRequest jsonrpc.Request[json.RawMessage] - - require.Eventually(t, func() bool { - triggerRequest = createHTTPTriggerRequestWithKey(t, httpConfig.WorkflowName, workflowOwnerAddress, httpConfig.SigningKey) - triggerRequestBody, err := json.Marshal(triggerRequest) - if err != nil { - testEnv.Logger.Warn().Msgf("Failed to marshal trigger request: %v", err) - return false - } - - testEnv.Logger.Info().Msgf("Gateway URL: %s", gatewayURL.String()) - testEnv.Logger.Info().Msg("Executing HTTP trigger request with retries until workflow is loaded...") - - req, err := http.NewRequestWithContext(t.Context(), "POST", gatewayURL.String(), bytes.NewBuffer(triggerRequestBody)) - if err != nil { - testEnv.Logger.Warn().Msgf("Failed to create request: %v", err) - return false - } - req.Header.Set("Content-Type", "application/jsonrpc") - req.Header.Set("Accept", "application/json") - - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - testEnv.Logger.Warn().Msgf("Failed to execute request: %v", err) - return false - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - testEnv.Logger.Warn().Msgf("Failed to read response body: %v", err) - return false - } - - testEnv.Logger.Info().Msgf("HTTP trigger response (status %d): %s", resp.StatusCode, string(body)) - - if resp.StatusCode != http.StatusOK { - testEnv.Logger.Warn().Msgf("Gateway returned status %d, retrying...", resp.StatusCode) - return false - } - - err = json.Unmarshal(body, &finalResponse) - if err != nil { - testEnv.Logger.Warn().Msgf("Failed to unmarshal response: %v", err) - return false - } - - if finalResponse.Error != nil { - testEnv.Logger.Warn().Msgf("JSON-RPC error in response: %v", finalResponse.Error) - return false - } - - testEnv.Logger.Info().Msg("Successfully received 200 OK response from gateway") - return true - }, tests.WaitTimeout(t), RetryInterval, "gateway should respond with 200 OK and valid response once workflow is loaded") - - require.Equal(t, jsonrpc.JsonRpcVersion, finalResponse.Version, "expected JSON-RPC version %s, got %s", jsonrpc.JsonRpcVersion, finalResponse.Version) - require.Equal(t, triggerRequest.ID, finalResponse.ID, "expected response ID %s, got %s", triggerRequest.ID, finalResponse.ID) - require.Nil(t, finalResponse.Error, "unexpected error in response: %v", finalResponse.Error) -} - -// validateHTTPWorkflowRequest validates that the workflow made the expected HTTP request -func validateHTTPWorkflowRequest(t *testing.T, testEnv *TestEnvironment) { - require.Eventually(t, func() bool { - records, err := fake.R.Get("POST", "/orders") - return err == nil && len(records) > 0 - }, tests.WaitTimeout(t), RetryInterval, "workflow should have made at least one HTTP request to mock server") - - records, err := fake.R.Get("POST", "/orders") - require.NoError(t, err, "failed to get recorded requests") - require.NotEmpty(t, records, "no requests recorded") - - recordedRequest := records[0] - testEnv.Logger.Info().Msgf("Recorded request: %+v", recordedRequest) - - require.Equal(t, "POST", recordedRequest.Method, "expected POST method") - require.Equal(t, "/orders", recordedRequest.Path, "expected /orders endpoint") - require.Equal(t, "application/json", recordedRequest.Headers.Get("Content-Type"), "expected JSON content type") - - var workflowRequestBody map[string]interface{} - err = 
json.Unmarshal([]byte(recordedRequest.ReqBody), &workflowRequestBody) - require.NoError(t, err, "request body should be valid JSON") - - require.Equal(t, "test-customer", workflowRequestBody["customer"], "expected customer field") - require.Equal(t, "large", workflowRequestBody["size"], "expected size field") - require.Contains(t, workflowRequestBody, "toppings", "expected toppings field") -} - -type configureDataFeedsCacheInput struct { - chainSelector uint64 - fullCldEnvironment *cldf.Environment - workflowName string - feedID string - sethClient *seth.Client - blockchain *blockchain.Output -} - -func configureDataFeedsCacheContract(testLogger zerolog.Logger, input *configureDataFeedsCacheInput) error { - forwarderAddress, forwarderErr := crecontracts.FindAddressesForChain(input.fullCldEnvironment.ExistingAddresses, input.chainSelector, keystone_changeset.KeystoneForwarder.String()) //nolint:staticcheck // won't migrate now - if forwarderErr != nil { - return errors.Wrapf(forwarderErr, "failed to find forwarder address for chain %d", input.chainSelector) - } - - dataFeedsCacheAddress, dataFeedsCacheErr := crecontracts.FindAddressesForChain(input.fullCldEnvironment.ExistingAddresses, input.chainSelector, df_changeset.DataFeedsCache.String()) //nolint:staticcheck // won't migrate now - if dataFeedsCacheErr != nil { - return errors.Wrapf(dataFeedsCacheErr, "failed to find data feeds cache address for chain %d", input.chainSelector) - } - - configInput := &cre.ConfigureDataFeedsCacheInput{ - CldEnv: input.fullCldEnvironment, - ChainSelector: input.chainSelector, - FeedIDs: []string{input.feedID}, - Descriptions: []string{"PoR test feed"}, - DataFeedsCacheAddress: dataFeedsCacheAddress, - AdminAddress: input.sethClient.MustGetRootKeyAddress(), - AllowedSenders: []common.Address{forwarderAddress}, - AllowedWorkflowNames: []string{input.workflowName}, - AllowedWorkflowOwners: []common.Address{input.sethClient.MustGetRootKeyAddress()}, - } - - _, configErr := crecontracts.ConfigureDataFeedsCache(testLogger, configInput) - - return configErr -} - -// Creates workflow configuration file storing the necessary values used by a workflow (i.e. 
feedID, read/write contract addresses) -// The values are written to types.WorkflowConfig -func createWorkflowConfigFile(bcOutput *cre.WrappedBlockchainOutput, readContractAddress, feedsConsumerAddress common.Address, workflowName, feedID, dataURL, writeTargetName string) (string, error) { - cleanFeedID := strings.TrimPrefix(feedID, "0x") - feedLength := len(cleanFeedID) - - if feedLength < 32 { - return "", errors.Errorf("feed ID must be at least 32 characters long, but was %d", feedLength) - } - - if feedLength > 32 { - cleanFeedID = cleanFeedID[:32] - } - - feedIDToUse := "0x" + cleanFeedID - chainFamily := bcOutput.BlockchainOutput.Family - chainID := bcOutput.BlockchainOutput.ChainID - - workflowConfig := portypes.WorkflowConfig{ - ChainFamily: chainFamily, - ChainID: chainID, - BalanceReaderConfig: portypes.BalanceReaderConfig{ - BalanceReaderAddress: readContractAddress.Hex(), - }, - ComputeConfig: portypes.ComputeConfig{ - FeedID: feedIDToUse, - URL: dataURL, - DataFeedsCacheAddress: feedsConsumerAddress.Hex(), - WriteTargetName: writeTargetName, - }, - } - - // Write workflow config to a file - configMarshalled, err := yaml.Marshal(workflowConfig) - if err != nil { - return "", errors.Wrap(err, "failed to marshal workflow config") - } - outputFile := workflowName + "_config.yaml" - - // remove the file if it already exists - _, statErr := os.Stat(outputFile) - if statErr == nil { - if err := os.Remove(outputFile); err != nil { - return "", errors.Wrap(err, "failed to remove existing output file") - } - } - - if err := os.WriteFile(outputFile, configMarshalled, 0644); err != nil { //nolint:gosec // G306: we want it to be readable by everyone - return "", errors.Wrap(err, "failed to write output file") - } - - outputFileAbsPath, outputFileAbsPathErr := filepath.Abs(outputFile) - if outputFileAbsPathErr != nil { - return "", errors.Wrap(outputFileAbsPathErr, "failed to get absolute path of the config file") - } - - return outputFileAbsPath, nil -} - -func executeBeholderTest(t *testing.T, testEnv *TestEnvironment) { - testLogger := framework.L - - bErr := startBeholderStackIfIsNotRunning(DefaultBeholderStackCacheFile, DefaultEnvironmentDir) - require.NoError(t, bErr, "failed to start Beholder") - - chipConfig, chipErr := loadBeholderStackCache() - require.NoError(t, chipErr, "failed to load chip ingress cache") - require.NotNil(t, chipConfig.ChipIngress.Output.RedPanda.KafkaExternalURL, "kafka external url is not set in the cache") - require.NotEmpty(t, chipConfig.Kafka.Topics, "kafka topics are not set in the cache") - - workflowFileLocation := "../../../../core/scripts/cre/environment/examples/workflows/v2/cron/main.go" - workflowName := "cronbeholder" - - compressedWorkflowWasmPath, compileErr := creworkflow.CompileWorkflow(workflowFileLocation, workflowName) - require.NoError(t, compileErr, "failed to compile workflow '%s'", workflowFileLocation) - - homeChainSelector := testEnv.WrappedBlockchainOutputs[0].ChainSelector - workflowRegistryAddress, workflowRegistryErr := crecontracts.FindAddressesForChain( - testEnv.FullCldEnvOutput.Environment.ExistingAddresses, //nolint:staticcheck // won't migrate now - homeChainSelector, - keystone_changeset.WorkflowRegistry.String(), - ) - require.NoError(t, workflowRegistryErr, "failed to find workflow registry address for chain %d", testEnv.WrappedBlockchainOutputs[0].ChainID) - - t.Cleanup(func() { - wasmErr := os.Remove(compressedWorkflowWasmPath) - if wasmErr != nil { - framework.L.Warn().Msgf("failed to remove workflow wasm file %s: %s", 
compressedWorkflowWasmPath, wasmErr.Error()) - } - - deleteErr := creworkflow.DeleteWithContract(t.Context(), testEnv.WrappedBlockchainOutputs[0].SethClient, workflowRegistryAddress, workflowName) - if deleteErr != nil { - framework.L.Warn().Msgf("failed to delete workflow %s: %s. Please delete it manually.", workflowName, deleteErr.Error()) - } - }) - - workflowCopyErr := creworkflow.CopyArtifactToDockerContainers(compressedWorkflowWasmPath, creworkflow.DefaultWorkflowNodePattern, creworkflow.DefaultWorkflowTargetDir) - require.NoError(t, workflowCopyErr, "failed to copy workflow to docker containers") - - _, registerErr := creworkflow.RegisterWithContract( - t.Context(), - testEnv.WrappedBlockchainOutputs[0].SethClient, // crucial to use Seth Client connected to home chain (first chain in the set) - workflowRegistryAddress, - testEnv.FullCldEnvOutput.DonTopology.DonsWithMetadata[0].ID, - workflowName, - "file://"+compressedWorkflowWasmPath, - nil, - nil, - &creworkflow.DefaultWorkflowTargetDir, - ) - require.NoError(t, registerErr, "failed to register cron beholder workflow") - - listenerCtx, cancelListener := context.WithTimeout(t.Context(), 2*time.Minute) - t.Cleanup(func() { - cancelListener() - }) - - kafkaErrChan := make(chan error, 1) - messageChan := make(chan proto.Message, 10) - - // We are interested in UserLogs (successful execution) - // or BaseMessage with specific error message (engine initialization failure) - messageTypes := map[string]func() proto.Message{ - "workflows.v1.UserLogs": func() proto.Message { - return &workflow_events.UserLogs{} - }, - "BaseMessage": func() proto.Message { - return &common_events.BaseMessage{} - }, - } - - // Start listening for messages in the background - go func() { - listenForKafkaMessages(listenerCtx, testLogger, chipConfig.ChipIngress.Output.RedPanda.KafkaExternalURL, chipConfig.Kafka.Topics[0], messageTypes, messageChan, kafkaErrChan) - }() - - expectedUserLog := "Amazing workflow user log" - - foundExpectedLog := make(chan bool, 1) // Channel to signal when expected log is found - foundErrorLog := make(chan bool, 1) // Channel to signal when engine initialization failure is detected - receivedUserLogs := 0 - // Start message processor goroutine - go func() { - for { - select { - case <-listenerCtx.Done(): - return - case msg := <-messageChan: - // Process received messages - switch typedMsg := msg.(type) { - case *common_events.BaseMessage: - if strings.Contains(typedMsg.Msg, "Workflow Engine initialization failed") { - foundErrorLog <- true - } - case *workflow_events.UserLogs: - testLogger.Info(). - Msg("🎉 Received UserLogs message in test") - receivedUserLogs++ - - for _, logLine := range typedMsg.LogLines { - if strings.Contains(logLine.Message, expectedUserLog) { - testLogger.Info(). - Str("expected_log", expectedUserLog). - Str("found_message", strings.TrimSpace(logLine.Message)). - Msg("🎯 Found expected user log message!") - - select { - case foundExpectedLog <- true: - default: // Channel might already have a value - } - return // Exit the processor goroutine - } - testLogger.Warn(). - Str("expected_log", expectedUserLog). - Str("found_message", strings.TrimSpace(logLine.Message)). - Msg("Received UserLogs message, but it does not match expected log") - } - default: - // ignore other message types - } - } - } - }() - - timeout := 2 * time.Minute - - testLogger.Info(). - Str("expected_log", expectedUserLog). - Dur("timeout", timeout). 
- Msg("Waiting for expected user log message or timeout") - - // Wait for either the expected log to be found, or engine initialization failure to be detected, or timeout (2 minutes) - select { - case <-foundExpectedLog: - testLogger.Info(). - Str("expected_log", expectedUserLog). - Msg("✅ Test completed successfully - found expected user log message!") - return - case <-foundErrorLog: - require.Fail(t, "Test completed with error - found engine initialization failure message!") - case <-time.After(timeout): - testLogger.Error().Msg("Timed out waiting for expected user log message") - if receivedUserLogs > 0 { - testLogger.Warn().Int("received_user_logs", receivedUserLogs).Msg("Received some UserLogs messages, but none matched expected log") - } else { - testLogger.Warn().Msg("Did not receive any UserLogs messages") - } - require.Failf(t, "Timed out waiting for expected user log message", "Expected user log message: '%s' not found after %s", expectedUserLog, timeout.String()) - case err := <-kafkaErrChan: - testLogger.Error().Err(err).Msg("Kafka listener encountered an error during execution") - require.Fail(t, "Kafka listener failed", err.Error()) - } - - testLogger.Info().Msg("Beholder test completed") -} - -func createEnvironmentIfNotExists(environmentDir string) error { - cachedConfigFile, cacheErr := ctfConfigToCacheFile() - if cacheErr != nil { - return errors.Wrap(cacheErr, "failed to get cached config file") - } - - if _, err := os.Stat(cachedConfigFile); os.IsNotExist(err) { - framework.L.Info().Str("cached_config_file", cachedConfigFile).Msg("Cached config file does not exist, starting environment...") - cmd := exec.Command("go", "run", ".", "env", "start") - cmd.Dir = environmentDir - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmdErr := cmd.Run() - if cmdErr != nil { - return errors.Wrap(cmdErr, "failed to start environment") - } - } - - return nil -} - -func setConfigurationIfMissing(configName, envArtifactPath string) error { - if os.Getenv("CTF_CONFIGS") == "" { - err := os.Setenv("CTF_CONFIGS", configName) - if err != nil { - return errors.Wrap(err, "failed to set CTF_CONFIGS env var") - } - } - - if os.Getenv("ENV_ARTIFACT_PATH") == "" { - err := os.Setenv("ENV_ARTIFACT_PATH", envArtifactPath) - if err != nil { - return errors.Wrap(err, "failed to set ENV_ARTIFACT_PATH env var") - } - } - - return environment.SetDefaultPrivateKeyIfEmpty(blockchain.DefaultAnvilPrivateKey) -} - -func ctfConfigToCacheFile() (string, error) { - configFile := os.Getenv("CTF_CONFIGS") - if configFile == "" { - return "", errors.New("CTF_CONFIGS env var is not set") - } - - if strings.HasSuffix(configFile, "-cache.toml") { - return configFile, nil - } - - split := strings.Split(configFile, ",") - return strings.ReplaceAll(split[0], ".toml", "") + "-cache.toml", nil -} diff --git a/system-tests/tests/smoke/cre/cre_suite_test.go b/system-tests/tests/smoke/cre/cre_suite_test.go new file mode 100644 index 00000000000..2f7e777db1d --- /dev/null +++ b/system-tests/tests/smoke/cre/cre_suite_test.go @@ -0,0 +1,60 @@ +package cre + +import ( + "testing" + "time" +) + +// TODO: Where to move these constants? Is it appropriate place for keeping defaults here? 
+const ( + AuthorizationKeySecretName = "AUTH_KEY" + // TODO: use once we can run these tests in CI (https://smartcontract-it.atlassian.net/browse/DX-589) + // AuthorizationKey = "12a-281j&@91.sj1:_}" + // It is needed for the FakePriceProvider + AuthorizationKey = "" + + // Test configuration constants + DefaultVerificationTimeout = 5 * time.Minute + DefaultRetryInterval = 2 * time.Second + DefaultValidationInterval = 10 * time.Second +) + +/* +To execute the tests locally, start the local CRE first. +Inside the `core/scripts/cre/environment` directory: + 1. Ensure the necessary capabilities (e.g. readcontract, http-trigger, http-action) are listed in the environment configuration. + 2. Run: `go run . env start && ctf obs up && ctf bs up` to start env + observability + blockscout. + 3. Execute the tests in `system-tests/tests/smoke/cre`: `go test -timeout 15m -run ^Test_CRE_Suite$`. +*/ +func Test_CRE_Suite(t *testing.T) { + testEnv := SetupTestEnvironment(t) + + // WARNING: currently we can't run these tests in parallel, because each test rebuilds environment structs, and that includes + // logging into the CL node via the GraphQL API, which allows only 1 session per user at a time. + t.Run("[v1] CRE Suite", func(t *testing.T) { + // requires `readcontract`, `cron` + t.Run("[v1] CRE Proof of Reserve (PoR) Test", func(t *testing.T) { + ExecutePoRTest(t, testEnv) + }) + }) + + t.Run("[v2] CRE Suite", func(t *testing.T) { + t.Run("[v2] vault DON test", func(t *testing.T) { + ExecuteVaultTest(t, testEnv) + }) + + t.Run("[v2] HTTP trigger and action test", func(t *testing.T) { + // requires `http_trigger`, `http_action` + ExecuteHTTPTriggerActionTest(t, testEnv) + }) + + t.Run("[v2] DON Time test", func(t *testing.T) { + const skipReason = "Implement smoke test - https://smartcontract-it.atlassian.net/browse/CAPPL-1028" + t.Skipf("Skipping test for the following reason: %s", skipReason) + }) + + t.Run("[v2] Beholder test", func(t *testing.T) { + ExecuteBeholderTest(t, testEnv) + }) + }) +} diff --git a/system-tests/tests/smoke/cre/helpers_test.go b/system-tests/tests/smoke/cre/helpers_test.go new file mode 100644 index 00000000000..69c35a1c6ec --- /dev/null +++ b/system-tests/tests/smoke/cre/helpers_test.go @@ -0,0 +1,206 @@ +// helpers_test.go +// +// This file contains reusable test helper functions that encapsulate common, +// logically grouped test-specific steps. They hide and abstract away +// the complexities of the test setup and execution. +// +// All helpers here are intentionally unexported functions (lowercase) +// so they do not leak outside this package. +// +// By keeping repeated setup and execution logic in one place, +// we make individual tests shorter, clearer, and easier to maintain. +// +// Recommendations: +// 1. Keep naming action-oriented: mustStartDB, withEnv, seedUsers. +// 2. Ensure proper cleanup after steps, where necessary, to avoid side effects; a minimal helper in this style is sketched right after this header.
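For illustration, a minimal helper in the action-oriented style recommended above might look like the following (hypothetical sketch; `writeTempArtifact` is not part of this change):

package cre

import (
	"os"
	"path/filepath"
	"testing"
)

// writeTempArtifact writes data to a file under t.TempDir(), which the
// testing package removes automatically when the test ends, satisfying
// the cleanup recommendation without an explicit t.Cleanup call.
func writeTempArtifact(t *testing.T, name string, data []byte) string {
	t.Helper()
	path := filepath.Join(t.TempDir(), name)
	if err := os.WriteFile(path, data, 0o600); err != nil {
		t.Fatalf("failed to write artifact %s: %v", name, err)
	}
	return path
}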
+package cre + +import ( + "context" + "fmt" + "slices" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-testing-framework/framework" + "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/ptr" + "github.com/smartcontractkit/chainlink-testing-framework/seth" + + "github.com/smartcontractkit/chainlink/system-tests/lib/cre" + "github.com/smartcontractkit/chainlink/system-tests/lib/cre/flags" + creworkflow "github.com/smartcontractkit/chainlink/system-tests/lib/cre/workflow" + + portypes "github.com/smartcontractkit/chainlink/core/scripts/cre/environment/examples/workflows/v1/proof-of-reserve/cron-based/types" +) + +// WorkflowConfig is a generic constraint for creating different workflow configurations. +// Register your workflow configuration types here. +type WorkflowConfig interface { + None | portypes.WorkflowConfig | HTTPWorkflowConfig +} + +// None represents an empty workflow configuration. +// It is used to satisfy the workflowConfigFactory type constraint while skipping workflow config creation. +type None struct{} + +// WorkflowRegistrationConfig holds configuration for workflow registration +type WorkflowRegistrationConfig struct { + WorkflowName string + WorkflowLocation string + ConfigFilePath string + CompressedWasmPath string + SecretsURL string + WorkflowRegistryAddr common.Address + DonID uint64 + ContainerTargetDir string +} + +///////////////////////// +// ENVIRONMENT HELPERS // +///////////////////////// + +/* +Parses chain configs and extracts "writable" chain IDs. +If a chain requires a Forwarder contract, it is considered a "writable" chain. + +Recommendation: use it to determine on which chains to deploy certain contracts and register workflows. +See an example in a test using the PoR workflow. +*/ +func getWritableChainsFromSavedEnvironmentState(t *testing.T, testEnv *TestEnvironment) []uint64 { + t.Helper() + + var testLogger = framework.L + testLogger.Info().Msg("Getting writable chains from saved environment state.") + writeableChains := []uint64{} + for _, bcOutput := range testEnv.WrappedBlockchainOutputs { + for _, donMetadata := range testEnv.FullCldEnvOutput.DonTopology.DonsWithMetadata { + if flags.RequiresForwarderContract(donMetadata.Flags, bcOutput.ChainID) { + if !slices.Contains(writeableChains, bcOutput.ChainID) { + writeableChains = append(writeableChains, bcOutput.ChainID) + } + } + } + } + testLogger.Info().Msgf("Writable chains: '%v'", writeableChains) + return writeableChains +} + +////////////////////////////// +// WORKFLOW-RELATED HELPERS // +////////////////////////////// + +/* +Creates the necessary workflow artifacts based on WorkflowConfig: + 1. Creates the configuration for a workflow (or no config if a typed nil is passed for workflowConfig); + 2. Compiles and compresses the workflow into a WASM file; + 3. Copies the workflow artifacts to the Docker containers. + +It returns the paths to: + 1. the compressed WASM file; + 2. the workflow config file. 
+*/ +func createWorkflowArtifacts[T WorkflowConfig](t *testing.T, testLogger zerolog.Logger, workflowName string, workflowConfig *T, workflowFileLocation string) (string, string) { + t.Helper() + + workflowConfigFilePath := workflowConfigFactory(t, testLogger, workflowName, workflowConfig) + compressedWorkflowWasmPath, compileErr := creworkflow.CompileWorkflow(workflowFileLocation, workflowName) + require.NoError(t, compileErr, "failed to compile workflow '%s'", workflowFileLocation) + testLogger.Info().Msg("Workflow compiled successfully.") + + // Copy workflow artifacts to Docker containers to use blockchain client running inside for workflow registration + testLogger.Info().Msg("Copying workflow artifacts to Docker containers.") + copyErr := creworkflow.CopyArtifactsToDockerContainers(creworkflow.DefaultWorkflowTargetDir, creworkflow.DefaultWorkflowNodePattern, compressedWorkflowWasmPath, workflowConfigFilePath) + require.NoError(t, copyErr, "failed to copy workflow artifacts to docker containers") + testLogger.Info().Msg("Workflow artifacts successfully copied to the Docker containers.") + + return compressedWorkflowWasmPath, workflowConfigFilePath +} + +/* +Creates the necessary workflow configuration based on a type registered in the WorkflowConfig interface +Pass `nil` to skip workflow config file creation. + +Returns the path to the workflow config file. +*/ +func workflowConfigFactory[T WorkflowConfig](t *testing.T, testLogger zerolog.Logger, workflowName string, workflowConfig *T) (filePath string) { + t.Helper() + + var workflowConfigFilePath string + + // nil is an acceptable argument that allows skipping config file creation when it is not necessary + if workflowConfig != nil { + switch cfg := any(workflowConfig).(type) { + case *None: + workflowConfigFilePath = "" + testLogger.Info().Msg("Workflow config file is not requested and will not be created.") + + case *portypes.WorkflowConfig: + workflowCfgFilePath, configErr := createPoRWorkflowConfigFile(workflowName, cfg) + workflowConfigFilePath = workflowCfgFilePath + require.NoError(t, configErr, "failed to create PoR workflow config file") + testLogger.Info().Msg("PoR Workflow config file created.") + + case *HTTPWorkflowConfig: + workflowCfgFilePath, configErr := createHTTPWorkflowConfigFile(workflowName, cfg) + workflowConfigFilePath = workflowCfgFilePath + require.NoError(t, configErr, "failed to create HTTP workflow config file") + testLogger.Info().Msg("HTTP Workflow config file created.") + + default: + require.NoError(t, fmt.Errorf("unsupported workflow config type: %T", cfg)) + } + } + return workflowConfigFilePath +} + +/* +Registers a workflow with the specified configuration. 
+*/ +func registerWorkflow(ctx context.Context, t *testing.T, workflowConfig *WorkflowRegistrationConfig, sethClient *seth.Client, testLogger zerolog.Logger) { + t.Helper() + + workflowRegistryAddress := workflowConfig.WorkflowRegistryAddr + donID := workflowConfig.DonID + workflowName := workflowConfig.WorkflowName + binaryURL := "file://" + workflowConfig.CompressedWasmPath + configURL := ptr.Ptr("file://" + workflowConfig.ConfigFilePath) + containerTargetDir := &workflowConfig.ContainerTargetDir + + if workflowConfig.ConfigFilePath == "" { + configURL = nil + } + + workflowID, registerErr := creworkflow.RegisterWithContract( + ctx, + sethClient, + workflowRegistryAddress, + donID, + workflowName, + binaryURL, + configURL, + nil, // no secrets yet + containerTargetDir, + ) + require.NoError(t, registerErr, "failed to register workflow '%s'", workflowConfig.WorkflowName) + testLogger.Info().Msgf("Workflow registered successfully: '%s'", workflowID) +} + +/* +Deletes workflows from: + 1. Local environment + 2. Workflow Registry +*/ +func deleteWorkflows(t *testing.T, uniqueWorkflowName string, workflowConfigFilePath string, compressedWorkflowWasmPath string, blockchainOutputs []*cre.WrappedBlockchainOutput, workflowRegistryAddress common.Address) { + t.Helper() + + var testLogger = framework.L + testLogger.Info().Msgf("Deleting workflow artifacts (%s) after test.\n", uniqueWorkflowName) + localEnvErr := creworkflow.RemoveWorkflowArtifactsFromLocalEnv(workflowConfigFilePath, compressedWorkflowWasmPath) + require.NoError(t, localEnvErr, "failed to remove workflow artifacts from local environment") + + deleteErr := creworkflow.DeleteWithContract(t.Context(), blockchainOutputs[0].SethClient, workflowRegistryAddress, uniqueWorkflowName) + require.NoError(t, deleteErr, "failed to delete workflow '%s'. 
Please delete/unregister it manually.", uniqueWorkflowName) +} diff --git a/system-tests/tests/smoke/cre/por_price_provider.go b/system-tests/tests/smoke/cre/por_price_provider.go index 69d23e68a5c..b1db42c5d7d 100644 --- a/system-tests/tests/smoke/cre/por_price_provider.go +++ b/system-tests/tests/smoke/cre/por_price_provider.go @@ -191,6 +191,7 @@ func cleanFeedID(feedID string) string { } func NewFakePriceProvider(testLogger zerolog.Logger, input *fake.Input, authKey string, feedIDs []string) (PriceProvider, error) { + testLogger.Info().Msg("Creating a new fake price provider...") cleanFeedIDs := make([]string, 0, len(feedIDs)) // workflow is sending feedIDs with 0x prefix and 32 bytes for _, feedID := range feedIDs { @@ -227,6 +228,7 @@ func NewFakePriceProvider(testLogger zerolog.Logger, input *fake.Input, authKey return nil, errors.Wrap(err, "failed to set up fake data provider") } + testLogger.Info().Msg("Fake price provider successfully set up.") return &FakePriceProvider{ testLogger: testLogger, expectedPrices: expectedPrices, diff --git a/system-tests/tests/smoke/cre/v1_por_test.go b/system-tests/tests/smoke/cre/v1_por_test.go new file mode 100644 index 00000000000..8ce8085038f --- /dev/null +++ b/system-tests/tests/smoke/cre/v1_por_test.go @@ -0,0 +1,287 @@ +package cre + +import ( + "fmt" + "os" + "path/filepath" + "slices" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + "gopkg.in/yaml.v3" + + "github.com/smartcontractkit/chainlink-evm/gethwrappers/data-feeds/generated/data_feeds_cache" + "github.com/smartcontractkit/chainlink-testing-framework/framework" + "github.com/smartcontractkit/chainlink-testing-framework/framework/components/blockchain" + + df_changeset "github.com/smartcontractkit/chainlink/deployment/data-feeds/changeset" + keystone_changeset "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" + + corevm "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" + + "github.com/smartcontractkit/chainlink/system-tests/lib/cre" + crecontracts "github.com/smartcontractkit/chainlink/system-tests/lib/cre/contracts" + creworkflow "github.com/smartcontractkit/chainlink/system-tests/lib/cre/workflow" + + portypes "github.com/smartcontractkit/chainlink/core/scripts/cre/environment/examples/workflows/v1/proof-of-reserve/cron-based/types" +) + +type WorkflowTestConfig struct { + WorkflowName string + WorkflowFileLocation string + FeedIDs []string + Timeout time.Duration +} + +func ExecutePoRTest(t *testing.T, testEnv *TestEnvironment) { + testLogger := framework.L + PoRWorkflowFileLocation := "../../../../core/scripts/cre/environment/examples/workflows/v1/proof-of-reserve/cron-based/main.go" + blockchainOutputs := testEnv.WrappedBlockchainOutputs + homeChainSelector := blockchainOutputs[0].ChainSelector + baseWorkflowName := "por-workflow" + feedIDs := []string{"018e16c39e000320000000000000000000000000000000000000000000000000", "018e16c38e000320000000000000000000000000000000000000000000000000"} + baseWorkflowTestConfig := &WorkflowTestConfig{ + WorkflowName: baseWorkflowName, + WorkflowFileLocation: PoRWorkflowFileLocation, + FeedIDs: feedIDs, + Timeout: DefaultVerificationTimeout, + } + + writeableChains := getWritableChainsFromSavedEnvironmentState(t, testEnv) + require.Len(t, baseWorkflowTestConfig.FeedIDs, len(writeableChains), "the number of writable chains must match the number of 
feed IDs (check what chains 'evm' and 'write-evm' capabilities are enabled for)") + + priceProvider, err := NewFakePriceProvider(testLogger, testEnv.Config.Fake, AuthorizationKey, feedIDs) + require.NoError(t, err, "failed to create fake price provider") + + /* + DEPLOY DATA FEEDS CACHE + READ BALANCES CONTRACTS ON ALL CHAINS (except read-only ones) + Workflow will write price data to the data feeds cache contract + + REGISTER ONE WORKFLOW PER CHAIN (except read-only ones) + */ + for idx, bcOutput := range blockchainOutputs { + chainFamily := bcOutput.BlockchainOutput.Family + chainID := bcOutput.ChainID + chainSelector := bcOutput.ChainSelector + chainType := bcOutput.BlockchainOutput.Type + fullCldEnvOutput := testEnv.FullCldEnvOutput + feedID := baseWorkflowTestConfig.FeedIDs[idx] + + if chainType == blockchain.FamilySolana { + continue + } + + // Deploy Data Feeds Cache contract only on chains that are writable + if !slices.Contains(writeableChains, chainID) { + continue + } + + testLogger.Info().Msgf("Deploying additional contracts to chain %d (%d)", chainID, chainSelector) + dataFeedsCacheAddress, dfOutput, dfErr := crecontracts.DeployDataFeedsCacheContract(testLogger, chainSelector, fullCldEnvOutput) + require.NoError(t, dfErr, "failed to deploy Data Feeds Cache contract on chain %d", chainSelector) + + readBalancesAddress, rbOutput, rbErr := crecontracts.DeployReadBalancesContract(testLogger, chainSelector, fullCldEnvOutput) + require.NoError(t, rbErr, "failed to deploy Read Balances contract on chain %d", chainSelector) + crecontracts.MergeAllDataStores(fullCldEnvOutput, dfOutput, rbOutput) + + testLogger.Info().Msgf("Configuring Data Feeds Cache contract...") + forwarderAddress, forwarderErr := crecontracts.FindAddressesForChain(fullCldEnvOutput.Environment.ExistingAddresses, chainSelector, keystone_changeset.KeystoneForwarder.String()) + require.NoError(t, forwarderErr, "failed to find Forwarder address for chain %d", chainSelector) + + uniqueWorkflowName := baseWorkflowTestConfig.WorkflowName + "-" + bcOutput.BlockchainOutput.ChainID + "-" + uuid.New().String()[0:4] // e.g. 
'por-workflow-1337-5f37_config' + configInput := &cre.ConfigureDataFeedsCacheInput{ + CldEnv: fullCldEnvOutput.Environment, + ChainSelector: chainSelector, + FeedIDs: []string{feedID}, + Descriptions: []string{"PoR test feed"}, + DataFeedsCacheAddress: dataFeedsCacheAddress, + AdminAddress: bcOutput.SethClient.MustGetRootKeyAddress(), + AllowedSenders: []common.Address{forwarderAddress}, + AllowedWorkflowNames: []string{uniqueWorkflowName}, + AllowedWorkflowOwners: []common.Address{bcOutput.SethClient.MustGetRootKeyAddress()}, + } + _, dfConfigErr := crecontracts.ConfigureDataFeedsCache(testLogger, configInput) + require.NoError(t, dfConfigErr, "failed to configure Data Feeds Cache contract") + testLogger.Info().Msg("Data Feeds Cache contract configured successfully.") + + testLogger.Info().Msg("Creating PoR workflow configuration file...") + workflowConfig := portypes.WorkflowConfig{ + ChainFamily: chainFamily, + ChainID: strconv.FormatUint(chainID, 10), + BalanceReaderConfig: portypes.BalanceReaderConfig{ + BalanceReaderAddress: readBalancesAddress.Hex(), + }, + ComputeConfig: portypes.ComputeConfig{ + FeedID: feedID, + URL: priceProvider.URL(), + DataFeedsCacheAddress: dataFeedsCacheAddress.Hex(), + WriteTargetName: corevm.GenerateWriteTargetName(chainID), + }, + } + workflowFileLocation := baseWorkflowTestConfig.WorkflowFileLocation + compressedWorkflowWasmPath, workflowConfigFilePath := createWorkflowArtifacts(t, testLogger, uniqueWorkflowName, &workflowConfig, workflowFileLocation) + + testLogger.Info().Msgf("Registering PoR workflow on chain %d (%d)", chainID, chainSelector) + workflowRegistryAddress, workflowRegistryErr := crecontracts.FindAddressesForChain( + fullCldEnvOutput.Environment.ExistingAddresses, + homeChainSelector, // it should live only on one chain, it is not deployed to all chains + keystone_changeset.WorkflowRegistry.String(), + ) + require.NoError(t, workflowRegistryErr, "failed to find Workflow Registry address.") + testLogger.Info().Msgf("Workflow Registry contract found at chain selector %d at %s", homeChainSelector, workflowRegistryAddress) + + workflowRegConfig := &WorkflowRegistrationConfig{ + WorkflowName: uniqueWorkflowName, + WorkflowLocation: workflowFileLocation, + ConfigFilePath: workflowConfigFilePath, + CompressedWasmPath: compressedWorkflowWasmPath, + WorkflowRegistryAddr: workflowRegistryAddress, + DonID: testEnv.FullCldEnvOutput.DonTopology.DonsWithMetadata[0].ID, + ContainerTargetDir: creworkflow.DefaultWorkflowTargetDir, + } + registerWorkflow(t.Context(), t, workflowRegConfig, testEnv.WrappedBlockchainOutputs[0].SethClient, testLogger) + + // AFTER TEST + t.Cleanup(func() { + deleteWorkflows(t, uniqueWorkflowName, workflowConfigFilePath, compressedWorkflowWasmPath, blockchainOutputs, workflowRegistryAddress) + }) + } + /* + START THE VALIDATION PHASE + Check whether each feed has been updated with the expected prices, which workflow fetches from the price provider + */ + validatePoRPrices(t, testEnv, priceProvider, baseWorkflowTestConfig) +} + +/* +Creates .yaml workflow configuration file. +It stores the values used by a workflow (main.go), +(i.e. feedID, read/write contract addresses) + +The values are written to types.WorkflowConfig. +The method returns the absolute path to the created config file. 
+*/ +func createPoRWorkflowConfigFile(workflowName string, workflowConfig *portypes.WorkflowConfig) (string, error) { + feedIDToUse, fIDerr := validateAndFormatFeedID(workflowConfig) + if fIDerr != nil { + return "", errors.Wrap(fIDerr, "failed to validate and format feed ID") + } + workflowConfig.FeedID = feedIDToUse + + // Write workflow config to a .yaml file + configMarshalled, err := yaml.Marshal(workflowConfig) + if err != nil { + return "", errors.Wrap(err, "failed to marshal workflow config") + } + workflowSuffix := "_config.yaml" + workflowConfigOutputFile := workflowName + workflowSuffix + + // remove the file if it already exists + _, statErr := os.Stat(workflowConfigOutputFile) + if statErr == nil { + if err := os.Remove(workflowConfigOutputFile); err != nil { + return "", errors.Wrap(err, "failed to remove existing output file") + } + } + + if err := os.WriteFile(workflowConfigOutputFile, configMarshalled, 0644); err != nil { //nolint:gosec // G306: we want it to be readable by everyone + return "", errors.Wrap(err, "failed to write output file") + } + + outputFileAbsPath, outputFileAbsPathErr := filepath.Abs(workflowConfigOutputFile) + if outputFileAbsPathErr != nil { + return "", errors.Wrap(outputFileAbsPathErr, "failed to get absolute path of the config file") + } + + return outputFileAbsPath, nil +} + +func validateAndFormatFeedID(workflowConfig *portypes.WorkflowConfig) (string, error) { + feedID := workflowConfig.FeedID + + // validate and format the feed ID to exactly 32 hex characters (16 bytes) + cleanFeedID := strings.TrimPrefix(feedID, "0x") + feedIDLength := len(cleanFeedID) + if feedIDLength < 32 { + return "", errors.Errorf("feed ID must be at least 32 characters long, but was %d", feedIDLength) + } + + if feedIDLength > 32 { + cleanFeedID = cleanFeedID[:32] + } + + // override the feed ID in the workflow config to ensure it is exactly 32 hex characters + feedIDToUse := "0x" + cleanFeedID + return feedIDToUse, nil +} + +// validatePoRPrices validates that all feeds receive the expected prices from the price provider +func validatePoRPrices(t *testing.T, testEnv *TestEnvironment, priceProvider PriceProvider, config *WorkflowTestConfig) { + t.Helper() + eg := &errgroup.Group{} + + for idx, bcOutput := range testEnv.WrappedBlockchainOutputs { + if bcOutput.BlockchainOutput.Type == blockchain.FamilySolana { + continue + } + eg.Go(func() error { + feedID := config.FeedIDs[idx] + testEnv.Logger.Info().Msgf("Waiting for feed %s to update...", feedID) + + dataFeedsCacheAddresses, dataFeedsCacheErr := crecontracts.FindAddressesForChain( + testEnv.FullCldEnvOutput.Environment.ExistingAddresses, + bcOutput.ChainSelector, + df_changeset.DataFeedsCache.String(), + ) + if dataFeedsCacheErr != nil { + return fmt.Errorf("failed to find data feeds cache address for chain %d: %w", bcOutput.ChainID, dataFeedsCacheErr) + } + + dataFeedsCacheInstance, instanceErr := data_feeds_cache.NewDataFeedsCache(dataFeedsCacheAddresses, bcOutput.SethClient.Client) + if instanceErr != nil { + return fmt.Errorf("failed to create data feeds cache instance: %w", instanceErr) + } + + startTime := time.Now() + require.Eventually(t, func() bool { + elapsed := time.Since(startTime).Round(time.Second) + price, err := dataFeedsCacheInstance.GetLatestAnswer(bcOutput.SethClient.NewCallOpts(), [16]byte(common.Hex2Bytes(feedID))) + if err != nil { + testEnv.Logger.Error().Err(err).Msg("failed to get price from Data Feeds Cache contract") + return false + } + + // if there are no more prices to be found, we can stop waiting + return 
!priceProvider.NextPrice(feedID, price, elapsed) + }, config.Timeout, DefaultValidationInterval, "feed %s did not update, timeout after: %s", feedID, config.Timeout) + + expected := priceProvider.ExpectedPrices(feedID) + actual := priceProvider.ActualPrices(feedID) + + if len(expected) != len(actual) { + return fmt.Errorf("expected %d prices, got %d", len(expected), len(actual)) + } + + for i := range expected { + if expected[i].Cmp(actual[i]) != 0 { + return fmt.Errorf("expected price %d, got %d", expected[i], actual[i]) + } + } + + testEnv.Logger.Info().Msgf("All prices were found in the feed %s", feedID) + return nil + }) + } + + err := eg.Wait() + require.NoError(t, err, "price validation failed") + + testEnv.Logger.Info().Msgf("All prices were found for all feeds") +} diff --git a/system-tests/tests/smoke/cre/v2_beholder_test.go b/system-tests/tests/smoke/cre/v2_beholder_test.go new file mode 100644 index 00000000000..624bd6b671f --- /dev/null +++ b/system-tests/tests/smoke/cre/v2_beholder_test.go @@ -0,0 +1,161 @@ +package cre + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "google.golang.org/protobuf/proto" + + common_events "github.com/smartcontractkit/chainlink-protos/workflows/go/common" + workflow_events "github.com/smartcontractkit/chainlink-protos/workflows/go/events" + + keystone_changeset "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" + + "github.com/smartcontractkit/chainlink-testing-framework/framework" + + crecontracts "github.com/smartcontractkit/chainlink/system-tests/lib/cre/contracts" + creworkflow "github.com/smartcontractkit/chainlink/system-tests/lib/cre/workflow" +) + +type BeholderWorkflowConfig struct{} + +func ExecuteBeholderTest(t *testing.T, testEnv *TestEnvironment) { + testLogger := framework.L + timeout := 2 * time.Minute + homeChainSelector := testEnv.WrappedBlockchainOutputs[0].ChainSelector + workflowFileLocation := "../../../../core/scripts/cre/environment/examples/workflows/v2/cron/main.go" + workflowName := "cronbeholder" + + testLogger.Info().Msg("Starting Beholder...") + bErr := startBeholderStackIfIsNotRunning(DefaultBeholderStackCacheFile, DefaultEnvironmentDir) + require.NoError(t, bErr, "failed to start Beholder") + + chipConfig, chipErr := loadBeholderStackCache() + require.NoError(t, chipErr, "failed to load chip ingress cache") + require.NotNil(t, chipConfig.ChipIngress.Output.RedPanda.KafkaExternalURL, "kafka external url is not set in the cache") + require.NotEmpty(t, chipConfig.Kafka.Topics, "kafka topics are not set in the cache") + + compressedWorkflowWasmPath, _ := createWorkflowArtifacts(t, testLogger, workflowName, &None{}, workflowFileLocation) + + workflowRegistryAddress, workflowRegistryErr := crecontracts.FindAddressesForChain(testEnv.FullCldEnvOutput.Environment.ExistingAddresses, homeChainSelector, keystone_changeset.WorkflowRegistry.String()) + require.NoError(t, workflowRegistryErr, "failed to find workflow registry address for chain %d", testEnv.WrappedBlockchainOutputs[0].ChainID) + + t.Cleanup(func() { + deleteWorkflows(t, workflowName, "", compressedWorkflowWasmPath, testEnv.WrappedBlockchainOutputs, workflowRegistryAddress) + }) + + workflowRegConfig := &WorkflowRegistrationConfig{ + WorkflowName: workflowName, + WorkflowLocation: workflowFileLocation, + CompressedWasmPath: compressedWorkflowWasmPath, + WorkflowRegistryAddr: workflowRegistryAddress, + DonID: testEnv.FullCldEnvOutput.DonTopology.DonsWithMetadata[0].ID, + ContainerTargetDir: 
creworkflow.DefaultWorkflowTargetDir, + } + registerWorkflow(t.Context(), t, workflowRegConfig, testEnv.WrappedBlockchainOutputs[0].SethClient, testLogger) + + listenerCtx, cancelListener := context.WithTimeout(t.Context(), 2*time.Minute) + t.Cleanup(func() { + cancelListener() + }) + + kafkaErrChan := make(chan error, 1) + messageChan := make(chan proto.Message, 10) + + // We are interested in UserLogs (successful execution) + // or BaseMessage with specific error message (engine initialization failure) + messageTypes := map[string]func() proto.Message{ + "workflows.v1.UserLogs": func() proto.Message { + return &workflow_events.UserLogs{} + }, + "BaseMessage": func() proto.Message { + return &common_events.BaseMessage{} + }, + } + + // Start listening for messages in the background + go func() { + listenForKafkaMessages(listenerCtx, testLogger, chipConfig.ChipIngress.Output.RedPanda.KafkaExternalURL, chipConfig.Kafka.Topics[0], messageTypes, messageChan, kafkaErrChan) + }() + + expectedUserLog := "Amazing workflow user log" + + foundExpectedLog := make(chan bool, 1) // Channel to signal when expected log is found + foundErrorLog := make(chan bool, 1) // Channel to signal when engine initialization failure is detected + receivedUserLogs := 0 + // Start message processor goroutine + go func() { + for { + select { + case <-listenerCtx.Done(): + return + case msg := <-messageChan: + // Process received messages + switch typedMsg := msg.(type) { + case *common_events.BaseMessage: + if strings.Contains(typedMsg.Msg, "Workflow Engine initialization failed") { + foundErrorLog <- true + } + case *workflow_events.UserLogs: + testLogger.Info(). + Msg("🎉 Received UserLogs message in test") + receivedUserLogs++ + + for _, logLine := range typedMsg.LogLines { + if strings.Contains(logLine.Message, expectedUserLog) { + testLogger.Info(). + Str("expected_log", expectedUserLog). + Str("found_message", strings.TrimSpace(logLine.Message)). + Msg("🎯 Found expected user log message!") + + select { + case foundExpectedLog <- true: + default: // Channel might already have a value + } + return // Exit the processor goroutine + } + testLogger.Warn(). + Str("expected_log", expectedUserLog). + Str("found_message", strings.TrimSpace(logLine.Message)). + Msg("Received UserLogs message, but it does not match expected log") + } + default: + // ignore other message types + } + } + } + }() + + testLogger.Info(). + Str("expected_log", expectedUserLog). + Dur("timeout", timeout). + Msg("Waiting for expected user log message or timeout") + + // Wait for either the expected log to be found, or engine initialization failure to be detected, or timeout (2 minutes) + select { + case <-foundExpectedLog: + testLogger.Info(). + Str("expected_log", expectedUserLog). 
+ Msg("✅ Test completed successfully - found expected user log message!") + return + case <-foundErrorLog: + require.Fail(t, "Test completed with error - found engine initialization failure message!") + case <-time.After(timeout): + testLogger.Error().Msg("Timed out waiting for expected user log message") + if receivedUserLogs > 0 { + testLogger.Warn().Int("received_user_logs", receivedUserLogs).Msg("Received some UserLogs messages, but none matched expected log") + } else { + testLogger.Warn().Msg("Did not receive any UserLogs messages") + } + require.Failf(t, "Timed out waiting for expected user log message", "Expected user log message: '%s' not found after %s", expectedUserLog, timeout.String()) + case err := <-kafkaErrChan: + testLogger.Error().Err(err).Msg("Kafka listener encountered an error during execution. Ensure Beholder is running and accessible.") + require.Fail(t, "Kafka listener failed", err.Error()) + } + + testLogger.Info().Msg("Beholder test completed") +} diff --git a/system-tests/tests/smoke/cre/v2_http_trigger_action_test.go b/system-tests/tests/smoke/cre/v2_http_trigger_action_test.go new file mode 100644 index 00000000000..5b63f730e26 --- /dev/null +++ b/system-tests/tests/smoke/cre/v2_http_trigger_action_test.go @@ -0,0 +1,285 @@ +package cre + +import ( + "bytes" + "crypto/ecdsa" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + jsonrpc "github.com/smartcontractkit/chainlink-common/pkg/jsonrpc2" + gateway_common "github.com/smartcontractkit/chainlink-common/pkg/types/gateway" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + "github.com/smartcontractkit/chainlink-testing-framework/framework" + "github.com/smartcontractkit/chainlink-testing-framework/framework/components/fake" + + keystone_changeset "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" + + "github.com/smartcontractkit/chainlink/v2/core/utils" + + crecontracts "github.com/smartcontractkit/chainlink/system-tests/lib/cre/contracts" + creworkflow "github.com/smartcontractkit/chainlink/system-tests/lib/cre/workflow" + libcrypto "github.com/smartcontractkit/chainlink/system-tests/lib/crypto" +) + +func ExecuteHTTPTriggerActionTest(t *testing.T, testEnv *TestEnvironment) { + HTTPWorkflowFileLocation := "../../../../core/scripts/cre/environment/examples/workflows/v2/http_simple/main.go" + var testLogger = framework.L + + homeChainSelector := testEnv.WrappedBlockchainOutputs[0].ChainSelector + testEnv.Logger.Info().Msg("Starting HTTP trigger and action test...") + + publicKeyAddr, signingKey, newKeysErr := libcrypto.GenerateNewKeyPair() + require.NoError(t, newKeysErr, "failed to generate new public key") + + fakeServer, err := startTestOrderServer(t, testEnv.Config.Fake.Port) + require.NoError(t, err, "failed to start fake HTTP server") + + uniqueWorkflowName := "http-trigger-action-test-" + uuid.New().String()[0:8] + httpWorkflowConfig := HTTPWorkflowConfig{ + AuthorizedKey: publicKeyAddr, + URL: fakeServer.BaseURLHost, + } + + compressedWorkflowWasmPath, httpConfigFilePath := createWorkflowArtifacts(t, testLogger, uniqueWorkflowName, &httpWorkflowConfig, HTTPWorkflowFileLocation) + + testLogger.Info().Msg("Registering HTTP workflow") + workflowRegistryAddress, err := 
crecontracts.FindAddressesForChain(testEnv.FullCldEnvOutput.Environment.ExistingAddresses, homeChainSelector, keystone_changeset.WorkflowRegistry.String()) + require.NoError(t, err, "failed to find workflow registry address for chain %d", homeChainSelector) + + regConfig := &WorkflowRegistrationConfig{ + WorkflowName: uniqueWorkflowName, + WorkflowLocation: HTTPWorkflowFileLocation, + ConfigFilePath: httpConfigFilePath, + CompressedWasmPath: compressedWorkflowWasmPath, + WorkflowRegistryAddr: workflowRegistryAddress, + DonID: testEnv.FullCldEnvOutput.DonTopology.DonsWithMetadata[0].ID, + ContainerTargetDir: creworkflow.DefaultWorkflowTargetDir, + } + registerWorkflow(t.Context(), t, regConfig, testEnv.WrappedBlockchainOutputs[0].SethClient, testEnv.Logger) + + testEnv.Logger.Info().Msg("Getting gateway configuration...") + require.NotEmpty(t, testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations, "expected at least one gateway configuration") + newGatewayURL := testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Protocol + "://" + testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Host + ":" + strconv.Itoa(testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.ExternalPort) + testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Path + gatewayURL, err := url.Parse(newGatewayURL) + require.NoError(t, err, "failed to parse gateway URL") + + workflowOwner, err := crypto.HexToECDSA(testEnv.WrappedBlockchainOutputs[0].DeployerPrivateKey) + require.NoError(t, err, "failed to convert private key to ECDSA") + workflowOwnerAddress := strings.ToLower(crypto.PubkeyToAddress(workflowOwner.PublicKey).Hex()) + + testEnv.Logger.Info().Msgf("Workflow owner address: %s", workflowOwnerAddress) + testEnv.Logger.Info().Msgf("Workflow name: %s", uniqueWorkflowName) + + executeHTTPTriggerRequest(t, testEnv, gatewayURL, uniqueWorkflowName, signingKey, workflowOwnerAddress) + validateHTTPWorkflowRequest(t, testEnv) + + testEnv.Logger.Info().Msg("HTTP trigger and action test completed successfully") + + // AFTER TEST + t.Cleanup(func() { + deleteWorkflows(t, uniqueWorkflowName, httpConfigFilePath, compressedWorkflowWasmPath, testEnv.WrappedBlockchainOutputs, workflowRegistryAddress) + }) +} + +// executeHTTPTriggerRequest executes an HTTP trigger request and waits for a successful response +func executeHTTPTriggerRequest(t *testing.T, testEnv *TestEnvironment, gatewayURL *url.URL, workflowName string, signingKey *ecdsa.PrivateKey, workflowOwnerAddress string) { + var finalResponse jsonrpc.Response[json.RawMessage] + var triggerRequest jsonrpc.Request[json.RawMessage] + + require.Eventually(t, func() bool { + triggerRequest = createHTTPTriggerRequestWithKey(t, workflowName, workflowOwnerAddress, signingKey) + triggerRequestBody, err := json.Marshal(triggerRequest) + if err != nil { + testEnv.Logger.Warn().Msgf("Failed to marshal trigger request: %v", err) + return false + } + + testEnv.Logger.Info().Msgf("Gateway URL: %s", gatewayURL.String()) + testEnv.Logger.Info().Msg("Executing HTTP trigger request with retries until workflow is loaded...") + + req, err := http.NewRequestWithContext(t.Context(), "POST", gatewayURL.String(), bytes.NewBuffer(triggerRequestBody)) + if err != nil { + testEnv.Logger.Warn().Msgf("Failed to create request: %v", err) + return false + } + req.Header.Set("Content-Type", "application/jsonrpc") + req.Header.Set("Accept", 
"application/json") + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + testEnv.Logger.Warn().Msgf("Failed to execute request: %v", err) + return false + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + testEnv.Logger.Warn().Msgf("Failed to read response body: %v", err) + return false + } + + testEnv.Logger.Info().Msgf("HTTP trigger response (status %d): %s", resp.StatusCode, string(body)) + + if resp.StatusCode != http.StatusOK { + testEnv.Logger.Warn().Msgf("Gateway returned status %d, retrying...", resp.StatusCode) + return false + } + + err = json.Unmarshal(body, &finalResponse) + if err != nil { + testEnv.Logger.Warn().Msgf("Failed to unmarshal response: %v", err) + return false + } + + if finalResponse.Error != nil { + testEnv.Logger.Warn().Msgf("JSON-RPC error in response: %v", finalResponse.Error) + return false + } + + testEnv.Logger.Info().Msg("Successfully received 200 OK response from gateway") + return true + }, tests.WaitTimeout(t), DefaultRetryInterval, "gateway should respond with 200 OK and valid response once workflow is loaded (ensure the workflow is loaded)") + + require.Equal(t, jsonrpc.JsonRpcVersion, finalResponse.Version, "expected JSON-RPC version %s, got %s", jsonrpc.JsonRpcVersion, finalResponse.Version) + require.Equal(t, triggerRequest.ID, finalResponse.ID, "expected response ID %s, got %s", triggerRequest.ID, finalResponse.ID) + require.Nil(t, finalResponse.Error, "unexpected error in response: %v", finalResponse.Error) +} + +// validateHTTPWorkflowRequest validates that the workflow made the expected HTTP request +func validateHTTPWorkflowRequest(t *testing.T, testEnv *TestEnvironment) { + require.Eventually(t, func() bool { + records, err := fake.R.Get("POST", "/orders") + return err == nil && len(records) > 0 + }, tests.WaitTimeout(t), DefaultRetryInterval, "workflow should have made at least one HTTP request to mock server") + + records, err := fake.R.Get("POST", "/orders") + require.NoError(t, err, "failed to get recorded requests") + require.NotEmpty(t, records, "no requests recorded") + + recordedRequest := records[0] + testEnv.Logger.Info().Msgf("Recorded request: %+v", recordedRequest) + + require.Equal(t, "POST", recordedRequest.Method, "expected POST method") + require.Equal(t, "/orders", recordedRequest.Path, "expected /orders endpoint") + require.Equal(t, "application/json", recordedRequest.Headers.Get("Content-Type"), "expected JSON content type") + + var workflowRequestBody map[string]interface{} + err = json.Unmarshal([]byte(recordedRequest.ReqBody), &workflowRequestBody) + require.NoError(t, err, "request body should be valid JSON") + + require.Equal(t, "test-customer", workflowRequestBody["customer"], "expected customer field") + require.Equal(t, "large", workflowRequestBody["size"], "expected size field") + require.Contains(t, workflowRequestBody, "toppings", "expected toppings field") +} + +type HTTPWorkflowConfig struct { + AuthorizedKey common.Address `json:"authorizedKey"` + URL string `json:"url"` +} + +func createHTTPWorkflowConfigFile(workflowName string, cfg *HTTPWorkflowConfig) (string, error) { + var testLogger = framework.L + mockServerURL := cfg.URL + parsedURL, urlErr := url.Parse(mockServerURL) + if urlErr != nil { + return "", errors.Wrap(urlErr, "failed to parse HTTP mock server URL") + } + + url := fmt.Sprintf("%s:%s", framework.HostDockerInternal(), parsedURL.Port()) + testLogger.Info().Msgf("Mock server URL transformed from '%s' to '%s' for Docker access", 
+
+	// override values in the initial workflow configuration
+	cfg.URL = dockerURL + "/orders"
+
+	configBytes, marshalErr := json.Marshal(cfg)
+	if marshalErr != nil {
+		return "", errors.Wrap(marshalErr, "failed to marshal HTTP workflow config")
+	}
+
+	configFileName := fmt.Sprintf("test_http_workflow_config_%s.json", workflowName)
+	configPath := filepath.Join(os.TempDir(), configFileName)
+
+	writeErr := os.WriteFile(configPath, configBytes, 0644) //nolint:gosec // this is a test file
+	if writeErr != nil {
+		return "", errors.Wrap(writeErr, "failed to write HTTP workflow config file")
+	}
+
+	return configPath, nil
+}
+
+func createHTTPTriggerRequestWithKey(t *testing.T, workflowName, workflowOwner string, privateKey *ecdsa.PrivateKey) jsonrpc.Request[json.RawMessage] {
+	triggerPayload := gateway_common.HTTPTriggerRequest{
+		Workflow: gateway_common.WorkflowSelector{
+			WorkflowOwner: workflowOwner,
+			WorkflowName:  workflowName,
+			WorkflowTag:   "TEMP_TAG",
+		},
+		Input: []byte(`{
+			"customer": "test-customer",
+			"size": "large",
+			"toppings": ["cheese", "pepperoni"],
+			"dedupe": false
+		}`),
+	}
+
+	payloadBytes, err := json.Marshal(triggerPayload)
+	require.NoError(t, err)
+	rawPayload := json.RawMessage(payloadBytes)
+
+	req := jsonrpc.Request[json.RawMessage]{
+		Version: jsonrpc.JsonRpcVersion,
+		Method:  gateway_common.MethodWorkflowExecute,
+		Params:  &rawPayload,
+		ID:      "http-trigger-test-" + uuid.New().String()[0:8],
+	}
+
+	// The request is authenticated with a JWT derived from the request and signed
+	// with the workflow owner's private key.
+	token, err := utils.CreateRequestJWT(req)
+	require.NoError(t, err)
+
+	tokenString, err := token.SignedString(privateKey)
+	require.NoError(t, err)
+	req.Auth = tokenString
+
+	return req
+}
+
+// startTestOrderServer creates a fake HTTP server that records incoming requests and returns canned responses for the /orders endpoint
+func startTestOrderServer(t *testing.T, port int) (*fake.Output, error) {
+	fakeInput := &fake.Input{
+		Port: port,
+	}
+
+	fakeOutput, err := fake.NewFakeDataProvider(fakeInput)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set up the /orders endpoint
+	response := map[string]interface{}{
+		"orderId": "test-order-" + uuid.New().String()[0:8],
+		"status":  "success",
+		"message": "Order processed successfully",
+	}
+
+	err = fake.JSON("POST", "/orders", response, 200)
+	require.NoError(t, err, "failed to set up /orders endpoint")
+
+	framework.L.Info().Msgf("Test order server started on port %d at: %s", port, fakeOutput.BaseURLHost)
+	return fakeOutput, nil
+}
diff --git a/system-tests/tests/smoke/cre/v2_vault_don_test.go b/system-tests/tests/smoke/cre/v2_vault_don_test.go
new file mode 100644
index 00000000000..eb5159b12ed
--- /dev/null
+++ b/system-tests/tests/smoke/cre/v2_vault_don_test.go
@@ -0,0 +1,284 @@
+package cre
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/stretchr/testify/require"
+
+	"google.golang.org/protobuf/encoding/protojson"
+
+	vaultcommon "github.com/smartcontractkit/chainlink-common/pkg/capabilities/actions/vault"
+	jsonrpc "github.com/smartcontractkit/chainlink-common/pkg/jsonrpc2"
+
+	crevault "github.com/smartcontractkit/chainlink/system-tests/lib/cre/capabilities/vault"
+
+	"github.com/smartcontractkit/chainlink-testing-framework/framework"
+
+	vaultapi "github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/vault"
+)
+
+func ExecuteVaultTest(t *testing.T, testEnv *TestEnvironment) {
+	// Skip until we figure out and fix the environment startup issues for this test
+	const skipReason = "Skip until the errors with topology TopologyWorkflowGatewayCapabilities are fixed: https://smartcontract-it.atlassian.net/browse/PRIV-160"
+	t.Skipf("Skipping test for the following reason: %s", skipReason)
+	/*
+		BUILD ENVIRONMENT FROM SAVED STATE
+	*/
+	testLogger := framework.L
+	testLogger.Info().Msg("Getting gateway configuration...")
+	require.NotEmpty(t, testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations, "expected at least one gateway configuration")
+	gatewayURL, err := url.Parse(testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Protocol + "://" + testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Host + ":" + strconv.Itoa(testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.ExternalPort) + testEnv.FullCldEnvOutput.DonTopology.GatewayConnectorOutput.Configurations[0].Incoming.Path)
+	require.NoError(t, err, "failed to parse gateway URL")
+
+	testLogger.Info().Msgf("Gateway URL: %s", gatewayURL.String())
+
+	framework.L.Info().Msg("Sleeping 1 minute to allow the Vault DON to start...")
+	// TODO: Remove this sleep https://smartcontract-it.atlassian.net/browse/PRIV-154
+	time.Sleep(1 * time.Minute)
+	testLogger.Info().Msg("Sleep over. Executing test now...")
+
+	secretID := strconv.Itoa(rand.Intn(10000)) // generate a random secret ID for testing
+	owner := "Owner1"
+	secretValue := "Secret Value to be stored"
+
+	executeVaultSecretsCreateTest(t, secretValue, secretID, owner, gatewayURL.String())
+
+	divider := "------------------------------------------------------"
+	testLogger.Info().Msgf("%s \n%s \n%s \n%s \n%s", divider, divider, divider, divider, divider)
+
+	executeVaultSecretsGetTest(t, secretValue, secretID, owner, gatewayURL.String())
+	executeVaultSecretsUpdateTest(t, secretValue, secretID, owner, gatewayURL.String())
+}
+
+func executeVaultSecretsCreateTest(t *testing.T, secretValue, secretID, owner, gatewayURL string) {
+	framework.L.Info().Msg("Creating secret...")
+	encryptedSecret, err := crevault.EncryptSecret(secretValue)
+	require.NoError(t, err, "failed to encrypt secret")
+
+	uniqueRequestID := uuid.New().String()
+
+	secretsCreateRequest := jsonrpc.Request[vaultcommon.CreateSecretsRequest]{
+		Version: jsonrpc.JsonRpcVersion,
+		ID:      uniqueRequestID,
+		Method:  vaultapi.MethodSecretsCreate,
+		Params: &vaultcommon.CreateSecretsRequest{
+			RequestId: uniqueRequestID,
+			EncryptedSecrets: []*vaultcommon.EncryptedSecret{
+				{
+					Id: &vaultcommon.SecretIdentifier{
+						Key:   secretID,
+						Owner: owner,
+						// Namespace is omitted in this test; set it (e.g. "main") to scope the secret to a namespace
+					},
+					EncryptedValue: encryptedSecret,
+				},
+			},
+		},
+	}
+	requestBody, err := json.Marshal(secretsCreateRequest)
+	require.NoError(t, err, "failed to marshal secrets request")
+
+	httpResponseBody := sendVaultRequestToGateway(t, gatewayURL, requestBody)
+	framework.L.Info().Msg("Checking jsonResponse structure...")
+	var jsonResponse jsonrpc.Response[vaultapi.SignedOCRResponse]
+	err = json.Unmarshal(httpResponseBody, &jsonResponse)
+	require.NoError(t, err, "failed to unmarshal create response")
+	framework.L.Info().Msgf("JSON Body: %v", jsonResponse)
+	require.Nil(t, jsonResponse.Error, "unexpected JSON-RPC error: %v", jsonResponse.Error)
+	require.Equal(t, jsonrpc.JsonRpcVersion, jsonResponse.Version)
+	require.Equal(t, vaultapi.MethodSecretsCreate, jsonResponse.Method)
+
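+	// The result carries the DON's signed OCR report; its Payload field is a
+	// protojson-encoded CreateSecretsResponse, decoded below.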
+	signedOCRResponse := jsonResponse.Result
+	framework.L.Info().Msgf("Signed OCR Response: %s", signedOCRResponse.String())
+
+	// TODO: Verify the authenticity of this signed report by ensuring that the signatures indeed match the payload
+	createSecretsResponse := vaultcommon.CreateSecretsResponse{}
+	err = protojson.Unmarshal(signedOCRResponse.Payload, &createSecretsResponse)
+	require.NoError(t, err, "failed to decode payload into CreateSecretsResponse proto")
+	framework.L.Info().Msgf("CreateSecretsResponse decoded as: %s", createSecretsResponse.String())
+
+	require.Len(t, createSecretsResponse.Responses, 1, "Expected one item in the response")
+	result0 := createSecretsResponse.GetResponses()[0]
+	require.Empty(t, result0.GetError())
+	require.Equal(t, secretID, result0.GetId().Key)
+	require.Equal(t, owner, result0.GetId().Owner)
+
+	framework.L.Info().Msg("Secret created successfully")
+}
+
+func executeVaultSecretsUpdateTest(t *testing.T, secretValue, secretID, owner, gatewayURL string) {
+	framework.L.Info().Msg("Updating secret...")
+	uniqueRequestID := uuid.New().String()
+
+	encryptedSecret, secretErr := crevault.EncryptSecret(secretValue)
+	require.NoError(t, secretErr, "failed to encrypt secret")
+
+	secretsUpdateRequest := jsonrpc.Request[vaultcommon.UpdateSecretsRequest]{
+		Version: jsonrpc.JsonRpcVersion,
+		ID:      uniqueRequestID,
+		Method:  vaultapi.MethodSecretsUpdate,
+		Params: &vaultcommon.UpdateSecretsRequest{
+			RequestId: uniqueRequestID,
+			EncryptedSecrets: []*vaultcommon.EncryptedSecret{
+				{
+					Id: &vaultcommon.SecretIdentifier{
+						Key:   secretID,
+						Owner: owner,
+					},
+					EncryptedValue: encryptedSecret,
+				},
+				{
+					// A second entry for a key that was never created; the DON is expected
+					// to reject it with a "key does not exist" error (asserted below)
+					Id: &vaultcommon.SecretIdentifier{
+						Key:   "invalid",
+						Owner: "invalid",
+					},
+					EncryptedValue: encryptedSecret,
+				},
+			},
+		},
+	}
+	requestBody, err := json.Marshal(secretsUpdateRequest)
+	require.NoError(t, err, "failed to marshal secrets request")
+
+	httpResponseBody := sendVaultRequestToGateway(t, gatewayURL, requestBody)
+	framework.L.Info().Msg("Checking jsonResponse structure...")
+	var jsonResponse jsonrpc.Response[vaultapi.SignedOCRResponse]
+	err = json.Unmarshal(httpResponseBody, &jsonResponse)
+	require.NoError(t, err, "failed to unmarshal update response")
+	framework.L.Info().Msgf("JSON Body: %v", jsonResponse)
+	require.Nil(t, jsonResponse.Error, "unexpected JSON-RPC error: %v", jsonResponse.Error)
+
+	require.Equal(t, jsonrpc.JsonRpcVersion, jsonResponse.Version)
+	require.Equal(t, vaultapi.MethodSecretsUpdate, jsonResponse.Method)
+
+	signedOCRResponse := jsonResponse.Result
+	framework.L.Info().Msgf("Signed OCR Response: %s", signedOCRResponse.String())
+
+	// TODO: Verify the authenticity of this signed report by ensuring that the signatures indeed match the payload
+
+	updateSecretsResponse := vaultcommon.UpdateSecretsResponse{}
+	err = protojson.Unmarshal(signedOCRResponse.Payload, &updateSecretsResponse)
+	require.NoError(t, err, "failed to decode payload into UpdateSecretsResponse proto")
+	framework.L.Info().Msgf("UpdateSecretsResponse decoded as: %s", updateSecretsResponse.String())
+
+	require.Len(t, updateSecretsResponse.Responses, 2, "Expected two items in the response")
+	result0 := updateSecretsResponse.GetResponses()[0]
+	require.Empty(t, result0.GetError())
+	require.Equal(t, secretID, result0.GetId().Key)
+	require.Equal(t, owner, result0.GetId().Owner)
+
+	result1 := updateSecretsResponse.GetResponses()[1]
+	require.Contains(t, result1.Error, "key does not exist")
+
+	framework.L.Info().Msg("Secret updated successfully")
+}
+
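+// executeVaultSecretsGetTest fetches the previously created secret through the gateway
+// and verifies that the response identifies it by the expected key and owner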
+func executeVaultSecretsGetTest(t *testing.T, secretValue, secretID, owner, gatewayURL string) {
+	uniqueRequestID := uuid.New().String()
+	framework.L.Info().Msg("Getting secret...")
+	secretsGetRequest := jsonrpc.Request[vaultcommon.GetSecretsRequest]{
+		Version: jsonrpc.JsonRpcVersion,
+		Method:  vaultapi.MethodSecretsGet,
+		Params: &vaultcommon.GetSecretsRequest{
+			Requests: []*vaultcommon.SecretRequest{
+				{
+					Id: &vaultcommon.SecretIdentifier{
+						Key:   secretID,
+						Owner: owner,
+					},
+				},
+			},
+		},
+		ID: uniqueRequestID,
+	}
+	requestBody, err := json.Marshal(secretsGetRequest)
+	require.NoError(t, err, "failed to marshal secrets request")
+	httpResponseBody := sendVaultRequestToGateway(t, gatewayURL, requestBody)
+	framework.L.Info().Msg("Checking jsonResponse structure...")
+	var jsonResponse jsonrpc.Response[json.RawMessage]
+	err = json.Unmarshal(httpResponseBody, &jsonResponse)
+	require.NoError(t, err, "failed to unmarshal http response body")
+	framework.L.Info().Msgf("JSON Body: %v", jsonResponse)
+	require.Nil(t, jsonResponse.Error, "unexpected JSON-RPC error: %v", jsonResponse.Error)
+	require.Equal(t, jsonrpc.JsonRpcVersion, jsonResponse.Version)
+	require.Equal(t, vaultapi.MethodSecretsGet, jsonResponse.Method)
+
+	/*
+	 * encoding/json cannot handle the proto oneof fields in vaultcommon.SecretResponse:
+	 * Data and Error are oneof members in the proto definition, so the standard JSON
+	 * unmarshaler does not populate them correctly. As a workaround, we define local
+	 * response types that mirror the expected wire structure, which lets us unmarshal
+	 * the JSON response and access the fields as expected.
+	 */
+	type EncryptedShares struct {
+		Shares        []string `protobuf:"bytes,1,rep,name=shares,proto3" json:"shares,omitempty"`
+		EncryptionKey string   `protobuf:"bytes,2,opt,name=encryption_key,json=encryptionKey,proto3" json:"encryption_key,omitempty"`
+	}
+	type SecretData struct {
+		EncryptedValue               string             `protobuf:"bytes,2,opt,name=encrypted_value,json=encryptedValue,proto3" json:"encrypted_value,omitempty"`
+		EncryptedDecryptionKeyShares []*EncryptedShares `protobuf:"bytes,3,rep,name=encrypted_decryption_key_shares,json=encryptedDecryptionKeyShares,proto3" json:"encrypted_decryption_key_shares,omitempty"`
+	}
+	type SecretResponse struct {
+		ID    *vaultcommon.SecretIdentifier `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+		Data  *SecretData                   `protobuf:"bytes,2,opt,name=data,proto3"`
+		Error string                        `protobuf:"bytes,3,opt,name=error,proto3"`
+	}
+	type GetSecretsResponse struct {
+		Responses []*SecretResponse `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"`
+	}
+
+	var getSecretsResponse GetSecretsResponse
+	err = json.Unmarshal(*jsonResponse.Result, &getSecretsResponse)
+	require.NoError(t, err, "failed to unmarshal GetSecretsResponse")
+
+	require.Len(t, getSecretsResponse.Responses, 1, "Expected one secret in the response")
+	result0 := getSecretsResponse.Responses[0]
+	require.Empty(t, result0.Error)
+	require.Equal(t, secretID, result0.ID.Key)
+	require.Equal(t, owner, result0.ID.Owner)
+
+	framework.L.Info().Msg("Secret retrieved successfully")
+}
+
+func sendVaultRequestToGateway(t *testing.T, gatewayURL string, requestBody []byte) []byte {
+	framework.L.Info().Msgf("Request Body: %s", string(requestBody))
+	req, err := http.NewRequestWithContext(context.Background(), "POST", gatewayURL, bytes.NewBuffer(requestBody))
+	require.NoError(t, err, "failed to create request")
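+
+	// The vault gateway endpoint speaks plain JSON over HTTP, so standard JSON
+	// headers are sufficient here.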
request") + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err, "failed to execute request") + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err, "failed to read jsonResponse body") + framework.L.Info().Msgf("Response Body: %s", string(body)) + + require.Equal(t, http.StatusOK, resp.StatusCode, "Gateway endpoint should respond with 200 OK") + return body +}