diff --git a/.changeset/nervous-shrimps-refuse.md b/.changeset/nervous-shrimps-refuse.md new file mode 100644 index 00000000000..e097c11fe71 --- /dev/null +++ b/.changeset/nervous-shrimps-refuse.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +#added LLO plugin channel adder support diff --git a/core/capabilities/integration_tests/keystone/llo_feed_test.go b/core/capabilities/integration_tests/keystone/llo_feed_test.go index b2ef9635eae..a853628ba54 100644 --- a/core/capabilities/integration_tests/keystone/llo_feed_test.go +++ b/core/capabilities/integration_tests/keystone/llo_feed_test.go @@ -128,7 +128,7 @@ func MakeOCRTriggerEvent(lggr logger.Logger, reports *datastreams.LLOStreamsTrig } // Encode the report to bytes - reportBytes, err := reportCodec.Encode(report, channelDef) + reportBytes, err := reportCodec.Encode(report, channelDef, nil) if err != nil { return nil, "", fmt.Errorf("failed to encode report: %w", err) } diff --git a/core/internal/testutils/testutils.go b/core/internal/testutils/testutils.go index d2d86f48bdb..45308a5d8d1 100644 --- a/core/internal/testutils/testutils.go +++ b/core/internal/testutils/testutils.go @@ -167,7 +167,17 @@ func WaitForLogMessage(t *testing.T, observedLogs *observer.ObservedLogs, msg st func WaitForLogMessageWithField(t *testing.T, observedLogs *observer.ObservedLogs, msg, field, value string) (le observer.LoggedEntry) { RequireEventually(t, func() bool { for _, l := range observedLogs.All() { - if strings.Contains(l.Message, msg) && strings.Contains(l.ContextMap()[field].(string), value) { + if !strings.Contains(l.Message, msg) { + continue + } + ctxMap := l.ContextMap() + fieldValue, exists := ctxMap[field] + if !exists { + continue + } + // Convert field value to string for comparison, handling all types + fieldValueStr := fmt.Sprintf("%v", fieldValue) + if strings.Contains(fieldValueStr, value) { le = l return true } diff --git a/core/scripts/go.mod b/core/scripts/go.mod index a9ea42ed5c7..3ad1b2e450e 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -47,11 +47,11 @@ require ( github.com/shopspring/decimal v1.4.0 github.com/smartcontractkit/chainlink-automation v0.8.1 github.com/smartcontractkit/chainlink-ccip v0.1.1-solana.0.20251128020529-88d93b01d749 - github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 - github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 + github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 + github.com/smartcontractkit/chainlink-data-streams v0.1.7 github.com/smartcontractkit/chainlink-deployments-framework v0.70.0 github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec - github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b + github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a github.com/smartcontractkit/chainlink-protos/cre/go v0.0.0-20251124151448-0448aefdaab9 github.com/smartcontractkit/chainlink-protos/job-distributor v0.17.0 github.com/smartcontractkit/chainlink-testing-framework/framework v0.12.1 diff --git a/core/scripts/go.sum b/core/scripts/go.sum index 6430a2e0a63..996a41fd9b3 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1634,20 +1634,20 @@ github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb09e5363e/go.mod h1:IaoLCQE1miX3iUlQNxOPcVrXrshcO/YsFpxnFuhG9DM= 
github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 h1:0IMjHpzI9mgvGGtmsr1NdRhoXp++gU805f/f9oN94ls= github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599/go.mod h1:Ysd/qkofD0bepk29RS7Q4ZlVDd4yAHXucYsp5gAy6AE= -github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 h1:b8aHhus/+bikhHlR5+Ll4Z0hsczJPA0hEj68/8+xyXs= -github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk= +github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 h1:i1Vi+c3Zptqgpvj99IOJZcux6OdYl/2X1QM/fudALP8= +github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 h1:FJAFgXS9oqASnkS03RE1HQwYQQxrO4l46O5JSzxqLgg= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10/go.mod h1:oiDa54M0FwxevWwyAX773lwdWvFYYlYHHQV1LQ5HpWY= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7 h1:9wh1G+WbXwPVqf0cfSRSgwIcaXTQgvYezylEAfwmrbw= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7/go.mod h1:yaDOAZF6MNB+NGYpxGCUc+owIdKrjvFW0JODdTcQ3V0= -github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 h1:gU4suSMid2uQVSxdtPhqGR9s9w3ViclcGtwkQaNbrtM= -github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19/go.mod h1:GPsn6PKJvPe1UfRYyVxsDzOWq6NILzBstiiLq/w+kG0= +github.com/smartcontractkit/chainlink-data-streams v0.1.7 h1:Mwb69azs8hsKyxw93zmLMLDK0QpkF7mZa9PK9eGsG3g= +github.com/smartcontractkit/chainlink-data-streams v0.1.7/go.mod h1:8rUcGhjeXBoTFx2MynWgXiBWzVSB+LXd9JR6m8y2FfQ= github.com/smartcontractkit/chainlink-deployments-framework v0.70.0 h1:wo2KL2viGZK/LhHLM8F88sRyhZF9wwWh+YDzW8hS00g= github.com/smartcontractkit/chainlink-deployments-framework v0.70.0/go.mod h1:Cp7PuO7HUDugp7bWGP/TcDAvvvkFLdKOVrSm0zXlnhg= github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec h1:K8sLjgwPgozQb86LH+aWXqBUJak6VGwSt5YiKbCI/uY= github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec/go.mod h1:9VcrUs+H/f9ekkqAdfUd70Pk2dA1Zc3KykJVFBfJNHs= -github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b h1:Dqhm/67Sb1ohgce8FW6tnK1CRXo2zoLCbV+EGyew5sg= -github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI= +github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a h1:kVKWRGrSCioMY2lEVIEblerv/KkINIQS2hLUOw2wKOg= +github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI= github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 h1:8u9xUrC+yHrTDexOKDd+jrA6LCzFFHeX1G82oj2fsSI= github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135/go.mod h1:NkvE4iQgiT7dMCP6U3xPELHhWhN5Xr6rHC0axRebyMU= github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 h1:ACpDbAxG4fa4sA83dbtYcrnlpE/y7thNIZfHxTv2ZLs= diff --git a/core/services/llo/channeldefinitions/onchain_channel_definition_cache.go b/core/services/llo/channeldefinitions/onchain_channel_definition_cache.go index ba767437eed..854ca9fb97b 100644 --- 
a/core/services/llo/channeldefinitions/onchain_channel_definition_cache.go +++ b/core/services/llo/channeldefinitions/onchain_channel_definition_cache.go @@ -12,6 +12,8 @@ import ( "maps" "math/big" "net/http" + "net/url" + "sort" "strconv" "strings" "sync" @@ -41,16 +43,44 @@ const ( MaxChannelDefinitionsFileSize = 25 * 1024 * 1024 // 25MB // How often we query logpoller for new logs defaultLogPollInterval = 1 * time.Second - // How often we check for failed persistence and attempt to save again + // dbPersistLoopInterval is the interval at which the persist loop writes any unpersisted definitions to the database dbPersistLoopInterval = 1 * time.Second - + // defaultFetchTimeout is the timeout for a single attempt to fetch channel definitions. + defaultFetchTimeout = 15 * time.Second + // fetchRetryTimeout bounds the total time spent retrying a failed fetch of channel definitions. + fetchRetryTimeout = 4 * defaultFetchTimeout + + // MaxChannelsPerAdder is the maximum number of channels allowed per adder source. The limit is + // enforced against the count of existing channels from the same source in currentDefinitions + // plus any new channels added incrementally; the check applies per source during merging, not + // to the size of the fetched definitions file. + MaxChannelsPerAdder = 100 + + // newChannelDefinitionEventName is the ABI event name for NewChannelDefinition events. newChannelDefinitionEventName = "NewChannelDefinition" + // channelDefinitionAddedEventName is the ABI event name for ChannelDefinitionAdded events. + channelDefinitionAddedEventName = "ChannelDefinitionAdded" + + // SourceUndefined represents an undefined channel definition source. + SourceUndefined uint32 = 0 + // SourceOwner represents the owner source for channel definitions, which has full authority. + SourceOwner uint32 = 1 + + // SingleChannelDefinitionsFormat identifies definitions persisted as a single llotypes.ChannelDefinitions map. + SingleChannelDefinitionsFormat uint32 = 0 + + // MultiChannelDefinitionsFormat identifies definitions persisted per source as map[uint32]types.SourceDefinition. + MultiChannelDefinitionsFormat uint32 = 1 ) var ( + // channelConfigStoreABI is the parsed ABI for the ChannelConfigStore contract. channelConfigStoreABI abi.ABI - NewChannelDefinition = (channel_config_store.ChannelConfigStoreNewChannelDefinition{}).Topic() - + // NewChannelDefinition is the topic hash for the NewChannelDefinition event. + NewChannelDefinition = (channel_config_store.ChannelConfigStoreNewChannelDefinition{}).Topic() + // ChannelDefinitionAdded is the topic hash for the ChannelDefinitionAdded event. + ChannelDefinitionAdded = (channel_config_store.ChannelConfigStoreChannelDefinitionAdded{}).Topic() + // NoLimitSortAsc is a query configuration that sorts results by sequence in ascending order with no limit.
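+ // It is used by the readLogs queries so that matching events are processed oldest-first.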
NoLimitSortAsc = query.NewLimitAndSort(query.Limit{}, query.NewSortBySequence(query.Asc)) ) @@ -64,12 +94,14 @@ func init() { type ChannelDefinitionCacheORM interface { LoadChannelDefinitions(ctx context.Context, addr common.Address, donID uint32) (pd *types.PersistedDefinitions, err error) - StoreChannelDefinitions(ctx context.Context, addr common.Address, donID, version uint32, dfns llotypes.ChannelDefinitions, blockNum int64) (err error) + StoreChannelDefinitions(ctx context.Context, addr common.Address, donID, version uint32, dfns json.RawMessage, blockNum int64, format uint32) (err error) CleanupChannelDefinitions(ctx context.Context, addr common.Address, donID uint32) error } var _ llotypes.ChannelDefinitionCache = &channelDefinitionCache{} +// LogPoller is an interface for querying blockchain logs. It provides methods to get the latest block, +// filter logs by expressions, and manage log filters. type LogPoller interface { LatestBlock(ctx context.Context) (logpoller.Block, error) FilteredLogs(ctx context.Context, filter []query.Expression, limitAndSort query.LimitAndSort, queryName string) ([]logpoller.Log, error) @@ -77,14 +109,29 @@ type LogPoller interface { UnregisterFilter(ctx context.Context, filterName string) error } +// Option is a function type for configuring channelDefinitionCache options. type Option func(*channelDefinitionCache) +// WithLogPollInterval returns an Option that sets the log polling interval for the cache. func WithLogPollInterval(d time.Duration) Option { return func(c *channelDefinitionCache) { c.logPollInterval = d } } +// Definitions holds the in-memory state of channel definitions for a channel definition cache. +// It tracks the latest block number processed, the version (for owner sources), and +// source definitions keyed by source ID. +type Definitions struct { + LastBlockNum int64 // The latest block number from which channel definitions were processed + Version uint32 // The version number from the owner source (only updated for SourceOwner) + Sources map[uint32]types.SourceDefinition // Channel definitions grouped by source ID +} + +// channelDefinitionCache maintains an in-memory cache of channel definitions fetched from on-chain +// events and external URLs. It polls the blockchain for new channel definition events, fetches +// definitions from URLs, verifies SHA hashes, merges definitions from multiple sources according +// to authority rules, and persists source definitions (map[uint32]types.SourceDefinition) to the database. 
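+// The overall flow: pollChainLoop reads owner and adder logs and emits types.Trigger values on +// fetchTriggerCh; fetchLatestLoop spawns a fetchLoop goroutine per trigger to download and verify +// definitions, which are stored per source; persistLoop writes the source map to the database; and +// Definitions merges all sources with the previous outcome on demand.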
type channelDefinitionCache struct { services.StateMachine @@ -92,111 +139,224 @@ type channelDefinitionCache struct { client HTTPClient httpLimit int64 - filterName string - lp LogPoller - logPollInterval time.Duration - addr common.Address - donID uint32 - donIDTopic common.Hash - filterExprs []query.Expression - lggr logger.SugaredLogger - initialBlockNum int64 + filterName string + lp LogPoller + logPollInterval time.Duration + addr common.Address + donID uint32 + donIDTopic common.Hash + ownerFilterExprs []query.Expression + adderFilterExprs []query.Expression + lggr logger.SugaredLogger + initialBlockNum int64 - newLogMu sync.RWMutex - newLog *channel_config_store.ChannelConfigStoreNewChannelDefinition - newLogCh chan *channel_config_store.ChannelConfigStoreNewChannelDefinition + fetchTriggerCh chan types.Trigger - definitionsMu sync.RWMutex - definitions llotypes.ChannelDefinitions - definitionsVersion uint32 - definitionsBlockNum int64 + definitionsMu sync.RWMutex + definitions Definitions - persistMu sync.RWMutex - persistedVersion uint32 + persistMu sync.RWMutex + persistedBlockNum int64 wg sync.WaitGroup chStop services.StopChan } +// HTTPClient is an interface for making HTTP requests. It matches the standard library's +// http.Client interface. type HTTPClient interface { Do(req *http.Request) (*http.Response, error) } +// NewChannelDefinitionCache creates a new channel definition cache that monitors on-chain events +// for channel definition updates. It configures log polling filters for both owner and adder events, +// sets up the initial state, and applies any provided options. The cache must be started via Start() +// before it begins polling and fetching definitions. func NewChannelDefinitionCache(lggr logger.Logger, orm ChannelDefinitionCacheORM, client HTTPClient, lp logpoller.LogPoller, addr common.Address, donID uint32, fromBlock int64, options ...Option) llotypes.ChannelDefinitionCache { - filterName := types.ChannelDefinitionCacheFilterName(addr, donID) - donIDTopic := common.BigToHash(big.NewInt(int64(donID))) - - exprs := []query.Expression{ - logpoller.NewAddressFilter(addr), - logpoller.NewEventSigFilter(NewChannelDefinition), - logpoller.NewEventByTopicFilter(1, []logpoller.HashedValueComparator{ - {Values: []common.Hash{donIDTopic}, Operator: primitives.Eq}, - }), - // NOTE: Optimize for fast pickup of new channel definitions. 
On - // Arbitrum, finalization can take tens of minutes - // (https://grafana.ops.prod.cldev.sh/d/e0453cc9-4b4a-41e1-9f01-7c21de805b39/blockchain-finality-and-gas?orgId=1&var-env=All&var-network_name=ethereum-testnet-sepolia-arbitrum-1&var-network_name=ethereum-mainnet-arbitrum-1&from=1732460992641&to=1732547392641) - query.Confidence(primitives.Unconfirmed), - } cdc := &channelDefinitionCache{ orm: orm, client: client, httpLimit: MaxChannelDefinitionsFileSize, - filterName: filterName, + filterName: types.ChannelDefinitionCacheFilterName(addr, donID), lp: lp, logPollInterval: defaultLogPollInterval, addr: addr, donID: donID, - donIDTopic: donIDTopic, - filterExprs: exprs, + donIDTopic: common.BigToHash(big.NewInt(int64(donID))), lggr: logger.Sugared(lggr).Named("ChannelDefinitionCache").With("addr", addr, "fromBlock", fromBlock), - newLogCh: make(chan *channel_config_store.ChannelConfigStoreNewChannelDefinition, 1), + fetchTriggerCh: make(chan types.Trigger, 1), initialBlockNum: fromBlock, chStop: make(chan struct{}), + definitions: Definitions{ + Sources: make(map[uint32]types.SourceDefinition), + }, + } + + cdc.ownerFilterExprs = []query.Expression{ + logpoller.NewAddressFilter(addr), + logpoller.NewEventSigFilter(NewChannelDefinition), + logpoller.NewEventByTopicFilter(1, []logpoller.HashedValueComparator{ + {Values: []common.Hash{cdc.donIDTopic}, Operator: primitives.Eq}, + }), + // Optimize for fast pickup of new channel definitions. + // On Arbitrum, finalization can take a long time. + query.Confidence(primitives.Unconfirmed), + } + + cdc.adderFilterExprs = []query.Expression{ + logpoller.NewAddressFilter(addr), + logpoller.NewEventSigFilter(ChannelDefinitionAdded), + logpoller.NewEventByTopicFilter(1, []logpoller.HashedValueComparator{ + {Values: []common.Hash{cdc.donIDTopic}, Operator: primitives.Eq}, + }), + // Optimize for fast pickup of new channel definitions. + // On Arbitrum, finalization can take a long time. + query.Confidence(primitives.Unconfirmed), } + for _, option := range options { option(cdc) } return cdc } +// Start initializes the channel definition cache by loading persisted state from the database, +// registering logpoller filters, and launching three concurrent asynchronous loops: +// 1. pollChainLoop: Periodically queries logpoller for new channel definition events +// 2. fetchLatestLoop: Receives fetch triggers and coordinates fetching definitions from URLs +// 3. persistLoop: Periodically persists the in-memory source definitions to the database +// All loops run until the cache is stopped via Close(). 
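+// Note that a single logpoller filter is registered covering both the NewChannelDefinition and +// ChannelDefinitionAdded event signatures for the configured contract address and DON ID.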
func (c *channelDefinitionCache) Start(ctx context.Context) error { - // Initial load from DB, then async poll from chain thereafter return c.StartOnce("ChannelDefinitionCache", func() (err error) { - err = c.lp.RegisterFilter(ctx, logpoller.Filter{Name: c.filterName, EventSigs: []common.Hash{NewChannelDefinition}, Topic2: []common.Hash{c.donIDTopic}, Addresses: []common.Address{c.addr}}) + err = c.lp.RegisterFilter(ctx, logpoller.Filter{ + Name: c.filterName, + EventSigs: []common.Hash{NewChannelDefinition, ChannelDefinitionAdded}, + Topic2: []common.Hash{c.donIDTopic}, + Addresses: []common.Address{c.addr}, + }) + if err != nil { return err } - if pd, err := c.orm.LoadChannelDefinitions(ctx, c.addr, c.donID); err != nil { + + var pd *types.PersistedDefinitions + if pd, err = c.orm.LoadChannelDefinitions(ctx, c.addr, c.donID); err != nil { return err - } else if pd != nil { - c.definitions = pd.Definitions - c.definitionsVersion = uint32(pd.Version) + } + + c.definitions.Sources = make(map[uint32]types.SourceDefinition) + if pd != nil { + if pd.Format == MultiChannelDefinitionsFormat { + var sources map[uint32]types.SourceDefinition + if err := json.Unmarshal(pd.Definitions, &sources); err != nil { + return fmt.Errorf("failed to unmarshal definitions: %w", err) + } + c.definitions.Sources = sources + } + c.definitions.Version = pd.Version + c.definitions.LastBlockNum = pd.BlockNum + c.persistedBlockNum = pd.BlockNum if pd.BlockNum+1 > c.initialBlockNum { - c.initialBlockNum = pd.BlockNum + 1 + c.initialBlockNum = pd.BlockNum } - } else { - // ensure non-nil map ready for assignment later - c.definitions = make(llotypes.ChannelDefinitions) - // leave c.initialBlockNum as provided fromBlock argument } + c.wg.Add(3) // We have three concurrent loops // 1. Poll chain for new logs // 2. Fetch latest definitions from URL and verify SHA, according to latest log - // 3. Retry persisting records to DB, if it failed + // 3. Persist definitions to database go c.pollChainLoop() go c.fetchLatestLoop() - go c.failedPersistLoop() + go c.persistLoop() return nil }) } -//////////////////////////////////////////////////////////////////// -// Log Polling -//////////////////////////////////////////////////////////////////// +// blockNumFromUint64 converts a uint64 block number to int64. +// This is safe as block numbers are well within int64 range. +func blockNumFromUint64(blockNum uint64) int64 { + //nolint:gosec // disable G115 + return int64(blockNum) +} + +// unpackOwnerLog unpacks and validates an owner log from logpoller. +// Returns the unpacked log and an error if unpacking or validation fails. 
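+// The DON ID is recovered from topic index 1 and cross-checked against the cache's configured donID.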
+func (c *channelDefinitionCache) unpackOwnerLog(log logpoller.Log) (*channel_config_store.ChannelConfigStoreNewChannelDefinition, error) { + if log.EventSig != NewChannelDefinition { + return nil, fmt.Errorf("log event signature mismatch: expected %x, got %x", NewChannelDefinition, log.EventSig) + } + + unpacked := new(channel_config_store.ChannelConfigStoreNewChannelDefinition) + err := channelConfigStoreABI.UnpackIntoInterface(unpacked, newChannelDefinitionEventName, log.Data) + if err != nil { + return nil, fmt.Errorf("failed to unpack log data: %w", err) + } + + if len(log.Topics) < 2 { + return nil, fmt.Errorf("log missing expected topics: got %d, expected at least 2", len(log.Topics)) + } -// pollChainLoop periodically checks logpoller for new logs + unpacked.DonId = new(big.Int).SetBytes(log.Topics[1]) + //nolint:gosec // disable G115 + unpacked.Raw.BlockNumber = uint64(log.BlockNumber) + + // Validate donID matches + if unpacked.DonId.Cmp(big.NewInt(int64(c.donID))) != 0 { + return nil, fmt.Errorf("donID mismatch: expected %d, got %s", c.donID, unpacked.DonId.String()) + } + + return unpacked, nil +} + +// unpackAdderLog unpacks and validates an adder log from logpoller. +// Returns the unpacked log and an error if unpacking or validation fails. +func (c *channelDefinitionCache) unpackAdderLog(log logpoller.Log) (*channel_config_store.ChannelConfigStoreChannelDefinitionAdded, error) { + if log.EventSig != ChannelDefinitionAdded { + return nil, fmt.Errorf("log event signature mismatch: expected %x, got %x", ChannelDefinitionAdded, log.EventSig) + } + + unpacked := new(channel_config_store.ChannelConfigStoreChannelDefinitionAdded) + err := channelConfigStoreABI.UnpackIntoInterface(unpacked, channelDefinitionAddedEventName, log.Data) + if err != nil { + return nil, fmt.Errorf("failed to unpack adder log data: %w", err) + } + + if len(log.Topics) < 3 { + return nil, fmt.Errorf("adder log missing expected topics: got %d, expected at least 3", len(log.Topics)) + } + + unpacked.DonId = new(big.Int).SetBytes(log.Topics[1]) + //nolint:gosec // disable G115 + unpacked.ChannelAdderId = uint32(new(big.Int).SetBytes(log.Topics[2]).Uint64()) + //nolint:gosec // disable G115 + unpacked.Raw.BlockNumber = uint64(log.BlockNumber) + + // Validate donID matches + if unpacked.DonId.Cmp(big.NewInt(int64(c.donID))) != 0 { + return nil, fmt.Errorf("donID mismatch: expected %d, got %s", c.donID, unpacked.DonId.String()) + } + + return unpacked, nil +} + +// buildFilterExprs builds filter expressions by appending block range filters to base expressions. +func buildFilterExprs(baseExprs []query.Expression, fromBlock, toBlock int64) []query.Expression { + exprs := make([]query.Expression, 0, len(baseExprs)+2) + exprs = append(exprs, baseExprs...) + exprs = append(exprs, + query.Block(strconv.FormatInt(fromBlock, 10), primitives.Gte), + query.Block(strconv.FormatInt(toBlock, 10), primitives.Lte), + ) + return exprs +} + +// pollChainLoop is an asynchronous goroutine that periodically polls logpoller for new channel +// definition events (both owner and adder events). It processes logs sequentially by block number, +// unpacks them into fetch triggers, and sends triggers to the fetch channel for asynchronous +// processing. The loop runs until the cache is stopped, with failures logged and retried on +// the next polling interval. 
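+// The polling cadence is logPollInterval, which defaults to defaultLogPollInterval and can be +// overridden via the WithLogPollInterval option.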
func (c *channelDefinitionCache) pollChainLoop() { defer c.wg.Done() @@ -220,6 +380,11 @@ func (c *channelDefinitionCache) pollChainLoop() { } } +// readLogs queries logpoller for new channel definition events within the block range from +// the last processed block to the latest available block. It fetches adder events +// (ChannelDefinitionAdded) and owner events (NewChannelDefinition) in separate queries, each +// sorted in ascending order, and passes each batch to processLogs for unpacking and trigger +// generation. func (c *channelDefinitionCache) readLogs(ctx context.Context) (err error) { latestBlock, err := c.lp.LatestBlock(ctx) if errors.Is(err, sql.ErrNoRows) { @@ -228,168 +393,337 @@ func (c *channelDefinitionCache) readLogs(ctx context.Context) (err error) { } else if err != nil { return err } - toBlock := latestBlock.BlockNumber + toBlock := latestBlock.BlockNumber fromBlock := c.scanFromBlockNum() - if toBlock <= fromBlock { return nil } - exprs := make([]query.Expression, 0, len(c.filterExprs)+2) - exprs = append(exprs, c.filterExprs...) - exprs = append(exprs, - query.Block(strconv.FormatInt(fromBlock, 10), primitives.Gte), - query.Block(strconv.FormatInt(toBlock, 10), primitives.Lte), - ) + exprs := buildFilterExprs(c.adderFilterExprs, fromBlock, toBlock) + logs, err := c.lp.FilteredLogs(ctx, exprs, NoLimitSortAsc, "ChannelDefinitionCachePoller - NewAdderChannelDefinition") + if err != nil { + return err + } + c.processLogs(logs) - logs, err := c.lp.FilteredLogs(ctx, exprs, NoLimitSortAsc, "ChannelDefinitionCachePoller - NewChannelDefinition") + exprs = buildFilterExprs(c.ownerFilterExprs, fromBlock, toBlock) + logs, err = c.lp.FilteredLogs(ctx, exprs, NoLimitSortAsc, "ChannelDefinitionCachePoller - NewOwnerChannelDefinition") if err != nil { return err } + c.processLogs(logs) + + return nil +} +// scanFromBlockNum returns the next block number to scan from, ensuring no gaps between +// persisted and in-memory state. +// It returns the maximum of the in-memory definitions block number and the initial block number. +func (c *channelDefinitionCache) scanFromBlockNum() int64 { + c.definitionsMu.RLock() + defer c.definitionsMu.RUnlock() + return max(c.definitions.LastBlockNum, c.initialBlockNum) +} + +// processLogs unpacks channel definition logs into fetch triggers by extracting URL, SHA hash, +// block number, and source information. It validates logs and handles unpacking errors gracefully, +// continuing to process remaining logs even if individual logs fail. Valid triggers are sent to +// the fetch channel for asynchronous processing by fetchLatestLoop.
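+// Adder logs for a scan window are handed over before owner logs; ordering across sources is +// reconciled later in Definitions, which applies sources sorted by (BlockNum, LogIndex).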
+func (c *channelDefinitionCache) processLogs(logs []logpoller.Log) { for _, log := range logs { - if log.EventSig != NewChannelDefinition { - // ignore unrecognized logs + var trigger types.Trigger + switch log.EventSig { + case NewChannelDefinition: + unpacked, err := c.unpackOwnerLog(log) + if err != nil { + // Log warning but continue processing other logs + c.lggr.Warnw("Failed to unpack owner log", "err", err, "blockNumber", log.BlockNumber) + continue + } + trigger = types.Trigger{ + Source: SourceOwner, + URL: unpacked.Url, + SHA: unpacked.Sha, + LogIndex: log.LogIndex, + BlockNum: blockNumFromUint64(unpacked.Raw.BlockNumber), + Version: unpacked.Version, + TxHash: log.TxHash, + } + case ChannelDefinitionAdded: + unpacked, err := c.unpackAdderLog(log) + if err != nil { + // Log warning but continue processing other logs + c.lggr.Warnw("Failed to unpack adder log", "err", err, "blockNumber", log.BlockNumber) + continue + } + trigger = types.Trigger{ + Source: unpacked.ChannelAdderId, + URL: unpacked.Url, + SHA: unpacked.Sha, + LogIndex: log.LogIndex, + BlockNum: blockNumFromUint64(unpacked.Raw.BlockNumber), + TxHash: log.TxHash, + } + default: + c.lggr.Warnw("Unknown log event signature", + "blockNumber", log.BlockNumber, "eventSig", log.EventSig, "logHash", log.TxHash.Hex()) continue } - unpacked := new(channel_config_store.ChannelConfigStoreNewChannelDefinition) - - err := channelConfigStoreABI.UnpackIntoInterface(unpacked, newChannelDefinitionEventName, log.Data) - if err != nil { - return fmt.Errorf("failed to unpack log data: %w", err) - } - if len(log.Topics) < 2 { - // should never happen but must guard against unexpected panics - c.lggr.Warnw("Log missing expected topics", "log", log) - continue + c.lggr.Infow("Got new logs", "source", trigger.Source, "url", trigger.URL, "sha", hex.EncodeToString(trigger.SHA[:]), "blockNum", trigger.BlockNum) + select { + case c.fetchTriggerCh <- trigger: + case <-c.chStop: + return } - unpacked.DonId = new(big.Int).SetBytes(log.Topics[1]) + } +} + +type chOpts struct { + FeedID common.Hash `json:"feedID"` +} - //nolint:gosec // disable G115 - unpacked.Raw.BlockNumber = uint64(log.BlockNumber) +// extractFeedID attempts to extract the FeedID from channel options JSON. +// Returns the FeedID if found, or an empty hash if not found or if parsing fails. +func extractFeedID(opts llotypes.ChannelOpts) common.Hash { + if len(opts) == 0 { + return common.Hash{} + } - if unpacked.DonId.Cmp(big.NewInt(int64(c.donID))) != 0 { - // skip logs for other donIDs, shouldn't happen given the - // FilterLogs call, but belts and braces - continue + var optsJSON chOpts + if err := json.Unmarshal(opts, &optsJSON); err != nil { + // If unmarshaling fails, return empty hash (not all channel types have FeedID) + return common.Hash{} + } + return optsJSON.FeedID +} + +// buildFeedIDMap extracts FeedIDs from channel definitions and builds a map +// from FeedID to channel ID for collision detection. 
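+// Channels whose opts carry no FeedID are omitted from the map.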
+func buildFeedIDMap(definitions llotypes.ChannelDefinitions) map[common.Hash]uint32 { + feedIDToChannelID := make(map[common.Hash]uint32) + for channelID, def := range definitions { + feedID := extractFeedID(def.Opts) + if feedID != (common.Hash{}) { + feedIDToChannelID[feedID] = channelID } + } + return feedIDToChannelID +} - c.newLogMu.Lock() - if c.newLog == nil || unpacked.Version > c.newLog.Version { - c.lggr.Infow("Got new channel definitions from chain", "version", unpacked.Version, "blockNumber", log.BlockNumber, "sha", fmt.Sprintf("%x", unpacked.Sha), "url", unpacked.Url) - c.newLog = unpacked - c.newLogCh <- unpacked +// mergeDefinitions reconciles new channel definitions with the current set according to source +// authority rules. Owner definitions (SourceOwner) have full authority: they can add, update, or +// tombstone (delete) channels. Missing channels in newDefinitions are not automatically removed; +// channels must be explicitly tombstoned to be removed. Adder definitions (non-owner sources) have +// limited authority: they can only add new channels and cannot overwrite or tombstone existing ones. +// +// Adder limits are enforced: +// - MaxChannelsPerAdder: The limit is enforced based on existing channels from the same source +// in currentDefinitions plus new channels being added incrementally. The check occurs before +// each new channel addition. Existing channels that are already in currentDefinitions are +// skipped and do not count toward new additions. +// +// FeedID uniqueness is enforced: +// - All channels must have unique FeedIDs in their options. If a new channel has a FeedID that +// collides with an existing channel, the new channel is logged and skipped (not added). +func (c *channelDefinitionCache) mergeDefinitions(source uint32, currentDefinitions llotypes.ChannelDefinitions, newDefinitions llotypes.ChannelDefinitions, feedIDToChannelID map[common.Hash]uint32) { + // Count the number of channels for adder sources in the current definitions + var numberOfChannels uint32 + if source > SourceOwner { + for _, def := range currentDefinitions { + if def.Source == source { + numberOfChannels++ + } } - c.newLogMu.Unlock() } - return nil -} + // process new definitions in a deterministic order + channelIDs := make([]llotypes.ChannelID, 0, len(newDefinitions)) + for channelID := range newDefinitions { + channelIDs = append(channelIDs, channelID) + } + sort.Slice(channelIDs, func(i, j int) bool { + return channelIDs[i] < channelIDs[j] + }) -func (c *channelDefinitionCache) scanFromBlockNum() int64 { - c.newLogMu.RLock() - defer c.newLogMu.RUnlock() - if c.newLog != nil { - //nolint:gosec // disable G115 - return int64(c.newLog.Raw.BlockNumber) + for _, channelID := range channelIDs { + def := newDefinitions[channelID] + + // Check for FeedID collision before adding the channel + newFeedID := extractFeedID(def.Opts) + if newFeedID != (common.Hash{}) { + if existingChannelID, exists := feedIDToChannelID[newFeedID]; exists && existingChannelID != channelID { + c.lggr.Warnw("feedID collision detected, skipping channel definition", + "channelID", channelID, "feedID", newFeedID.Hex(), "existingChannelID", existingChannelID, "source", source) + continue + } + } + + switch { + case source == SourceOwner: + currentDefinitions[channelID] = def + + // Update FeedID map after adding the channel + if newFeedID != (common.Hash{}) { + feedIDToChannelID[newFeedID] = channelID + } + + case source > SourceOwner: + if def.Tombstone { + c.lggr.Warnw("invalid channel tombstone, 
cannot be added by source", + "channelID", channelID, "source", source) + continue + } + + if existing, exists := currentDefinitions[channelID]; exists { + if existing.Source != def.Source { + c.lggr.Warnw("channel adder conflict, skipping definition", + "channelID", channelID, "existingSourceID", existing.Source, "newSourceID", def.Source) + } + // Adders do not overwrite existing definitions, they can only add new ones + continue + } + + // stop processing new definitions if the adder limit is exceeded + if numberOfChannels >= MaxChannelsPerAdder { + c.lggr.Warnw("adder limit exceeded, skipping remaining definitions for source", + "source", source, "numberOfChannels", numberOfChannels, "max", MaxChannelsPerAdder) + return + } + + currentDefinitions[channelID] = def + numberOfChannels++ + // Update FeedID map after adding the channel + if newFeedID != (common.Hash{}) { + feedIDToChannelID[newFeedID] = channelID + } + + default: + c.lggr.Warnw("undefined source, skipping definition", + "channelID", channelID, "source", source) + continue + } } - return c.initialBlockNum } -//////////////////////////////////////////////////////////////////// -// Fetch channel definitions from URL based on latest log -//////////////////////////////////////////////////////////////////// - -// fetchLatestLoop waits for new logs and tries on a loop to fetch the channel definitions from the specified url +// fetchLatestLoop is an asynchronous goroutine that receives fetch triggers from the poll chain +// loop via a channel. It coordinates fetching channel definitions from URLs, verifying SHA hashes, +// and storing them in c.definitions.Sources (the source definitions map). +// It spawns a separate goroutine (fetchLoop) for each trigger. func (c *channelDefinitionCache) fetchLatestLoop() { defer c.wg.Done() - var cancel context.CancelFunc = func() {} - + var trigger types.Trigger for { select { - case latest := <-c.newLogCh: - // kill the old retry loop if any - cancel() - - var ctx context.Context - ctx, cancel = context.WithCancel(context.Background()) - + case trigger = <-c.fetchTriggerCh: + if trigger.Source == SourceUndefined { + c.lggr.Warnw("Undefined source to fetch", "url", trigger.URL, "source", trigger.Source) + continue + } c.wg.Add(1) - go c.fetchLoop(ctx, latest) + go c.fetchLoop(trigger) case <-c.chStop: - // kill the old retry loop if any - cancel() return } } } -func (c *channelDefinitionCache) fetchLoop(ctx context.Context, log *channel_config_store.ChannelConfigStoreNewChannelDefinition) { +// fetchLoop fetches the channel definitions for a single trigger, retrying with exponential +// backoff until the fetch succeeds, fetchRetryTimeout elapses, or the cache is stopped. +// Running each fetch in its own goroutine isolates retry logic from fetchLatestLoop, which stays +// free to process new triggers while retries occur in the background.
+func (c *channelDefinitionCache) fetchLoop(trigger types.Trigger) { defer c.wg.Done() + var err error b := utils.NewHTTPFetchBackoff() - var attemptCnt int - err := c.fetchAndSetChannelDefinitions(ctx, log) - if err == nil { - c.lggr.Debugw("Set new channel definitions", "donID", c.donID, "version", log.Version, "url", log.Url, "sha", fmt.Sprintf("%x", log.Sha)) + ctx, cancel := c.chStop.CtxWithTimeout(fetchRetryTimeout) + defer cancel() + + if err = c.fetchAndSetChannelDefinitions(ctx, trigger); err == nil { return } - c.lggr.Warnw("Error while fetching channel definitions", "donID", c.donID, "version", log.Version, "url", log.Url, "sha", fmt.Sprintf("%x", log.Sha), "err", err, "attempt", attemptCnt) + c.lggr.Warnw("Error while fetching channel definitions", "donID", + c.donID, "err", err, "source", trigger.Source, "attempt", b.Attempt()) for { select { case <-ctx.Done(): return case <-time.After(b.Duration()): - attemptCnt++ - err := c.fetchAndSetChannelDefinitions(ctx, log) - if err != nil { - c.lggr.Warnw("Error while fetching channel definitions", "version", log.Version, "url", log.Url, "sha", fmt.Sprintf("%x", log.Sha), "err", err, "attempt", attemptCnt) + if err := c.fetchAndSetChannelDefinitions(ctx, trigger); err != nil { + c.lggr.Warnw("Error while fetching channel definitions", "donID", + c.donID, "err", err, "source", trigger.Source, "attempt", b.Attempt()) continue } - c.lggr.Debugw("Set new channel definitions", "donID", c.donID, "version", log.Version, "url", log.Url, "sha", fmt.Sprintf("%x", log.Sha)) return } } } -func (c *channelDefinitionCache) fetchAndSetChannelDefinitions(ctx context.Context, log *channel_config_store.ChannelConfigStoreNewChannelDefinition) error { - c.definitionsMu.RLock() - if log.Version <= c.definitionsVersion { - c.definitionsMu.RUnlock() - return nil - } - c.definitionsMu.RUnlock() - - cd, err := c.fetchChannelDefinitions(ctx, log.Url, log.Sha) +// fetchAndSetChannelDefinitions orchestrates fetching and storing channel definitions from a trigger. +// It checks that the trigger block number is newer than the current state to avoid processing stale +// events, fetches definitions from the URL and verifies the SHA hash, then stores them in +// c.definitions.Sources keyed by source ID. It also updates c.definitions.LastBlockNum and, for owner +// sources, c.definitions.Version. The actual merging of source definitions happens later when +// Definitions() is called. +// +// Returns an error if fetching, SHA verification, or JSON decoding fails. Note that adder limit +// checks occur during merging in Definitions(), where violations are handled by logging warnings +// and stopping processing for that source, not by returning errors. 
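+// Triggers whose block number is older than the already-stored state for the same source are +// ignored, so out-of-order fetch completions cannot regress a source's definitions.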
+func (c *channelDefinitionCache) fetchAndSetChannelDefinitions(ctx context.Context, trigger types.Trigger) error { + defs, err := c.fetchChannelDefinitions(ctx, trigger) if err != nil { - return err + return fmt.Errorf("failed to fetch channel definitions: %w", err) } + c.definitionsMu.Lock() - if log.Version <= c.definitionsVersion { - c.definitionsMu.Unlock() - return nil + defer c.definitionsMu.Unlock() + if sourceDef, exists := c.definitions.Sources[trigger.Source]; exists { + if sourceDef.Trigger.BlockNum > trigger.BlockNum { + return nil + } + } + + c.definitions.Sources[trigger.Source] = types.SourceDefinition{ + Trigger: trigger, + Definitions: defs, + } + + if trigger.Source == SourceOwner { + c.definitions.Version = trigger.Version } - c.definitions = cd - c.definitionsBlockNum = int64(log.Raw.BlockNumber) - c.definitionsVersion = log.Version - c.definitionsMu.Unlock() - if memoryVersion, persistedVersion, err := c.persist(ctx); err != nil { - // If this fails, the failedPersistLoop will try again - c.lggr.Warnw("Failed to persist channel definitions", "err", err, "memoryVersion", memoryVersion, "persistedVersion", persistedVersion) + if trigger.BlockNum > c.definitions.LastBlockNum { + c.definitions.LastBlockNum = trigger.BlockNum } + c.lggr.Infow("Set channel definitions for source", + "source", trigger.Source, "blockNum", trigger.BlockNum, "url", trigger.URL, "sha", hex.EncodeToString(trigger.SHA[:])) + return nil } -func (c *channelDefinitionCache) fetchChannelDefinitions(ctx context.Context, url string, expectedSha [32]byte) (llotypes.ChannelDefinitions, error) { - request, err := http.NewRequestWithContext(ctx, "GET", url, nil) +// fetchChannelDefinitions fetches channel definitions from the URL specified in the trigger, +// verifies the response SHA3 hash matches the expected hash from the on-chain event, decodes +// the JSON response, and annotates each definition with its source identifier. Returns an +// error if the URL is invalid, the HTTP request fails, the hash verification fails, or the +// JSON cannot be decoded. 
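+// Each attempt is bounded by defaultFetchTimeout, and the response size is capped at the +// configured httpLimit (MaxChannelDefinitionsFileSize) via the request's SizeLimit.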
+func (c *channelDefinitionCache) fetchChannelDefinitions(ctx context.Context, trigger types.Trigger) (llotypes.ChannelDefinitions, error) { + u, err := url.ParseRequestURI(trigger.URL) + if err != nil { + return nil, fmt.Errorf("failed to parse URL %s: %w", trigger.URL, err) + } + + ctx, cancel := context.WithTimeout(ctx, defaultFetchTimeout) + defer cancel() + + request, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) if err != nil { - return nil, fmt.Errorf("failed to create http.Request; %w", err) + return nil, fmt.Errorf("failed to create HTTP request for channel definitions URL %s: %w", trigger.URL, err) } request.Header.Set("Content-Type", "application/json") @@ -397,14 +731,13 @@ func (c *channelDefinitionCache) fetchChannelDefinitions(ctx context.Context, ur Client: c.client, Request: request, Config: clhttp.RequestConfig{SizeLimit: c.httpLimit}, - Logger: c.lggr.Named("HTTPRequest").With("url", url, "expectedSHA", hex.EncodeToString(expectedSha[:])), + Logger: c.lggr.Named("HTTPRequest").With("url", trigger.URL, "expectedSHA", hex.EncodeToString(trigger.SHA[:])), } reader, statusCode, _, err := httpRequest.SendRequestReader() if err != nil { - return nil, fmt.Errorf("error making http request: %w", err) + return nil, fmt.Errorf("failed to make HTTP request to channel definitions URL %s: %w", trigger.URL, err) } - defer reader.Close() if statusCode >= 400 { // NOTE: Truncate the returned body here as we don't want to spam the @@ -413,10 +746,11 @@ func (c *channelDefinitionCache) fetchChannelDefinitions(ctx context.Context, ur defer body.Close() bodyBytes, err := io.ReadAll(body) if err != nil { - return nil, fmt.Errorf("got error from %s: (status code: %d, error reading response body: %w, response body: %s)", url, statusCode, err, bodyBytes) + return nil, fmt.Errorf("HTTP error from channel definitions URL %s (status %d): failed to read response body: %w (partial body: %s)", trigger.URL, statusCode, err, bodyBytes) } - return nil, fmt.Errorf("got error from %s: (status code: %d, response body: %s)", url, statusCode, string(bodyBytes)) + return nil, fmt.Errorf("HTTP error from channel definitions URL %s (status %d): %s", trigger.URL, statusCode, string(bodyBytes)) } + defer reader.Close() var buf bytes.Buffer // Use a teeReader to avoid excessive copying @@ -425,65 +759,63 @@ func (c *channelDefinitionCache) fetchChannelDefinitions(ctx context.Context, ur hash := sha3.New256() // Stream the data directly into the hash and copy to buf as we go if _, err := io.Copy(hash, teeReader); err != nil { - return nil, fmt.Errorf("failed to read from body: %w", err) + return nil, fmt.Errorf("failed to read channel definitions response body from %s: %w", trigger.URL, err) } actualSha := hash.Sum(nil) - if !bytes.Equal(expectedSha[:], actualSha) { - return nil, fmt.Errorf("SHA3 mismatch: expected %x, got %x", expectedSha, actualSha) + if !bytes.Equal(trigger.SHA[:], actualSha) { + return nil, fmt.Errorf("SHA3 mismatch for channel definitions from %s: expected %s, got %x", trigger.URL, hex.EncodeToString(trigger.SHA[:]), actualSha) } var cd llotypes.ChannelDefinitions decoder := json.NewDecoder(&buf) if err := decoder.Decode(&cd); err != nil { - return nil, fmt.Errorf("failed to decode JSON: %w", err) + return nil, fmt.Errorf("failed to decode channel definitions JSON from %s: %w", trigger.URL, err) + } + + // Annotate each definition with its source identifier. 
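+ // The Source field is later used by mergeDefinitions to count per-adder channels and to detect conflicts between sources.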
+ for channelID, def := range cd { + def.Source = trigger.Source + cd[channelID] = def } return cd, nil } -//////////////////////////////////////////////////////////////////// -// Persistence -//////////////////////////////////////////////////////////////////// - -func (c *channelDefinitionCache) persist(ctx context.Context) (memoryVersion, persistedVersion uint32, err error) { - c.persistMu.RLock() - persistedVersion = c.persistedVersion - c.persistMu.RUnlock() +// persist atomically writes the in-memory source definitions (c.definitions.Sources) to the database. +// Returns the memory and persisted block numbers along with any error that occurred during persistence. +func (c *channelDefinitionCache) persist(ctx context.Context) (int64, int64, error) { + c.persistMu.Lock() + defer c.persistMu.Unlock() c.definitionsMu.RLock() - memoryVersion = c.definitionsVersion - dfns := c.definitions - blockNum := c.definitionsBlockNum + definitions := maps.Clone(c.definitions.Sources) + definitionsBlockNum := c.definitions.LastBlockNum + definitionsVersion := c.definitions.Version c.definitionsMu.RUnlock() - if memoryVersion <= persistedVersion { - return + if c.persistedBlockNum >= definitionsBlockNum { + return definitionsBlockNum, c.persistedBlockNum, nil } - if err = c.orm.StoreChannelDefinitions(ctx, c.addr, c.donID, memoryVersion, dfns, blockNum); err != nil { - return + definitionsJSON, err := json.Marshal(definitions) + if err != nil { + return definitionsBlockNum, c.persistedBlockNum, fmt.Errorf("failed to marshal definitions: %w", err) } - c.persistMu.Lock() - defer c.persistMu.Unlock() - if memoryVersion > c.persistedVersion { - persistedVersion = memoryVersion - c.persistedVersion = persistedVersion + err = c.orm.StoreChannelDefinitions(ctx, c.addr, c.donID, definitionsVersion, + definitionsJSON, definitionsBlockNum, MultiChannelDefinitionsFormat) + if err != nil { + return definitionsBlockNum, c.persistedBlockNum, fmt.Errorf("failed to store definitions: %w", err) } - // NOTE: We could, in theory, delete the old logs from logpoller here since - // they are no longer needed. But logpoller does not currently support - // that, and in any case, the number is likely to be small so not worth - // worrying about. - return + c.persistedBlockNum = definitionsBlockNum + return definitionsBlockNum, c.persistedBlockNum, nil } -// Checks persisted version and tries to save if necessary on a periodic timer -// Simple backup in case database persistence fails -func (c *channelDefinitionCache) failedPersistLoop() { +// persistLoop is an asynchronous goroutine that periodically persists the in-memory source definitions to the database. 
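+// It runs every dbPersistLoopInterval and, on shutdown, attempts one final persist with a short timeout.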
+func (c *channelDefinitionCache) persistLoop() { defer c.wg.Done() - ctx, cancel := c.chStop.NewCtx() defer cancel() @@ -491,38 +823,83 @@ func (c *channelDefinitionCache) failedPersistLoop() { select { case <-time.After(dbPersistLoopInterval): if memoryVersion, persistedVersion, err := c.persist(ctx); err != nil { - c.lggr.Warnw("Failed to persist channel definitions", "err", err, "memoryVersion", memoryVersion, "persistedVersion", persistedVersion) + c.lggr.Warnw("Failed to persist channel definitions", "err", err, "memoryVersion", memoryVersion, + "persistedVersion", persistedVersion) } case <-c.chStop: // Try one final persist with a short-ish timeout, then return ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() if memoryVersion, persistedVersion, err := c.persist(ctx); err != nil { - c.lggr.Errorw("Failed to persist channel definitions on shutdown", "err", err, "memoryVersion", memoryVersion, "persistedVersion", persistedVersion) + c.lggr.Errorw("Failed to persist channel definitions on shutdown", + "err", err, "memoryVersion", memoryVersion, "persistedVersion", persistedVersion) } return } } } +// Close stops the channel definition cache by canceling all contexts, closing the stop channel, +// and waiting for all goroutines to finish. It implements the services.Service interface. func (c *channelDefinitionCache) Close() error { return c.StopOnce("ChannelDefinitionCache", func() error { - // Cancel all contexts but try one final persist before closing + // Cancel all contexts by closing the stop channel and wait for all goroutines to finish close(c.chStop) c.wg.Wait() return nil }) } +// HealthReport returns a health report map containing the cache's health status. +// It implements the services.Service interface. func (c *channelDefinitionCache) HealthReport() map[string]error { report := map[string]error{c.Name(): c.Healthy()} return report } +// Name returns the name of the channel definition cache service. +// It implements the services.Service interface. func (c *channelDefinitionCache) Name() string { return c.lggr.Name() } -func (c *channelDefinitionCache) Definitions() llotypes.ChannelDefinitions { +// Definitions merges all source definitions stored in c.definitions.Sources with the provided previous +// outcome definitions and returns the merged result. It starts with a clone of the prev parameter, +// applying source authority rules and adder limits. If adder limit violations occur, warnings are +// logged and processing stops for that source, but processing continues with other sources. After merging all +// sources, it does not update any in-memory fields (merging is read-only). Persistence of source definitions +// happens separately via the persistLoop goroutine, not directly triggered by this method. +// This is the main method that performs the actual reconciliation of channel definitions from +// multiple sources with the previous outcome definitions. 
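+// Sources are applied oldest-first, ordered by (BlockNum, LogIndex), so where authority rules +// permit an overwrite the most recent on-chain event wins.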
+func (c *channelDefinitionCache) Definitions(prev llotypes.ChannelDefinitions) llotypes.ChannelDefinitions { c.definitionsMu.RLock() defer c.definitionsMu.RUnlock() - return maps.Clone(c.definitions) + + // nothing to merge + if len(c.definitions.Sources) == 0 { + return prev + } + + merged := maps.Clone(prev) + if merged == nil { + merged = make(llotypes.ChannelDefinitions) + } + + src := make([]types.SourceDefinition, 0, len(c.definitions.Sources)) + for _, sourceDefinition := range c.definitions.Sources { + src = append(src, sourceDefinition) + } + + // process definitions deterministically + sort.Slice(src, func(i, j int) bool { + if src[i].Trigger.BlockNum == src[j].Trigger.BlockNum { + return src[i].Trigger.LogIndex < src[j].Trigger.LogIndex + } + return src[i].Trigger.BlockNum < src[j].Trigger.BlockNum + }) + + feedIDToChannelID := buildFeedIDMap(merged) + for _, sourceDefinition := range src { + c.mergeDefinitions(sourceDefinition.Trigger.Source, merged, sourceDefinition.Definitions, feedIDToChannelID) + } + + return merged } diff --git a/core/services/llo/channeldefinitions/onchain_channel_definition_cache_test.go b/core/services/llo/channeldefinitions/onchain_channel_definition_cache_test.go index 0cec6fd7cfa..5268303bcb4 100644 --- a/core/services/llo/channeldefinitions/onchain_channel_definition_cache_test.go +++ b/core/services/llo/channeldefinitions/onchain_channel_definition_cache_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "database/sql" + "encoding/json" "errors" "fmt" "io" @@ -13,13 +14,11 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink-common/pkg/logger" llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" "github.com/smartcontractkit/chainlink-common/pkg/types/query" - "github.com/smartcontractkit/chainlink-evm/gethwrappers/llo-feeds/generated/channel_config_store" "github.com/smartcontractkit/chainlink-evm/pkg/logpoller" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/services/llo/types" @@ -29,6 +28,8 @@ type mockLogPoller struct { latestBlock logpoller.Block latestBlockErr error filteredLogs []logpoller.Log + adderLogs []logpoller.Log + ownerLogs []logpoller.Log filteredLogsErr error unregisteredFilterNames []string @@ -41,6 +42,19 @@ func (m *mockLogPoller) LatestBlock(ctx context.Context) (logpoller.Block, error return m.latestBlock, m.latestBlockErr } func (m *mockLogPoller) FilteredLogs(ctx context.Context, filter []query.Expression, limitAndSort query.LimitAndSort, queryName string) ([]logpoller.Log, error) { + // Return different logs based on query name to simulate separate adder/owner queries + if queryName == "ChannelDefinitionCachePoller - NewAdderChannelDefinition" { + if len(m.adderLogs) > 0 { + return m.adderLogs, m.filteredLogsErr + } + return m.filteredLogs, m.filteredLogsErr + } + if queryName == "ChannelDefinitionCachePoller - NewOwnerChannelDefinition" { + if len(m.ownerLogs) > 0 { + return m.ownerLogs, m.filteredLogsErr + } + return m.filteredLogs, m.filteredLogsErr + } return m.filteredLogs, m.filteredLogsErr } func (m *mockLogPoller) UnregisterFilter(ctx context.Context, name string) error { @@ -67,19 +81,24 @@ type mockCDCORM struct { lastPersistedAddr common.Address lastPersistedDonID uint32 lastPersistedVersion uint32 - lastPersistedDfns llotypes.ChannelDefinitions + lastPersistedDfns map[uint32]types.SourceDefinition 
lastPersistedBlockNum int64 + lastPersistedFormat uint32 } func (m *mockCDCORM) LoadChannelDefinitions(ctx context.Context, addr common.Address, donID uint32) (pd *types.PersistedDefinitions, err error) { panic("not implemented") } -func (m *mockCDCORM) StoreChannelDefinitions(ctx context.Context, addr common.Address, donID, version uint32, dfns llotypes.ChannelDefinitions, blockNum int64) (err error) { +func (m *mockCDCORM) StoreChannelDefinitions(ctx context.Context, addr common.Address, donID, version uint32, dfns json.RawMessage, blockNum int64, format uint32) (err error) { m.lastPersistedAddr = addr m.lastPersistedDonID = donID m.lastPersistedVersion = version - m.lastPersistedDfns = dfns m.lastPersistedBlockNum = blockNum + m.lastPersistedFormat = format + // Unmarshal the json.RawMessage to store in lastPersistedDfns for test assertions + if err := json.Unmarshal(dfns, &m.lastPersistedDfns); err != nil { + return err + } return m.err } @@ -89,7 +108,7 @@ func (m *mockCDCORM) CleanupChannelDefinitions(ctx context.Context, addr common. func makeLog(t *testing.T, donID, version uint32, url string, sha [32]byte) logpoller.Log { data := makeLogData(t, donID, version, url, sha) - return logpoller.Log{EventSig: NewChannelDefinition, Topics: [][]byte{NewChannelDefinition[:], makeDonIDTopic(donID)}, Data: data} + return logpoller.Log{EventSig: NewChannelDefinition, Topics: [][]byte{NewChannelDefinition[:], makeDonIDTopic(donID)}, Data: data, BlockNumber: int64(version) + 1000} } func makeLogData(t *testing.T, donID, version uint32, url string, sha [32]byte) []byte { @@ -101,191 +120,479 @@ func makeLogData(t *testing.T, donID, version uint32, url string, sha [32]byte) return data } +func makeAdderLog(t *testing.T, donID, adderID uint32, url string, sha [32]byte, blockNumber int64) logpoller.Log { + data := makeAdderLogData(t, donID, adderID, url, sha) + return logpoller.Log{EventSig: ChannelDefinitionAdded, Topics: [][]byte{ChannelDefinitionAdded[:], makeDonIDTopic(donID), makeDonIDTopic(adderID)}, Data: data, BlockNumber: blockNumber} +} + +func makeAdderLogData(t *testing.T, donID, adderID uint32, url string, sha [32]byte) []byte { + event := channelConfigStoreABI.Events[channelDefinitionAddedEventName] + // donID and adderID are indexed (in Topics) + // url, sha are non-indexed (in Data) + data, err := event.Inputs.NonIndexed().Pack(url, sha) + require.NoError(t, err) + return data +} + func makeDonIDTopic(donID uint32) []byte { return common.BigToHash(big.NewInt(int64(donID))).Bytes() } +// drainChannel drains all values from a channel +func drainChannel[T any](ch chan T) { + for { + select { + case <-ch: + default: + return + } + } +} + +// collectTriggers collects all available triggers from a channel up to maxCount +func collectTriggers(ch chan types.Trigger, maxCount int) []types.Trigger { + triggers := make([]types.Trigger, 0, maxCount) + for i := 0; i < maxCount; i++ { + select { + case trigger := <-ch: + triggers = append(triggers, trigger) + default: + return triggers + } + } + return triggers +} + +// makeChannelDefinition creates a simple channel definition for testing +func makeChannelDefinition(channelID uint32, source uint32) llotypes.ChannelDefinition { + return llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{{StreamID: channelID, Aggregator: llotypes.AggregatorMedian}}, + Source: source, + Tombstone: false, + } +} + +// makeChannelDefinitionWithFeedID creates a channel definition with a FeedID in options for testing +func 
makeChannelDefinitionWithFeedID(channelID uint32, source uint32, feedID common.Hash) llotypes.ChannelDefinition { + optsJSON := fmt.Sprintf(`{"feedId":"%s"}`, feedID.Hex()) + return llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{{StreamID: channelID, Aggregator: llotypes.AggregatorMedian}}, + Source: source, + Tombstone: false, + Opts: llotypes.ChannelOpts(optsJSON), + } +} + +// addChannelDefinitions adds channel definitions to the given map for a range of channel IDs +func addChannelDefinitions(defs llotypes.ChannelDefinitions, startID, endID uint32, source uint32) { + for i := startID; i <= endID; i++ { + defs[i] = makeChannelDefinition(i, source) + } +} + func Test_ChannelDefinitionCache(t *testing.T) { donID := rand.Uint32() t.Run("Definitions", func(t *testing.T) { // NOTE: this is covered more thoroughly in the integration tests - dfns := llotypes.ChannelDefinitions(map[llotypes.ChannelID]llotypes.ChannelDefinition{ + prev := llotypes.ChannelDefinitions(map[llotypes.ChannelID]llotypes.ChannelDefinition{ 1: { ReportFormat: llotypes.ReportFormat(43), Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 2, Aggregator: llotypes.AggregatorMode}, {StreamID: 3, Aggregator: llotypes.AggregatorQuote}}, Opts: llotypes.ChannelOpts{1, 2, 3}, + Source: SourceOwner, }, }) - cdc := &channelDefinitionCache{definitions: dfns} + // Test that Definitions() returns prev when sourceDefinitions is empty + cdc := &channelDefinitionCache{ + lggr: logger.TestSugared(t), + definitions: Definitions{ + Sources: make(map[uint32]types.SourceDefinition), + }, + orm: &mockCDCORM{}, // stub ORM; Definitions() merges in memory and does not persist + } + + result := cdc.Definitions(prev) + require.Equal(t, prev, result) + + // Test merging from sourceDefinitions + adderID := uint32(100) + sourceDefs := llotypes.ChannelDefinitions{ + 2: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{{StreamID: 2, Aggregator: llotypes.AggregatorMedian}}, + Source: adderID, + }, + } + cdc.definitions.Sources[adderID] = types.SourceDefinition{ + Trigger: types.Trigger{ + Source: adderID, + BlockNum: 1000, + }, + Definitions: sourceDefs, + } - assert.Equal(t, dfns, cdc.Definitions()) + result = cdc.Definitions(prev) + // Should contain both prev channel 1 and adder channel 2 + require.Contains(t, result, llotypes.ChannelID(1)) + require.Contains(t, result, llotypes.ChannelID(2)) + require.Equal(t, SourceOwner, result[1].Source) + require.Equal(t, adderID, result[2].Source) + + // Test tombstone handling + tombstoneDefs := llotypes.ChannelDefinitions{ + 1: { + ReportFormat: llotypes.ReportFormat(43), + Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 2, Aggregator: llotypes.AggregatorMode}, {StreamID: 3, Aggregator: llotypes.AggregatorQuote}}, + Opts: llotypes.ChannelOpts{1, 2, 3}, + Source: SourceOwner, + Tombstone: false, + }, + 3: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{{StreamID: 3, Aggregator: llotypes.AggregatorMedian}}, + Source: SourceOwner, + Tombstone: true, + }, + } + cdc.definitions.Sources[SourceOwner] = types.SourceDefinition{ + Trigger: types.Trigger{ + Source: SourceOwner, + BlockNum: 2000, + }, + Definitions: tombstoneDefs, + } + + result = cdc.Definitions(prev) + // Tombstoned channel should be kept in definitions with Tombstone: true + require.Contains(t, result, llotypes.ChannelID(3)) + require.True(t, result[3].Tombstone, "channel 3 should be
tombstoned") + // Channels 1 and 2 should still be present + require.Contains(t, result, llotypes.ChannelID(1)) + require.Contains(t, result, llotypes.ChannelID(2)) }) t.Run("readLogs", func(t *testing.T) { lp := &mockLogPoller{latestBlockErr: sql.ErrNoRows} - newLogCh := make(chan *channel_config_store.ChannelConfigStoreNewChannelDefinition, 100) - cdc := &channelDefinitionCache{donID: donID, lp: lp, lggr: logger.TestSugared(t), newLogCh: newLogCh} + fetchTriggerCh := make(chan types.Trigger, 100) + cdc := &channelDefinitionCache{ + donID: donID, + lp: lp, + lggr: logger.TestSugared(t), + fetchTriggerCh: fetchTriggerCh, + definitions: Definitions{ + Sources: make(map[uint32]types.SourceDefinition), + }, + } t.Run("skips if logpoller has no blocks", func(t *testing.T) { ctx := t.Context() err := cdc.readLogs(ctx) - assert.NoError(t, err) - assert.Nil(t, cdc.newLog) + require.NoError(t, err) }) t.Run("returns error on LatestBlock failure", func(t *testing.T) { ctx := t.Context() lp.latestBlockErr = errors.New("test error") err := cdc.readLogs(ctx) - assert.EqualError(t, err, "test error") - assert.Nil(t, cdc.newLog) + require.EqualError(t, err, "test error") }) t.Run("does nothing if LatestBlock older or the same as current channel definitions block", func(t *testing.T) { ctx := t.Context() lp.latestBlockErr = nil lp.latestBlock = logpoller.Block{BlockNumber: 42} - cdc.definitionsBlockNum = 43 + cdc.definitions.LastBlockNum = 43 err := cdc.readLogs(ctx) - assert.NoError(t, err) - assert.Nil(t, cdc.newLog) + require.NoError(t, err) }) t.Run("returns error if FilteredLogs fails", func(t *testing.T) { ctx := t.Context() - cdc.definitionsBlockNum = 0 + cdc.definitions.LastBlockNum = 0 lp.filteredLogsErr = errors.New("test error 2") err := cdc.readLogs(ctx) - assert.EqualError(t, err, "test error 2") - assert.Nil(t, cdc.newLog) + require.EqualError(t, err, "test error 2") }) t.Run("ignores logs with different topic", func(t *testing.T) { ctx := t.Context() lp.filteredLogsErr = nil - lp.filteredLogs = []logpoller.Log{{EventSig: common.Hash{1, 2, 3, 4}}} + // Set logs with different event signature (not NewChannelDefinition or ChannelDefinitionAdded) + lp.ownerLogs = []logpoller.Log{{EventSig: common.Hash{1, 2, 3, 4}}} + lp.adderLogs = []logpoller.Log{} err := cdc.readLogs(ctx) - assert.NoError(t, err) - assert.Nil(t, cdc.newLog) + require.NoError(t, err) }) - t.Run("returns error if log is malformed", func(t *testing.T) { + t.Run("logs warning and continues if log is malformed", func(t *testing.T) { ctx := t.Context() + // Drain any existing triggers + drainChannel(fetchTriggerCh) + cdc.definitions.LastBlockNum = 0 + cdc.initialBlockNum = 0 + lp.latestBlock = logpoller.Block{BlockNumber: 2000} + lp.latestBlockErr = nil lp.filteredLogsErr = nil - lp.filteredLogs = []logpoller.Log{{EventSig: NewChannelDefinition}} + // Set malformed owner log (has correct event sig but missing data) + lp.ownerLogs = []logpoller.Log{{EventSig: NewChannelDefinition}} + lp.adderLogs = []logpoller.Log{} err := cdc.readLogs(ctx) - assert.EqualError(t, err, "failed to unpack log data: abi: attempting to unmarshal an empty string while arguments are expected") - assert.Nil(t, cdc.newLog) + require.NoError(t, err, "should not return error for malformed log, should log warning and continue") + // Should not send trigger for malformed log + select { + case <-fetchTriggerCh: + t.Fatal("should not send trigger for malformed log") + default: + // Expected - no trigger + } }) - t.Run("sets definitions and sends on channel if 
FilteredLogs returns new event with a later version", func(t *testing.T) { + t.Run("sends trigger on channel if FilteredLogs returns new event with a later version", func(t *testing.T) { ctx := t.Context() + // Drain any existing triggers + drainChannel(fetchTriggerCh) + cdc.definitions.LastBlockNum = 0 + cdc.initialBlockNum = 0 + lp.latestBlock = logpoller.Block{BlockNumber: 2000} + lp.latestBlockErr = nil lp.filteredLogsErr = nil - lp.filteredLogs = []logpoller.Log{makeLog(t, donID, uint32(43), "http://example.com/xxx.json", [32]byte{1, 2, 3, 4})} + // Set owner logs + lp.ownerLogs = []logpoller.Log{makeLog(t, donID, uint32(43), "http://example.com/xxx.json", [32]byte{1, 2, 3, 4})} + // Set empty adder logs + lp.adderLogs = []logpoller.Log{} err := cdc.readLogs(ctx) require.NoError(t, err) - require.NotNil(t, cdc.newLog) - assert.Equal(t, uint32(43), cdc.newLog.Version) - assert.Equal(t, "http://example.com/xxx.json", cdc.newLog.Url) - assert.Equal(t, [32]byte{1, 2, 3, 4}, cdc.newLog.Sha) - assert.Equal(t, int64(donID), cdc.newLog.DonId.Int64()) - - func() { - for { - select { - case log := <-newLogCh: - assert.Equal(t, cdc.newLog, log) - default: - return - } - } - }() + + // Check that fetch trigger was sent + select { + case trigger := <-fetchTriggerCh: + require.Equal(t, SourceOwner, trigger.Source) + require.Equal(t, uint32(43), trigger.Version) + require.Equal(t, "http://example.com/xxx.json", trigger.URL) + require.Equal(t, [32]byte{1, 2, 3, 4}, trigger.SHA) + default: + t.Fatal("expected fetch trigger signal in channel") + } }) - t.Run("does nothing if version older or the same as the one currently set", func(t *testing.T) { + t.Run("sends triggers for all logs", func(t *testing.T) { ctx := t.Context() + // Drain any existing triggers + drainChannel(fetchTriggerCh) + cdc.definitions.LastBlockNum = 0 + cdc.initialBlockNum = 0 + lp.latestBlock = logpoller.Block{BlockNumber: 2000} lp.filteredLogsErr = nil - lp.filteredLogs = []logpoller.Log{ + // Set owner logs (readLogs calls FilteredLogs for owner logs) + lp.ownerLogs = []logpoller.Log{ makeLog(t, donID, uint32(42), "http://example.com/xxx.json", [32]byte{1, 2, 3, 4}), makeLog(t, donID, uint32(43), "http://example.com/xxx.json", [32]byte{1, 2, 3, 4}), } + // Set empty adder logs + lp.adderLogs = []logpoller.Log{} err := cdc.readLogs(ctx) require.NoError(t, err) - assert.Equal(t, uint32(43), cdc.newLog.Version) + // Should receive triggers for both owner logs + triggers := collectTriggers(fetchTriggerCh, 4) + require.Len(t, triggers, 2, "expected 2 triggers") + // Find the trigger with version 43 (latest) + var found43 bool + for _, trigger := range triggers { + if trigger.Version == 43 { + found43 = true + break + } + } + require.True(t, found43, "expected trigger with version 43") }) - t.Run("in case of multiple logs, takes the latest", func(t *testing.T) { + t.Run("in case of multiple logs, sends triggers for all", func(t *testing.T) { ctx := t.Context() + // Drain any existing triggers + drainChannel(fetchTriggerCh) + cdc.definitions.LastBlockNum = 0 + cdc.initialBlockNum = 0 + lp.latestBlock = logpoller.Block{BlockNumber: 2000} lp.filteredLogsErr = nil - lp.filteredLogs = []logpoller.Log{ + // Set owner logs (readLogs calls FilteredLogs for owner logs) + lp.ownerLogs = []logpoller.Log{ makeLog(t, donID, uint32(42), "http://example.com/xxx.json", [32]byte{1, 2, 3, 4}), makeLog(t, donID, uint32(45), "http://example.com/xxx2.json", [32]byte{2, 2, 3, 4}), makeLog(t, donID, uint32(44), "http://example.com/xxx3.json", [32]byte{3, 2, 
3, 4}), makeLog(t, donID, uint32(43), "http://example.com/xxx4.json", [32]byte{4, 2, 3, 4}), } + // Set empty adder logs + lp.adderLogs = []logpoller.Log{} err := cdc.readLogs(ctx) require.NoError(t, err) - assert.Equal(t, uint32(45), cdc.newLog.Version) - assert.Equal(t, "http://example.com/xxx2.json", cdc.newLog.Url) - assert.Equal(t, [32]byte{2, 2, 3, 4}, cdc.newLog.Sha) - assert.Equal(t, int64(donID), cdc.newLog.DonId.Int64()) - - func() { - for { - select { - case log := <-newLogCh: - assert.Equal(t, cdc.newLog, log) - default: - return - } + + // Check that fetch triggers were sent for all owner logs + triggers := collectTriggers(fetchTriggerCh, 8) + require.Len(t, triggers, 4, "expected 4 triggers") + // Find the trigger with version 45 (latest) + var latestTrigger *types.Trigger + for i := range triggers { + if triggers[i].Version == 45 { + latestTrigger = &triggers[i] + break } - }() + } + require.NotNil(t, latestTrigger, "expected trigger with version 45") + require.Equal(t, "http://example.com/xxx2.json", latestTrigger.URL) + require.Equal(t, [32]byte{2, 2, 3, 4}, latestTrigger.SHA) }) t.Run("ignores logs with incorrect don ID", func(t *testing.T) { ctx := t.Context() + // Drain any existing triggers + drainChannel(fetchTriggerCh) lp.filteredLogsErr = nil - lp.filteredLogs = []logpoller.Log{ + // Set owner logs with wrong donID + lp.ownerLogs = []logpoller.Log{ makeLog(t, donID+1, uint32(42), "http://example.com/xxx.json", [32]byte{1, 2, 3, 4}), } + // Set empty adder logs + lp.adderLogs = []logpoller.Log{} err := cdc.readLogs(ctx) require.NoError(t, err) - assert.Equal(t, uint32(45), cdc.newLog.Version) - - func() { - for { - select { - case log := <-newLogCh: - t.Fatal("did not expect log with wrong donID, got: ", log) - default: - return - } - } - }() + + // Check that no fetch trigger was sent + select { + case trigger := <-fetchTriggerCh: + t.Fatalf("did not expect fetch trigger signal for log with wrong donID, got: %+v", trigger) + default: + // No signal, as expected + } }) t.Run("ignores logs with wrong number of topics", func(t *testing.T) { ctx := t.Context() + // Drain any existing triggers + drainChannel(fetchTriggerCh) lp.filteredLogsErr = nil lg := makeLog(t, donID, uint32(42), "http://example.com/xxx.json", [32]byte{1, 2, 3, 4}) lg.Topics = lg.Topics[:1] - lp.filteredLogs = []logpoller.Log{lg} + // Set owner log with wrong number of topics + lp.ownerLogs = []logpoller.Log{lg} + // Set empty adder logs + lp.adderLogs = []logpoller.Log{} + + err := cdc.readLogs(ctx) + require.NoError(t, err) + + // Check that no fetch trigger was sent + select { + case trigger := <-fetchTriggerCh: + t.Fatalf("did not expect fetch trigger signal for log with missing topics, got: %+v", trigger) + default: + // No signal, as expected + } + }) + t.Run("reads adder logs and sends triggers", func(t *testing.T) { + ctx := t.Context() + // Drain any existing triggers + drainChannel(fetchTriggerCh) + lp.filteredLogsErr = nil + lp.latestBlock = logpoller.Block{BlockNumber: 2000} + cdc.definitions.LastBlockNum = 0 + cdc.initialBlockNum = 0 + adderID1 := uint32(100) + adderID2 := uint32(200) + // Set adder logs (readLogs calls FilteredLogs for adder logs first) + lp.adderLogs = []logpoller.Log{ + makeAdderLog(t, donID, adderID1, "http://example.com/adder1.json", [32]byte{1, 1, 1, 1}, 1500), + makeAdderLog(t, donID, adderID2, "http://example.com/adder2.json", [32]byte{2, 2, 2, 2}, 1600), + } + // Set empty owner logs + lp.ownerLogs = []logpoller.Log{} + + err := cdc.readLogs(ctx) + 
require.NoError(t, err) + + // Check that fetch triggers were sent for both adders + triggers := collectTriggers(fetchTriggerCh, 2) + require.Len(t, triggers, 2, "expected 2 triggers") + // Verify adder triggers + for _, trigger := range triggers { + require.NotEqual(t, SourceOwner, trigger.Source, "should not be owner") + require.True(t, trigger.Source == adderID1 || trigger.Source == adderID2, "should be one of the adder IDs") + if trigger.Source == adderID1 { + require.Equal(t, "http://example.com/adder1.json", trigger.URL) + require.Equal(t, [32]byte{1, 1, 1, 1}, trigger.SHA) + } else { + require.Equal(t, "http://example.com/adder2.json", trigger.URL) + require.Equal(t, [32]byte{2, 2, 2, 2}, trigger.SHA) + } + } + }) + t.Run("reads both owner and adder logs in one call", func(t *testing.T) { + ctx := t.Context() + // Drain any existing triggers + drainChannel(fetchTriggerCh) + lp.filteredLogsErr = nil + lp.latestBlock = logpoller.Block{BlockNumber: 2000} + cdc.definitions.LastBlockNum = 0 + cdc.initialBlockNum = 0 + adderID := uint32(100) + // Set both adder and owner logs + lp.adderLogs = []logpoller.Log{ + makeAdderLog(t, donID, adderID, "http://example.com/adder.json", [32]byte{6, 6, 6, 6}, 1500), + } + lp.ownerLogs = []logpoller.Log{ + makeLog(t, donID, uint32(50), "http://example.com/owner.json", [32]byte{5, 5, 5, 5}), + } err := cdc.readLogs(ctx) require.NoError(t, err) - assert.Equal(t, uint32(45), cdc.newLog.Version) - - func() { - for { - select { - case log := <-newLogCh: - t.Fatal("did not expect log with missing topics, got: ", log) - default: - return - } + + // Should have triggers for both adder and owner logs + triggers := collectTriggers(fetchTriggerCh, 2) + require.Len(t, triggers, 2, "expected 2 triggers (one adder, one owner)") + // Verify we have both types + var foundOwner, foundAdder bool + for _, trigger := range triggers { + switch trigger.Source { + case SourceOwner: + foundOwner = true + require.Equal(t, uint32(50), trigger.Version) + require.Equal(t, "http://example.com/owner.json", trigger.URL) + case adderID: + foundAdder = true + require.Equal(t, "http://example.com/adder.json", trigger.URL) + require.Equal(t, [32]byte{6, 6, 6, 6}, trigger.SHA) } - }() + } + require.True(t, foundOwner, "expected owner trigger") + require.True(t, foundAdder, "expected adder trigger") + }) + t.Run("ignores adder logs with incorrect don ID", func(t *testing.T) { + ctx := t.Context() + // Drain any existing triggers + drainChannel(fetchTriggerCh) + lp.filteredLogsErr = nil + lp.latestBlock = logpoller.Block{BlockNumber: 2000} + cdc.definitions.LastBlockNum = 0 + cdc.initialBlockNum = 0 + adderID := uint32(100) + // Set adder logs with wrong donID + lp.adderLogs = []logpoller.Log{ + makeAdderLog(t, donID+1, adderID, "http://example.com/adder.json", [32]byte{1, 1, 1, 1}, 1500), + } + // Set empty owner logs + lp.ownerLogs = []logpoller.Log{} + + err := cdc.readLogs(ctx) + require.NoError(t, err) + // Should not send trigger for wrong donID + select { + case trigger := <-fetchTriggerCh: + t.Fatalf("did not expect fetch trigger signal for log with wrong donID, got: %+v", trigger) + default: + // No signal, as expected + } }) }) @@ -297,9 +604,24 @@ func Test_ChannelDefinitionCache(t *testing.T) { httpLimit: 2048, } - t.Run("nil ctx returns error", func(t *testing.T) { - _, err := cdc.fetchChannelDefinitions(nil, "notvalid://foos", [32]byte{}) //nolint:staticcheck // SA1012 we pass nil intentionally here - assert.EqualError(t, err, "failed to create http.Request; net/http: nil 
Context") + t.Run("invalid URL returns error", func(t *testing.T) { + ctx := t.Context() + // Set up mock to return error for invalid URL scheme + c.err = errors.New("unsupported protocol scheme") + c.resp = nil + + // Use a URL with invalid scheme that will fail at HTTP client level + // This avoids panic from URL parsing in the HTTP library + trigger := types.Trigger{ + Source: SourceOwner, + URL: "http://[::1", + SHA: [32]byte{}, + BlockNum: 0, + Version: 0, + } + _, err := cdc.fetchChannelDefinitions(ctx, trigger) + // The error could be from URL parsing or HTTP client - both are acceptable + require.Error(t, err) }) t.Run("networking error while making request returns error", func(t *testing.T) { @@ -307,8 +629,16 @@ func Test_ChannelDefinitionCache(t *testing.T) { c.resp = nil c.err = errors.New("http request failed") - _, err := cdc.fetchChannelDefinitions(ctx, "http://example.com/definitions.json", [32]byte{}) - assert.EqualError(t, err, "error making http request: http request failed") + trigger := types.Trigger{ + Source: SourceOwner, + URL: "http://example.com/definitions.json", + SHA: [32]byte{}, + BlockNum: 0, + Version: 0, + } + _, err := cdc.fetchChannelDefinitions(ctx, trigger) + require.Contains(t, err.Error(), "failed to make HTTP request to channel definitions URL") + require.Contains(t, err.Error(), "http request failed") }) t.Run("server returns 500 returns error", func(t *testing.T) { @@ -316,8 +646,16 @@ func Test_ChannelDefinitionCache(t *testing.T) { c.err = nil c.resp = &http.Response{StatusCode: 500, Body: io.NopCloser(bytes.NewReader([]byte{1, 2, 3}))} - _, err := cdc.fetchChannelDefinitions(ctx, "http://example.com/definitions.json", [32]byte{}) - assert.EqualError(t, err, "got error from http://example.com/definitions.json: (status code: 500, response body: \x01\x02\x03)") + trigger := types.Trigger{ + Source: SourceOwner, + URL: "http://example.com/definitions.json", + SHA: [32]byte{}, + BlockNum: 0, + Version: 0, + } + _, err := cdc.fetchChannelDefinitions(ctx, trigger) + require.Contains(t, err.Error(), "HTTP error from channel definitions URL http://example.com/definitions.json (status 500)") + require.Contains(t, err.Error(), "\x01\x02\x03") }) var largeBody = make([]byte, 2048) @@ -330,20 +668,37 @@ func Test_ChannelDefinitionCache(t *testing.T) { c.err = nil c.resp = &http.Response{StatusCode: 404, Body: io.NopCloser(bytes.NewReader(largeBody))} - _, err := cdc.fetchChannelDefinitions(ctx, "http://example.com/definitions.json", [32]byte{}) - assert.EqualError(t, err, "got error from http://example.com/definitions.json: (status code: 404, error reading response body: http: request body too large, response body: 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa)") + trigger := types.Trigger{ + Source: SourceOwner, + URL: "http://example.com/definitions.json", + SHA: [32]byte{}, + BlockNum: 0, + Version: 0, + } + _, err := cdc.fetchChannelDefinitions(ctx, trigger) + require.Contains(t, err.Error(), "HTTP error from channel definitions URL http://example.com/definitions.json (status 404)") + require.Contains(t, err.Error(), "failed to read response body") + require.Contains(t, err.Error(), "http: request body too large") }) var hugeBody = make([]byte, 8096) - c.resp.Body = io.NopCloser(bytes.NewReader(hugeBody)) + c.resp = &http.Response{Body: io.NopCloser(bytes.NewReader(hugeBody))} t.Run("server returns body that is too large", func(t *testing.T) { ctx := t.Context() c.err = nil c.resp = &http.Response{StatusCode: 200, Body: io.NopCloser(bytes.NewReader(hugeBody))} - _, err := cdc.fetchChannelDefinitions(ctx, "http://example.com/definitions.json", [32]byte{}) - assert.EqualError(t, err, "failed to read from body: http: request body too large") + trigger := types.Trigger{ + Source: SourceOwner, + URL: "http://example.com/definitions.json", + SHA: [32]byte{}, + BlockNum: 0, + Version: 0, + } + _, err := cdc.fetchChannelDefinitions(ctx, trigger) + require.Contains(t, err.Error(), "failed to read channel definitions response body from") + require.Contains(t, err.Error(), "http: request body too large") }) t.Run("server returns invalid JSON returns error", func(t *testing.T) { @@ -351,8 +706,17 @@ func Test_ChannelDefinitionCache(t *testing.T) { c.err = nil c.resp = &http.Response{StatusCode: 200, Body: io.NopCloser(bytes.NewReader([]byte{1, 2, 3}))} - _, err := cdc.fetchChannelDefinitions(ctx, "http://example.com/definitions.json", common.HexToHash("0xfd1780a6fc9ee0dab26ceb4b3941ab03e66ccd970d1db91612c66df4515b0a0a")) - assert.EqualError(t, err, "failed to decode JSON: invalid character '\\x01' looking for beginning of value") + expectedSha := common.HexToHash("0xfd1780a6fc9ee0dab26ceb4b3941ab03e66ccd970d1db91612c66df4515b0a0a") + trigger := types.Trigger{ + Source: SourceOwner, + URL: "http://example.com/definitions.json", + SHA: [32]byte(expectedSha), + BlockNum: 0, + Version: 0, + } + _, err := cdc.fetchChannelDefinitions(ctx, trigger) + require.Contains(t, err.Error(), "failed to decode channel definitions JSON from") + require.Contains(t, err.Error(), "invalid character '\\x01' looking for beginning of value") }) t.Run("SHA mismatch returns error", func(t *testing.T) { @@ -360,8 +724,17 @@ func Test_ChannelDefinitionCache(t *testing.T) { 
c.err = nil c.resp = &http.Response{StatusCode: 200, Body: io.NopCloser(bytes.NewReader([]byte(`{"foo":"bar"}`)))} - _, err := cdc.fetchChannelDefinitions(ctx, "http://example.com/definitions.json", [32]byte{}) - assert.EqualError(t, err, "SHA3 mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 4d3304d0d87c27a031cbb6bdf95da79b7b4552c3d0bef2e5a94f50810121e1e0") + trigger := types.Trigger{ + Source: SourceOwner, + URL: "http://example.com/definitions.json", + SHA: [32]byte{}, + BlockNum: 0, + Version: 0, + } + _, err := cdc.fetchChannelDefinitions(ctx, trigger) + require.Contains(t, err.Error(), "SHA3 mismatch for channel definitions from") + require.Contains(t, err.Error(), "expected 0000000000000000000000000000000000000000000000000000000000000000") + require.Contains(t, err.Error(), "got 4d3304d0d87c27a031cbb6bdf95da79b7b4552c3d0bef2e5a94f50810121e1e0") }) t.Run("valid JSON matching SHA returns channel definitions", func(t *testing.T) { @@ -389,77 +762,519 @@ func Test_ChannelDefinitionCache(t *testing.T) { c.err = nil c.resp = &http.Response{StatusCode: 200, Body: io.NopCloser(bytes.NewReader([]byte(valid)))} - cd, err := cdc.fetchChannelDefinitions(ctx, "http://example.com/definitions.json", common.HexToHash("0x367bbc75f7b6c9fc66a98ea99f837ea7ac4a3c2d6a9ee284de018bd02c41b52d")) - assert.NoError(t, err) - assert.Equal(t, llotypes.ChannelDefinitions{0x2a: llotypes.ChannelDefinition{ReportFormat: 0x1, Streams: []llotypes.Stream{llotypes.Stream{StreamID: 0x34, Aggregator: 0x1}, llotypes.Stream{StreamID: 0x35, Aggregator: 0x1}, llotypes.Stream{StreamID: 0x37, Aggregator: 0x3}}, Opts: llotypes.ChannelOpts{0x7b, 0x22, 0x62, 0x61, 0x73, 0x65, 0x55, 0x53, 0x44, 0x46, 0x65, 0x65, 0x22, 0x3a, 0x22, 0x31, 0x30, 0x22, 0x2c, 0x22, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x22, 0x3a, 0x33, 0x36, 0x30, 0x30, 0x2c, 0x22, 0x66, 0x65, 0x65, 0x64, 0x49, 0x64, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, 0x30, 0x33, 0x36, 0x62, 0x34, 0x61, 0x61, 0x37, 0x65, 0x35, 0x37, 0x63, 0x61, 0x37, 0x62, 0x36, 0x38, 0x61, 0x65, 0x31, 0x62, 0x66, 0x34, 0x35, 0x36, 0x35, 0x33, 0x66, 0x35, 0x36, 0x62, 0x36, 0x35, 0x36, 0x66, 0x64, 0x33, 0x61, 0x61, 0x33, 0x33, 0x35, 0x65, 0x66, 0x37, 0x66, 0x61, 0x65, 0x36, 0x39, 0x36, 0x62, 0x36, 0x36, 0x33, 0x66, 0x31, 0x62, 0x38, 0x34, 0x37, 0x32, 0x22, 0x2c, 0x22, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x22, 0x3a, 0x22, 0x31, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, 0x7d}}}, cd) + expectedSha := common.HexToHash("0x367bbc75f7b6c9fc66a98ea99f837ea7ac4a3c2d6a9ee284de018bd02c41b52d") + trigger := types.Trigger{ + Source: SourceOwner, + URL: "http://example.com/definitions.json", + SHA: [32]byte(expectedSha), + BlockNum: 0, + Version: 0, + } + cd, err := cdc.fetchChannelDefinitions(ctx, trigger) + require.NoError(t, err) + expectedDef := llotypes.ChannelDefinition{ + ReportFormat: 0x1, + Streams: []llotypes.Stream{{StreamID: 0x34, Aggregator: 0x1}, {StreamID: 0x35, Aggregator: 0x1}, {StreamID: 0x37, Aggregator: 0x3}}, + Opts: llotypes.ChannelOpts{0x7b, 0x22, 0x62, 0x61, 0x73, 0x65, 0x55, 0x53, 0x44, 0x46, 0x65, 0x65, 0x22, 0x3a, 0x22, 0x31, 0x30, 0x22, 0x2c, 0x22, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x22, 0x3a, 0x33, 0x36, 0x30, 0x30, 0x2c, 0x22, 0x66, 0x65, 0x65, 0x64, 0x49, 0x64, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, 0x30, 0x33, 0x36, 0x62, 
0x34, 0x61, 0x61, 0x37, 0x65, 0x35, 0x37, 0x63, 0x61, 0x37, 0x62, 0x36, 0x38, 0x61, 0x65, 0x31, 0x62, 0x66, 0x34, 0x35, 0x36, 0x35, 0x33, 0x66, 0x35, 0x36, 0x62, 0x36, 0x35, 0x36, 0x66, 0x64, 0x33, 0x61, 0x61, 0x33, 0x33, 0x35, 0x65, 0x66, 0x37, 0x66, 0x61, 0x65, 0x36, 0x39, 0x36, 0x62, 0x36, 0x36, 0x33, 0x66, 0x31, 0x62, 0x38, 0x34, 0x37, 0x32, 0x22, 0x2c, 0x22, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x22, 0x3a, 0x22, 0x31, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, 0x7d}, + Source: SourceOwner, + } + require.Equal(t, llotypes.ChannelDefinitions{0x2a: expectedDef}, cd) }) }) t.Run("persist", func(t *testing.T) { + definitions := llotypes.ChannelDefinitions{ + 1: { + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 2, Aggregator: llotypes.AggregatorMode}, {StreamID: 3, Aggregator: llotypes.AggregatorQuote}}, + Opts: llotypes.ChannelOpts(`{"foo":"bar"}`), + }, + } cdc := &channelDefinitionCache{ lggr: logger.TestSugared(t), orm: nil, addr: testutils.NewAddress(), donID: donID, - definitions: llotypes.ChannelDefinitions{ - 1: { - ReportFormat: llotypes.ReportFormat(43), - Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 2, Aggregator: llotypes.AggregatorMode}, {StreamID: 3, Aggregator: llotypes.AggregatorQuote}}, - Opts: llotypes.ChannelOpts{1, 2, 3}, - }, + definitions: Definitions{ + LastBlockNum: 142, }, - definitionsBlockNum: 142, } - t.Run("does nothing if persisted version is up-to-date", func(t *testing.T) { + t.Run("persists current definitions", func(t *testing.T) { ctx := t.Context() - cdc.definitionsVersion = 42 - cdc.persistedVersion = 42 + orm := &mockCDCORM{} + cdc.orm = orm + cdc.definitions.Version = 42 + cdc.persistedBlockNum = 141 + cdc.definitions.LastBlockNum = 142 + cdc.definitions.Sources = map[uint32]types.SourceDefinition{ + SourceOwner: { + Trigger: types.Trigger{ + Source: SourceOwner, + BlockNum: 142, + Version: 42, + }, + Definitions: definitions, + }, + } - memoryVersion, persistedVersion, err := cdc.persist(ctx) - assert.NoError(t, err) - assert.Equal(t, uint32(42), memoryVersion) - assert.Equal(t, uint32(42), persistedVersion) - assert.Equal(t, uint32(42), cdc.persistedVersion) + // persist() always persists c.definitions (no comparison logic) + memoryBlockNum, persistedBlockNum, err := cdc.persist(ctx) + require.NoError(t, err) + require.Equal(t, int64(142), memoryBlockNum) + require.Equal(t, int64(142), persistedBlockNum) + require.Equal(t, int64(142), cdc.persistedBlockNum) + require.Equal(t, cdc.definitions.Sources, orm.lastPersistedDfns) }) orm := &mockCDCORM{} cdc.orm = orm - t.Run("returns error on db failure and does not update persisted version", func(t *testing.T) { + t.Run("returns error on db failure and does not update persisted block number", func(t *testing.T) { ctx := t.Context() - cdc.persistedVersion = 42 - cdc.definitionsVersion = 43 + cdc.persistedBlockNum = 141 + cdc.definitions.Version = 43 + cdc.definitions.LastBlockNum = 143 + cdc.definitions.Sources = map[uint32]types.SourceDefinition{ + SourceOwner: { + Trigger: types.Trigger{ + Source: SourceOwner, + BlockNum: 143, + Version: 43, + }, + Definitions: definitions, + }, + } orm.err = errors.New("test error") - memoryVersion, persistedVersion, err := cdc.persist(ctx) - assert.EqualError(t, err, "test error") - assert.Equal(t, uint32(43), memoryVersion) - 
assert.Equal(t, uint32(42), persistedVersion) - assert.Equal(t, uint32(42), cdc.persistedVersion) + // persist() always persists c.definitions + memoryBlockNum, persistedBlockNum, err := cdc.persist(ctx) + require.Contains(t, err.Error(), "test error") + require.Equal(t, int64(143), memoryBlockNum) + require.Equal(t, int64(141), persistedBlockNum) + require.Equal(t, int64(141), cdc.persistedBlockNum) }) - t.Run("updates persisted version on success", func(t *testing.T) { + t.Run("updates persisted block number on success", func(t *testing.T) { ctx := t.Context() - cdc.definitionsVersion = 43 + cdc.definitions.Version = 43 + cdc.definitions.LastBlockNum = 143 + cdc.definitions.Sources = map[uint32]types.SourceDefinition{ + SourceOwner: { + Trigger: types.Trigger{ + Source: SourceOwner, + BlockNum: 143, + Version: 43, + }, + Definitions: definitions, + }, + } + cdc.persistedBlockNum = 141 orm.err = nil - memoryVersion, persistedVersion, err := cdc.persist(ctx) - assert.NoError(t, err) - assert.Equal(t, uint32(43), memoryVersion) - assert.Equal(t, uint32(43), persistedVersion) - assert.Equal(t, uint32(43), cdc.persistedVersion) + // persist() always persists c.definitions + memoryBlockNum, persistedBlockNum, err := cdc.persist(ctx) + require.NoError(t, err) + require.Equal(t, int64(143), memoryBlockNum) + require.Equal(t, int64(143), persistedBlockNum) + require.Equal(t, int64(143), cdc.persistedBlockNum) + + require.Equal(t, cdc.addr, orm.lastPersistedAddr) + require.Equal(t, cdc.donID, orm.lastPersistedDonID) + require.Equal(t, cdc.definitions.Version, orm.lastPersistedVersion) + require.Equal(t, cdc.definitions.Sources, orm.lastPersistedDfns) + require.Equal(t, cdc.definitions.LastBlockNum, orm.lastPersistedBlockNum) + }) + }) + + t.Run("adder limits", func(t *testing.T) { + cdc := &channelDefinitionCache{ + lggr: logger.TestSugared(t), + } + + adderID := uint32(100) + + t.Run("rejects adder definition file with more than MaxChannelsPerAdder channels", func(t *testing.T) { + currentDefinitions := make(llotypes.ChannelDefinitions) + newDefinitions := make(llotypes.ChannelDefinitions) + + // Create a new definition file with MaxChannelsPerAdder + 1 channels + // The limit is enforced based on existing channels plus new channels being added + // When trying to add the (MaxChannelsPerAdder+1)th channel, numberOfChannels will be MaxChannelsPerAdder + addChannelDefinitions(newDefinitions, 1, uint32(MaxChannelsPerAdder+1), adderID) + + feedIDToChannelID := buildFeedIDMap(currentDefinitions) + cdc.mergeDefinitions(adderID, currentDefinitions, newDefinitions, feedIDToChannelID) + // The implementation stops processing at MaxChannelsPerAdder and doesn't return an error + // Verify that only MaxChannelsPerAdder channels were added + require.LessOrEqual(t, len(currentDefinitions), MaxChannelsPerAdder, "should not exceed MaxChannelsPerAdder") + // Count channels from this adder source + adderChannelCount := uint32(0) + for _, def := range currentDefinitions { + if def.Source == adderID { + adderChannelCount++ + } + } + require.Equal(t, MaxChannelsPerAdder, int(adderChannelCount), "should have exactly MaxChannelsPerAdder channels from this adder") + }) + + t.Run("allows adder definition file with channels up to MaxChannelsPerAdder when most are existing", func(t *testing.T) { + currentDefinitions := make(llotypes.ChannelDefinitions) + newDefinitions := make(llotypes.ChannelDefinitions) + + // Pre-populate with 90 existing channels (MaxChannelsPerAdder - 10) + // This tests that existing channels + new 
channels can total up to MaxChannelsPerAdder
+			existingEnd := uint32(90)
+			addChannelDefinitions(currentDefinitions, 1, existingEnd, adderID)
+			// Include these existing channels in the new definition file (they'll be skipped)
+			addChannelDefinitions(newDefinitions, 1, existingEnd, adderID)
+
+			// Add 9 new channels, bringing the file to 99 channels, just below MaxChannelsPerAdder
+			addChannelDefinitions(newDefinitions, existingEnd+1, existingEnd+9, adderID)
+
+			feedIDToChannelID := buildFeedIDMap(currentDefinitions)
+			cdc.mergeDefinitions(adderID, currentDefinitions, newDefinitions, feedIDToChannelID)
+			// Should have 90 existing + 9 new = 99 (below MaxChannelsPerAdder)
+			require.Len(t, currentDefinitions, 99)
+		})
+
+		t.Run("owner definitions are not subject to adder limits", func(t *testing.T) {
+			currentDefinitions := make(llotypes.ChannelDefinitions)
+			newDefinitions := make(llotypes.ChannelDefinitions)
+
+			// Owner can add any number of channels (not subject to MaxChannelsPerAdder limit)
+			addChannelDefinitions(newDefinitions, 1, 20, SourceOwner)
+
+			feedIDToChannelID := buildFeedIDMap(currentDefinitions)
+			cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, feedIDToChannelID)
+			require.Len(t, currentDefinitions, 20)
+		})
+
+		t.Run("owner can have more than MaxChannelsPerAdder channels", func(t *testing.T) {
+			currentDefinitions := make(llotypes.ChannelDefinitions)
+			newDefinitions := make(llotypes.ChannelDefinitions)
+
+			// Owner can have more than MaxChannelsPerAdder channels
+			addChannelDefinitions(newDefinitions, 1, uint32(MaxChannelsPerAdder+10), SourceOwner)
+
+			feedIDToChannelID := buildFeedIDMap(currentDefinitions)
+			cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, feedIDToChannelID)
+			require.Len(t, currentDefinitions, MaxChannelsPerAdder+10)
+		})
+	})
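The subtests above pin down the cap behavior without showing mergeDefinitions itself. A rough sketch of the counting logic they imply; everything below other than MaxChannelsPerAdder, SourceOwner and the llotypes types is an assumption, and the PR's actual implementation may differ:

// Sketch only: an adder may own at most MaxChannelsPerAdder channels; the
// owner is exempt. Channels over the cap are skipped rather than erroring.
func sketchApplyAdderCap(source uint32, current, incoming llotypes.ChannelDefinitions) {
	numberOfChannels := 0
	for _, def := range current {
		if def.Source == source {
			numberOfChannels++
		}
	}
	for channelID, def := range incoming {
		if _, exists := current[channelID]; exists {
			continue // re-included existing channels do not consume new slots
		}
		if source != SourceOwner && numberOfChannels >= MaxChannelsPerAdder {
			continue // over the per-adder cap; skip (a warning would be logged)
		}
		current[channelID] = def
		numberOfChannels++
	}
}

+
+	t.Run("owner removal", func(t *testing.T) {
+		cdc := &channelDefinitionCache{
+			lggr: logger.TestSugared(t),
+		}
+
+		t.Run("does not remove owner-defined channels missing from new definitions", func(t *testing.T) {
+			currentDefinitions := make(llotypes.ChannelDefinitions)
+			newDefinitions := make(llotypes.ChannelDefinitions)
+
+			// Set up current definitions with owner-defined channels 1, 2, 3, 4, 5
+			addChannelDefinitions(currentDefinitions, 1, 5, SourceOwner)
+
+			// New definitions only include channels 1, 3, 5 (missing 2 and 4)
+			newDefinitions[1] = makeChannelDefinition(1, SourceOwner)
+			newDefinitions[3] = makeChannelDefinition(3, SourceOwner)
+			newDefinitions[5] = makeChannelDefinition(5, SourceOwner)
+
+			feedIDToChannelID := buildFeedIDMap(currentDefinitions)
+			cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, feedIDToChannelID)
+
+			// Channels 1, 3, 5 should be present (updated from newDefinitions)
+			require.Contains(t, currentDefinitions, llotypes.ChannelID(1))
+			require.Contains(t, currentDefinitions, llotypes.ChannelID(3))
+			require.Contains(t, currentDefinitions, llotypes.ChannelID(5))
+
+			// Channels 2 and 4 should remain (not removed, just not in newDefinitions)
+			require.Contains(t, currentDefinitions, llotypes.ChannelID(2))
+			require.Contains(t, currentDefinitions, llotypes.ChannelID(4))
+
+			// Result should contain all 5 channels (2 and 4 remain from currentDefinitions)
+			require.Len(t, currentDefinitions, 5)
+		})
+
+		t.Run("preserves non-owner channels when owner updates definitions", func(t *testing.T) {
+			currentDefinitions := make(llotypes.ChannelDefinitions)
+			newDefinitions := make(llotypes.ChannelDefinitions)
+
+			adderID := uint32(100)
+
+			// Set up 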
current definitions with owner channels 1, 2 and adder channel 10 + addChannelDefinitions(currentDefinitions, 1, 2, SourceOwner) + currentDefinitions[10] = makeChannelDefinition(10, adderID) + + // New owner definitions only include channel 1 (missing channel 2) + newDefinitions[1] = makeChannelDefinition(1, SourceOwner) + + feedIDToChannelID := buildFeedIDMap(currentDefinitions) + cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, feedIDToChannelID) + + // Owner channel 1 should be present (updated from newDefinitions) + require.Contains(t, currentDefinitions, llotypes.ChannelID(1)) + require.Equal(t, SourceOwner, currentDefinitions[1].Source) + + // Owner channel 2 should remain (not removed, just not in newDefinitions) + require.Contains(t, currentDefinitions, llotypes.ChannelID(2)) + require.Equal(t, SourceOwner, currentDefinitions[2].Source) + + // Adder channel 10 should be preserved + require.Contains(t, currentDefinitions, llotypes.ChannelID(10)) + require.Equal(t, adderID, currentDefinitions[10].Source) + + // Result should contain channel 1 (owner, updated), channel 2 (owner, preserved), and channel 10 (adder) + require.Len(t, currentDefinitions, 3) + }) + + t.Run("owner removal only happens when source is SourceOwner", func(t *testing.T) { + currentDefinitions := make(llotypes.ChannelDefinitions) + newDefinitions := make(llotypes.ChannelDefinitions) + + adderID := uint32(200) + + // Set up current definitions with owner-defined channels 1, 2 + addChannelDefinitions(currentDefinitions, 1, 2, SourceOwner) + + // New definitions from adder only includes channel 1 + newDefinitions[1] = makeChannelDefinition(1, adderID) + + // When source is an adder (not SourceOwner), owner channels should NOT be removed + feedIDToChannelID := buildFeedIDMap(currentDefinitions) + cdc.mergeDefinitions(adderID, currentDefinitions, newDefinitions, feedIDToChannelID) + + // Owner channel 1 should still be present (adder can't overwrite it) + require.Contains(t, currentDefinitions, llotypes.ChannelID(1)) + require.Equal(t, SourceOwner, currentDefinitions[1].Source, "channel 1 should still have owner source") + + // Owner channel 2 should still be present (not removed because source is not SourceOwner) + require.Contains(t, currentDefinitions, llotypes.ChannelID(2)) + require.Equal(t, SourceOwner, currentDefinitions[2].Source) + + // Result should contain both owner channels (adder's attempt to add channel 1 is ignored) + require.Len(t, currentDefinitions, 2) + }) + + t.Run("owner can tombstone owner channels", func(t *testing.T) { + currentDefinitions := make(llotypes.ChannelDefinitions) + newDefinitions := make(llotypes.ChannelDefinitions) + + adderID := uint32(300) + + // Set up current definitions with owner channel 1 and adder channel 2 + currentDefinitions[1] = makeChannelDefinition(1, SourceOwner) + currentDefinitions[2] = makeChannelDefinition(2, adderID) + + // Owner tries to tombstone owner channel 1 (should succeed) + newDefinitions[1] = llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}}, + Source: SourceOwner, + Tombstone: true, + } + + // Owner tries to tombstone adder channel 2 (should succeed) + newDefinitions[2] = llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatJSON, + Source: SourceOwner, + Tombstone: true, + } + + feedIDToChannelID := buildFeedIDMap(currentDefinitions) + cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, 
feedIDToChannelID) + + // Result should contain both channels + require.Len(t, currentDefinitions, 2) + + // Owner channel 1 should be present (tombstone succeeded) + require.Contains(t, currentDefinitions, llotypes.ChannelID(1)) + require.Equal(t, SourceOwner, currentDefinitions[1].Source) + require.True(t, currentDefinitions[1].Tombstone, "channel 1 should be tombstoned") + + // Adder channel 2 should be kept in definitions with Tombstone: true (tombstone succeeded) + require.Contains(t, currentDefinitions, llotypes.ChannelID(2)) + require.True(t, currentDefinitions[2].Tombstone, "channel 2 should be tombstoned") + }) + }) + + t.Run("feedID uniqueness", func(t *testing.T) { + cdc := &channelDefinitionCache{ + lggr: logger.TestSugared(t), + } + + adderID := uint32(100) + feedID1 := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + feedID2 := common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") + + t.Run("skips new channel with colliding FeedID", func(t *testing.T) { + currentDefinitions := make(llotypes.ChannelDefinitions) + newDefinitions := make(llotypes.ChannelDefinitions) + + // Existing channel 1 with feedID1 + currentDefinitions[1] = makeChannelDefinitionWithFeedID(1, SourceOwner, feedID1) + + // New channel 2 with same feedID1 (collision) + newDefinitions[2] = makeChannelDefinitionWithFeedID(2, SourceOwner, feedID1) + + // New channel 3 with unique feedID2 (should be added) + newDefinitions[3] = makeChannelDefinitionWithFeedID(3, SourceOwner, feedID2) + + feedIDToChannelID := buildFeedIDMap(currentDefinitions) + cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, feedIDToChannelID) + + // Channel 1 should be present (existing) + require.Contains(t, currentDefinitions, llotypes.ChannelID(1)) + // Channel 2 should NOT be present (collision, skipped) + require.NotContains(t, currentDefinitions, llotypes.ChannelID(2)) + // Channel 3 should be present (unique FeedID) + require.Contains(t, currentDefinitions, llotypes.ChannelID(3)) + require.Len(t, currentDefinitions, 2) + }) + + t.Run("allows owner to update same channel with same FeedID", func(t *testing.T) { + currentDefinitions := make(llotypes.ChannelDefinitions) + newDefinitions := make(llotypes.ChannelDefinitions) + + // Existing channel 1 with feedID1 + currentDefinitions[1] = makeChannelDefinitionWithFeedID(1, SourceOwner, feedID1) + + // Owner updates channel 1 with same feedID1 (should be allowed) + updatedDef := makeChannelDefinitionWithFeedID(1, SourceOwner, feedID1) + updatedDef.Streams = []llotypes.Stream{{StreamID: 999, Aggregator: llotypes.AggregatorMedian}} + newDefinitions[1] = updatedDef + + feedIDToChannelID := buildFeedIDMap(currentDefinitions) + cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, feedIDToChannelID) + + // Channel 1 should be present and updated + require.Contains(t, currentDefinitions, llotypes.ChannelID(1)) + require.Equal(t, uint32(999), currentDefinitions[1].Streams[0].StreamID) + require.Len(t, currentDefinitions, 1) + }) + + t.Run("skips owner update to same channel with colliding FeedID", func(t *testing.T) { + currentDefinitions := make(llotypes.ChannelDefinitions) + newDefinitions := make(llotypes.ChannelDefinitions) + + // Existing channel 1 with feedID1 + currentDefinitions[1] = makeChannelDefinitionWithFeedID(1, SourceOwner, feedID1) + // Existing channel 2 with feedID2 + currentDefinitions[2] = makeChannelDefinitionWithFeedID(2, SourceOwner, feedID2) + + // Owner tries to update 
channel 1 with feedID2 (collides with channel 2) + newDefinitions[1] = makeChannelDefinitionWithFeedID(1, SourceOwner, feedID2) + + feedIDToChannelID := buildFeedIDMap(currentDefinitions) + cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, feedIDToChannelID) + + // Channel 1 should still have feedID1 (update was skipped) + require.Contains(t, currentDefinitions, llotypes.ChannelID(1)) + require.Equal(t, feedID1, extractFeedID(currentDefinitions[1].Opts)) + // Channel 2 should still be present + require.Contains(t, currentDefinitions, llotypes.ChannelID(2)) + require.Len(t, currentDefinitions, 2) + }) + + t.Run("skips adder channel with colliding FeedID", func(t *testing.T) { + currentDefinitions := make(llotypes.ChannelDefinitions) + newDefinitions := make(llotypes.ChannelDefinitions) + + // Existing owner channel 1 with feedID1 + currentDefinitions[1] = makeChannelDefinitionWithFeedID(1, SourceOwner, feedID1) + + // Adder tries to add channel 2 with same feedID1 (collision) + newDefinitions[2] = makeChannelDefinitionWithFeedID(2, adderID, feedID1) + + // Adder tries to add channel 3 with unique feedID2 (should be added) + newDefinitions[3] = makeChannelDefinitionWithFeedID(3, adderID, feedID2) + + feedIDToChannelID := buildFeedIDMap(currentDefinitions) + cdc.mergeDefinitions(adderID, currentDefinitions, newDefinitions, feedIDToChannelID) + + // Channel 1 should be present (existing) + require.Contains(t, currentDefinitions, llotypes.ChannelID(1)) + // Channel 2 should NOT be present (collision, skipped) + require.NotContains(t, currentDefinitions, llotypes.ChannelID(2)) + // Channel 3 should be present (unique FeedID) + require.Contains(t, currentDefinitions, llotypes.ChannelID(3)) + require.Len(t, currentDefinitions, 2) + }) + + t.Run("allows owner to update channel with new unique FeedID", func(t *testing.T) { + currentDefinitions := make(llotypes.ChannelDefinitions) + newDefinitions := make(llotypes.ChannelDefinitions) + + // Existing channel 1 with feedID1 + currentDefinitions[1] = makeChannelDefinitionWithFeedID(1, SourceOwner, feedID1) + + // Owner updates channel 1 with new unique feedID2 (should be allowed) + newDefinitions[1] = makeChannelDefinitionWithFeedID(1, SourceOwner, feedID2) + + feedIDToChannelID := buildFeedIDMap(currentDefinitions) + cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, feedIDToChannelID) + + // Channel 1 should be present with new feedID2 + require.Contains(t, currentDefinitions, llotypes.ChannelID(1)) + require.Equal(t, feedID2, extractFeedID(currentDefinitions[1].Opts)) + require.Len(t, currentDefinitions, 1) + }) + + t.Run("skips multiple channels with same colliding FeedID", func(t *testing.T) { + currentDefinitions := make(llotypes.ChannelDefinitions) + newDefinitions := make(llotypes.ChannelDefinitions) + + // Existing channel 1 with feedID1 + currentDefinitions[1] = makeChannelDefinitionWithFeedID(1, SourceOwner, feedID1) + + // Multiple new channels with same colliding feedID1 + newDefinitions[2] = makeChannelDefinitionWithFeedID(2, SourceOwner, feedID1) + newDefinitions[3] = makeChannelDefinitionWithFeedID(3, SourceOwner, feedID1) + newDefinitions[4] = makeChannelDefinitionWithFeedID(4, SourceOwner, feedID1) + + // One channel with unique feedID2 + newDefinitions[5] = makeChannelDefinitionWithFeedID(5, SourceOwner, feedID2) + + feedIDToChannelID := buildFeedIDMap(currentDefinitions) + cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, feedIDToChannelID) + + // Channel 1 should be present 
(existing)
+			require.Contains(t, currentDefinitions, llotypes.ChannelID(1))
+			// Channels 2, 3, 4 should NOT be present (all collided)
+			require.NotContains(t, currentDefinitions, llotypes.ChannelID(2))
+			require.NotContains(t, currentDefinitions, llotypes.ChannelID(3))
+			require.NotContains(t, currentDefinitions, llotypes.ChannelID(4))
+			// Channel 5 should be present (unique FeedID)
+			require.Contains(t, currentDefinitions, llotypes.ChannelID(5))
+			require.Len(t, currentDefinitions, 2)
+		})
+
+		t.Run("allows channels without FeedID", func(t *testing.T) {
+			currentDefinitions := make(llotypes.ChannelDefinitions)
+			newDefinitions := make(llotypes.ChannelDefinitions)
+
+			// Existing channel 1 with feedID1
+			currentDefinitions[1] = makeChannelDefinitionWithFeedID(1, SourceOwner, feedID1)
+
+			// New channel 2 without FeedID (should be allowed)
+			newDefinitions[2] = makeChannelDefinition(2, SourceOwner)
+
+			// New channel 3 with unique feedID2 (should be allowed)
+			newDefinitions[3] = makeChannelDefinitionWithFeedID(3, SourceOwner, feedID2)
+
+			feedIDToChannelID := buildFeedIDMap(currentDefinitions)
+			cdc.mergeDefinitions(SourceOwner, currentDefinitions, newDefinitions, feedIDToChannelID)
-		assert.Equal(t, cdc.addr, orm.lastPersistedAddr)
-		assert.Equal(t, cdc.donID, orm.lastPersistedDonID)
-		assert.Equal(t, cdc.persistedVersion, orm.lastPersistedVersion)
-		assert.Equal(t, cdc.definitions, orm.lastPersistedDfns)
-		assert.Equal(t, cdc.definitionsBlockNum, orm.lastPersistedBlockNum)
+			// All channels should be present
+			require.Contains(t, currentDefinitions, llotypes.ChannelID(1))
+			require.Contains(t, currentDefinitions, llotypes.ChannelID(2))
+			require.Contains(t, currentDefinitions, llotypes.ChannelID(3))
+			require.Len(t, currentDefinitions, 3)
 		})
 	})
 }
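An aside on the helpers: buildFeedIDMap and extractFeedID are used throughout the feedID uniqueness tests but defined elsewhere in this file. A minimal sketch, assuming (consistently with makeChannelDefinitionWithFeedID above) that Opts is a JSON object carrying a hex-encoded feedId field; the Sketch suffixes mark these as illustrations rather than the PR's code:

// buildFeedIDMapSketch indexes existing channels by FeedID so collisions can
// be detected in O(1) during a merge.
func buildFeedIDMapSketch(defs llotypes.ChannelDefinitions) map[common.Hash]llotypes.ChannelID {
	m := make(map[common.Hash]llotypes.ChannelID, len(defs))
	for channelID, def := range defs {
		if feedID := extractFeedIDSketch(def.Opts); feedID != (common.Hash{}) {
			m[feedID] = channelID
		}
	}
	return m
}

// extractFeedIDSketch returns the zero hash when Opts is empty, malformed, or
// has no feedId key, so channels without a FeedID can never collide.
func extractFeedIDSketch(opts llotypes.ChannelOpts) common.Hash {
	var parsed struct {
		FeedID string `json:"feedId"`
	}
	if len(opts) == 0 || json.Unmarshal(opts, &parsed) != nil || parsed.FeedID == "" {
		return common.Hash{}
	}
	return common.HexToHash(parsed.FeedID)
}

 func Test_filterName(t *testing.T) {
 	s := types.ChannelDefinitionCacheFilterName(common.Address{1, 2, 3}, 654)
-	assert.Equal(t, "OCR3 LLO ChannelDefinitionCachePoller - 0x0102030000000000000000000000000000000000:654", s)
+	require.Equal(t, "OCR3 LLO ChannelDefinitionCachePoller - 0x0102030000000000000000000000000000000000:654", s)
 }
diff --git a/core/services/llo/channeldefinitions/static_channel_definitions_cache.go b/core/services/llo/channeldefinitions/static_channel_definitions_cache.go
index 91c0efadd81..d9758f9ec55 100644
--- a/core/services/llo/channeldefinitions/static_channel_definitions_cache.go
+++ b/core/services/llo/channeldefinitions/static_channel_definitions_cache.go
@@ -44,7 +44,7 @@ func (s *staticCDC) Close() error {
 	})
 }

-func (s *staticCDC) Definitions() llotypes.ChannelDefinitions {
+func (s *staticCDC) Definitions(prev llotypes.ChannelDefinitions) llotypes.ChannelDefinitions {
 	return s.definitions
 }
diff --git a/core/services/llo/cleanup_test.go b/core/services/llo/cleanup_test.go
index f7ba8f50aa9..028c1180199 100644
--- a/core/services/llo/cleanup_test.go
+++ b/core/services/llo/cleanup_test.go
@@ -2,6 +2,7 @@ package llo

 import (
 	"context"
+	"encoding/json"
 	"testing"
 	"time"

@@ -68,9 +69,9 @@ func Test_Cleanup(t *testing.T) {
 	// add some channel definitions
 	cdcorm := NewChainScopedORM(ds, chainSelector)
 	{
-		err := cdcorm.StoreChannelDefinitions(ctx, addr1, donID1, 1, llotypes.ChannelDefinitions{}, 1)
+		err := cdcorm.StoreChannelDefinitions(ctx, addr1, donID1, 1, json.RawMessage(`{}`), 1, 1)
 		require.NoError(t, err)
-		err = cdcorm.StoreChannelDefinitions(ctx, addr2, donID2, 1, llotypes.ChannelDefinitions{}, 1)
+		err = cdcorm.StoreChannelDefinitions(ctx, addr2, donID2, 1, json.RawMessage(`{}`), 1, 1)
 		require.NoError(t, 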
err) } @@ -134,10 +135,10 @@ func Test_StaleTransmissionReaper(t *testing.T) { err := torm.Insert(testutils.Context(t), transmissions) require.NoError(t, err) pgtest.MustExec(t, ds, ` -UPDATE llo_mercury_transmit_queue +UPDATE llo_mercury_transmit_queue SET inserted_at = NOW() - INTERVAL '48 hours' WHERE transmission_hash IN ( - SELECT transmission_hash FROM llo_mercury_transmit_queue + SELECT transmission_hash FROM llo_mercury_transmit_queue LIMIT 5 ); `) diff --git a/core/services/llo/cre/report_codec.go b/core/services/llo/cre/report_codec.go index ebb27137694..82f19bf8cb9 100644 --- a/core/services/llo/cre/report_codec.go +++ b/core/services/llo/cre/report_codec.go @@ -19,6 +19,7 @@ import ( ) var _ datastreamsllo.ReportCodec = ReportCodecCapabilityTrigger{} +var _ datastreamsllo.OptsParser = ReportCodecCapabilityTrigger{} type ReportCodecCapabilityTrigger struct { lggr logger.Logger @@ -60,7 +61,7 @@ func (r *ReportCodecCapabilityTriggerOpts) Encode() ([]byte, error) { // Encode a report into a capability trigger report // the returned byte slice is the marshaled protobuf of [capabilitiespb.OCRTriggerReport] -func (r ReportCodecCapabilityTrigger) Encode(report datastreamsllo.Report, cd llotypes.ChannelDefinition) ([]byte, error) { +func (r ReportCodecCapabilityTrigger) Encode(report datastreamsllo.Report, cd llotypes.ChannelDefinition, parsedOpts any) ([]byte, error) { if len(cd.Streams) != len(report.Values) { // Invariant violation return nil, fmt.Errorf("capability trigger expected %d streams, got %d", len(cd.Streams), len(report.Values)) @@ -70,12 +71,18 @@ func (r ReportCodecCapabilityTrigger) Encode(report datastreamsllo.Report, cd ll return nil, errors.New("capability trigger encoder does not currently support specimen reports") } - // NOTE: It seems suboptimal to have to parse the opts on every encode but - // not sure how to avoid it. Should be negligible performance hit as long - // as Opts is small. 
-	opts := ReportCodecCapabilityTriggerOpts{}
-	if err := (&opts).Decode(cd.Opts); err != nil {
-		return nil, fmt.Errorf("failed to decode opts; got: '%s'; %w", cd.Opts, err)
+	var opts ReportCodecCapabilityTriggerOpts
+	if parsedOpts != nil {
+		// Use cached opts
+		var ok bool
+		opts, ok = parsedOpts.(ReportCodecCapabilityTriggerOpts)
+		if !ok {
+			return nil, fmt.Errorf("expected ReportCodecCapabilityTriggerOpts, got %T", parsedOpts)
+		}
+	} else {
+		if err := (&opts).Decode(cd.Opts); err != nil {
+			return nil, fmt.Errorf("failed to decode opts; got: '%s'; %w", cd.Opts, err)
+		}
 	}

 	payload := make([]*commonds.LLOStreamDecimal, len(report.Values))
@@ -158,3 +165,11 @@ func (r ReportCodecCapabilityTrigger) Verify(cd llotypes.ChannelDefinition) erro
 func (r ReportCodecCapabilityTrigger) EventID(report datastreamsllo.Report) string {
 	return fmt.Sprintf("streams_%d_%d", r.donID, report.ObservationTimestampNanoseconds)
 }
+
+func (r ReportCodecCapabilityTrigger) ParseOpts(opts []byte) (any, error) {
+	var o ReportCodecCapabilityTriggerOpts
+	if err := o.Decode(opts); err != nil {
+		return nil, fmt.Errorf("failed to decode opts; got: '%s'; %w", opts, err)
+	}
+	return o, nil
+}
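The OptsParser implementation above lets a caller decode Opts once per channel definition and reuse the result across Encode calls instead of re-parsing JSON every time. A caller-side sketch; the wrapper function is hypothetical, and only ParseOpts and the three-argument Encode come from this diff:

// encodeAll is illustrative only, not part of this PR. It parses the channel
// opts once, then reuses the cached value for every report it encodes.
func encodeAll(codec ReportCodecCapabilityTrigger, reports []datastreamsllo.Report, cd llotypes.ChannelDefinition) ([][]byte, error) {
	parsedOpts, err := codec.ParseOpts(cd.Opts)
	if err != nil {
		return nil, err
	}
	out := make([][]byte, 0, len(reports))
	for _, report := range reports {
		// Passing the cached opts skips the per-call JSON decode; passing nil
		// would fall back to decoding cd.Opts inside Encode.
		b, err := codec.Encode(report, cd, parsedOpts)
		if err != nil {
			return nil, err
		}
		out = append(out, b)
	}
	return out, nil
}

diff --git a/core/services/llo/cre/report_codec_test.go b/core/services/llo/cre/report_codec_test.go
index eb12d935fb5..f2ed6cd24dd 100644
--- a/core/services/llo/cre/report_codec_test.go
+++ b/core/services/llo/cre/report_codec_test.go
@@ -37,7 +37,7 @@ func Test_ReportCodec(t *testing.T) {
 			{StreamID: 1},
 			{StreamID: 2},
 		},
-	})
+	}, nil)
 	require.NoError(t, err)

 	var pbuf capabilitiespb.OCRTriggerReport
@@ -100,7 +100,7 @@ func Test_ReportCodec(t *testing.T) {
 			{StreamID: 3},
 		},
 		Opts: opts,
-	})
+	}, nil)
 	require.NoError(t, err)

 	var pbuf capabilitiespb.OCRTriggerReport
@@ -331,3 +331,134 @@ func Test_ReportCodec(t *testing.T) {
 		require.EqualError(t, err, "multipliers length 3 != StreamValues length 2")
 	})
 }
+
+func TestReportCodecCapabilityTrigger_ParseOpts(t *testing.T) {
+	t.Run("ParseOpts: Valid opts with multipliers SUCCESS", func(t *testing.T) {
+		donID := uint32(1)
+		c := NewReportCodecCapabilityTrigger(logger.Test(t), donID)
+
+		multiplier1, err := decimal.NewFromString("1")
+		require.NoError(t, err)
+		multiplier2, err := decimal.NewFromString("1000000000000000000") // 10^18
+		require.NoError(t, err)
+		multiplier3, err := decimal.NewFromString("1000000") // 10^6
+		require.NoError(t, err)
+
+		optsBytes, err := (&ReportCodecCapabilityTriggerOpts{
+			Multipliers: []ReportCodecCapabilityTriggerMultiplier{
+				{Multiplier: multiplier1, StreamID: 1},
+				{Multiplier: multiplier2, StreamID: 2},
+				{Multiplier: multiplier3, StreamID: 3},
+			},
+		}).Encode()
+		require.NoError(t, err)
+
+		parsed, err := c.ParseOpts(optsBytes)
+		require.NoError(t, err)
+		require.NotNil(t, parsed)
+
+		opts, ok := parsed.(ReportCodecCapabilityTriggerOpts)
+		require.True(t, ok, "parsed result should be ReportCodecCapabilityTriggerOpts")
+
+		require.Len(t, opts.Multipliers, 3)
+		assert.True(t, opts.Multipliers[0].Multiplier.Equal(multiplier1))
+		assert.Equal(t, uint32(1), opts.Multipliers[0].StreamID)
+		assert.True(t, opts.Multipliers[1].Multiplier.Equal(multiplier2))
+		assert.Equal(t, uint32(2), opts.Multipliers[1].StreamID)
+		assert.True(t, opts.Multipliers[2].Multiplier.Equal(multiplier3))
+		assert.Equal(t, uint32(3), opts.Multipliers[2].StreamID)
+	})
+
+	t.Run("ParseOpts: Empty opts nil SUCCESS", func(t *testing.T) {
+		donID := uint32(1)
+		c := NewReportCodecCapabilityTrigger(logger.Test(t), donID)
+
+		parsed, err := c.ParseOpts(nil)
+		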
require.NoError(t, err) + require.NotNil(t, parsed) + + opts, ok := parsed.(ReportCodecCapabilityTriggerOpts) + require.True(t, ok, "parsed result should be ReportCodecCapabilityTriggerOpts") + + assert.Nil(t, opts.Multipliers) + }) + + t.Run("ParseOpts: Empty opts empty byte slice SUCCESS", func(t *testing.T) { + donID := uint32(1) + c := NewReportCodecCapabilityTrigger(logger.Test(t), donID) + + parsed, err := c.ParseOpts([]byte{}) + require.NoError(t, err) + require.NotNil(t, parsed) + + opts, ok := parsed.(ReportCodecCapabilityTriggerOpts) + require.True(t, ok, "parsed result should be ReportCodecCapabilityTriggerOpts") + + assert.Nil(t, opts.Multipliers) + }) + + t.Run("ParseOpts: Invalid JSON FAIL", func(t *testing.T) { + donID := uint32(1) + c := NewReportCodecCapabilityTrigger(logger.Test(t), donID) + + invalidJSON := []byte("{invalid json}") + parsed, err := c.ParseOpts(invalidJSON) + + require.Error(t, err) + require.Nil(t, parsed) + assert.Contains(t, err.Error(), "failed to decode opts") + assert.Contains(t, err.Error(), string(invalidJSON)) + }) + + t.Run("ParseOpts: JSON with unknown fields FAIL", func(t *testing.T) { + donID := uint32(1) + c := NewReportCodecCapabilityTrigger(logger.Test(t), donID) + + optsWithUnknownField := []byte(`{"multipliers":[],"unknown":"field"}`) + parsed, err := c.ParseOpts(optsWithUnknownField) + + require.Error(t, err) + require.Nil(t, parsed) + assert.Contains(t, err.Error(), "failed to decode opts") + assert.Contains(t, err.Error(), string(optsWithUnknownField)) + }) + + t.Run("ParseOpts: Wrong JSON structure multipliers as string FAIL", func(t *testing.T) { + donID := uint32(1) + c := NewReportCodecCapabilityTrigger(logger.Test(t), donID) + + wrongTypeJSON := []byte(`{"multipliers":"not an array"}`) + parsed, err := c.ParseOpts(wrongTypeJSON) + + require.Error(t, err) + require.Nil(t, parsed) + assert.Contains(t, err.Error(), "failed to decode opts") + assert.Contains(t, err.Error(), string(wrongTypeJSON)) + }) + + t.Run("ParseOpts: Wrong JSON structure invalid multiplier type FAIL", func(t *testing.T) { + donID := uint32(1) + c := NewReportCodecCapabilityTrigger(logger.Test(t), donID) + + invalidMultiplierJSON := []byte(`{"multipliers":[{"multiplier":"not a number","streamID":1}]}`) + parsed, err := c.ParseOpts(invalidMultiplierJSON) + + require.Error(t, err) + require.Nil(t, parsed) + assert.Contains(t, err.Error(), "failed to decode opts") + assert.Contains(t, err.Error(), string(invalidMultiplierJSON)) + }) + + t.Run("ParseOpts: Wrong JSON structure invalid streamID type FAIL", func(t *testing.T) { + donID := uint32(1) + c := NewReportCodecCapabilityTrigger(logger.Test(t), donID) + + invalidStreamIDJSON := []byte(`{"multipliers":[{"multiplier":"1000","streamID":"not a number"}]}`) + parsed, err := c.ParseOpts(invalidStreamIDJSON) + + require.Error(t, err) + require.Nil(t, parsed) + assert.Contains(t, err.Error(), "failed to decode opts") + assert.Contains(t, err.Error(), string(invalidStreamIDJSON)) + }) +} diff --git a/core/services/llo/cre/transmitter_test.go b/core/services/llo/cre/transmitter_test.go index e11e840f54f..576f8ec80f8 100644 --- a/core/services/llo/cre/transmitter_test.go +++ b/core/services/llo/cre/transmitter_test.go @@ -96,7 +96,7 @@ func encodeReport(t *testing.T, timestamp uint64) ocr3types.ReportWithInfo[lloty {StreamID: 2}, }, } - rawReport, err := codec.Encode(rep, cd) + rawReport, err := codec.Encode(rep, cd, nil) require.NoError(t, err) return ocr3types.ReportWithInfo[llotypes.ReportInfo]{ diff --git 
a/core/services/llo/orm.go b/core/services/llo/orm.go index f2b8a9509bc..7b75abf6a2c 100644 --- a/core/services/llo/orm.go +++ b/core/services/llo/orm.go @@ -3,13 +3,13 @@ package llo import ( "context" "database/sql" + "encoding/json" "errors" "fmt" "github.com/ethereum/go-ethereum/common" "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" - llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" "github.com/smartcontractkit/chainlink/v2/core/services/llo/channeldefinitions" "github.com/smartcontractkit/chainlink/v2/core/services/llo/types" ) @@ -42,15 +42,17 @@ func (o *chainScopedORM) LoadChannelDefinitions(ctx context.Context, addr common } // StoreChannelDefinitions will store a ChannelDefinitions list for a given chain_selector, addr, don_id -// It only updates if the new version is greater than the existing record -func (o *chainScopedORM) StoreChannelDefinitions(ctx context.Context, addr common.Address, donID, version uint32, dfns llotypes.ChannelDefinitions, blockNum int64) error { +// It updates if the new version is greater than or equal to the stored version, OR if the new block number is at least the stored block number +// (so definitions re-fetched at the same or a later block are stored even when the version has not advanced) +func (o *chainScopedORM) StoreChannelDefinitions(ctx context.Context, addr common.Address, donID, version uint32, dfns json.RawMessage, blockNum int64, format uint32) error { _, err := o.ds.ExecContext(ctx, ` -INSERT INTO channel_definitions (chain_selector, addr, don_id, definitions, block_num, version, updated_at) -VALUES ($1, $2, $3, $4, $5, $6, NOW()) +INSERT INTO channel_definitions (chain_selector, addr, don_id, definitions, block_num, version, updated_at, format) +VALUES ($1, $2, $3, $4, $5, $6, NOW(), $7) ON CONFLICT (chain_selector, addr, don_id) DO UPDATE -SET definitions = $4, block_num = $5, version = $6, updated_at = NOW() -WHERE EXCLUDED.version > channel_definitions.version -`, o.chainSelector, addr, donID, dfns, blockNum, version) +SET definitions = $4, block_num = $5, version = $6, updated_at = NOW(), format = $7 +WHERE EXCLUDED.don_id = channel_definitions.don_id AND EXCLUDED.chain_selector = channel_definitions.chain_selector +AND (EXCLUDED.version >= channel_definitions.version OR EXCLUDED.block_num >= channel_definitions.block_num)`, + o.chainSelector, addr, donID, dfns, blockNum, version, format) if err != nil { return fmt.Errorf("StoreChannelDefinitions failed: %w", err) } diff --git a/core/services/llo/orm_test.go b/core/services/llo/orm_test.go index 3051c0f93b0..ce5964bb05c 100644 --- a/core/services/llo/orm_test.go +++ b/core/services/llo/orm_test.go @@ -1,6 +1,7 @@ package llo import ( + "encoding/json" "fmt" "math/rand" "testing" @@ -12,6 +13,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/services/llo/channeldefinitions" "github.com/smartcontractkit/chainlink/v2/core/services/llo/types" ) @@ -41,42 +43,56 @@ func Test_ORM(t *testing.T) { expectedBlockNum2 := rand.Int63() cid1 := rand.Uint32() cid2 := rand.Uint32() + sourceID := uint32(1) channelDefsJSON := fmt.Sprintf(` { "%d": { - "reportFormat": 42, - "chainSelector": 142, - "streams": [{"streamId": 1, "aggregator": "median"}, {"streamId": 2, "aggregator": "mode"}], - "opts": {"foo":"bar"} - }, - "%d": { - "reportFormat": 43, - "chainSelector": 142, - "streams": [{"streamId": 1, "aggregator": "median"}, {"streamId": 3, "aggregator": "quote"}] + "trigger": { + "source": 
%d, + "url": "", + "sha": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "block_num": 142, + "log_index": 0, + "version": 1, + "tx_hash": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] + }, + "definitions": { + "%d": { + "reportFormat": 42, + "chainSelector": 142, + "streams": [{"streamId": 1, "aggregator": "median"}, {"streamId": 2, "aggregator": "mode"}], + "opts": {"foo":"bar"} + }, + "%d": { + "reportFormat": 43, + "chainSelector": 142, + "streams": [{"streamId": 1, "aggregator": "median"}, {"streamId": 3, "aggregator": "quote"}] + } + } } } - `, cid1, cid2) + `, sourceID, sourceID, cid1, cid2) pgtest.MustExec(t, db, ` - INSERT INTO channel_definitions(addr, chain_selector, don_id, definitions, block_num, version, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, NOW()) - `, addr1, ETHMainnetChainSelector, 1, channelDefsJSON, expectedBlockNum, 1) + INSERT INTO channel_definitions(addr, chain_selector, don_id, definitions, block_num, version, updated_at, format) + VALUES ($1, $2, $3, $4, $5, $6, NOW(), $7) + `, addr1, ETHMainnetChainSelector, 1, channelDefsJSON, expectedBlockNum, 1, 1) pgtest.MustExec(t, db, ` - INSERT INTO channel_definitions(addr, chain_selector, don_id, definitions, block_num, version, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, NOW()) - `, addr2, ETHMainnetChainSelector, 1, `{}`, expectedBlockNum2, 1) + INSERT INTO channel_definitions(addr, chain_selector, don_id, definitions, block_num, version, updated_at, format) + VALUES ($1, $2, $3, $4, $5, $6, NOW(), $7) + `, addr2, ETHMainnetChainSelector, 1, `{}`, expectedBlockNum2, 1, 1) { // alternative chain selector; we expect these ones to be ignored pgtest.MustExec(t, db, ` - INSERT INTO channel_definitions(addr, chain_selector, don_id, definitions, block_num, version, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, NOW()) - `, addr1, OtherChainSelector, 1, channelDefsJSON, expectedBlockNum, 1) + INSERT INTO channel_definitions(addr, chain_selector, don_id, definitions, block_num, version, updated_at, format) + VALUES ($1, $2, $3, $4, $5, $6, NOW(), $7) + `, addr1, OtherChainSelector, 1, channelDefsJSON, expectedBlockNum, 1, 1) pgtest.MustExec(t, db, ` - INSERT INTO channel_definitions(addr, chain_selector, don_id, definitions, block_num, version, updated_at) - VALUES ($1, $2, $3, $4, $5, $6, NOW()) - `, addr3, OtherChainSelector, 1, channelDefsJSON, expectedBlockNum, 1) + INSERT INTO channel_definitions(addr, chain_selector, don_id, definitions, block_num, version, updated_at, format) + VALUES ($1, $2, $3, $4, $5, $6, NOW(), $7) + `, addr3, OtherChainSelector, 1, channelDefsJSON, expectedBlockNum, 1, 1) } pd, err := orm.LoadChannelDefinitions(ctx, addr1, donID1) @@ -87,6 +103,15 @@ func Test_ORM(t *testing.T) { assert.Equal(t, expectedBlockNum, pd.BlockNum) assert.Equal(t, donID1, pd.DonID) assert.Equal(t, uint32(1), pd.Version) + assert.Equal(t, uint32(1), pd.Format) + + // Unmarshal the definitions from json.RawMessage + var loadedDefs map[uint32]types.SourceDefinition + err = json.Unmarshal(pd.Definitions, &loadedDefs) + require.NoError(t, err) + require.Len(t, loadedDefs, 1) + assert.Equal(t, sourceID, loadedDefs[sourceID].Trigger.Source) + assert.Equal(t, int64(142), loadedDefs[sourceID].Trigger.BlockNum) assert.Equal(t, llotypes.ChannelDefinitions{ cid1: llotypes.ChannelDefinition{ ReportFormat: 42, @@ -97,13 +122,17 @@ func Test_ORM(t *testing.T) { ReportFormat: 43, Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 3, Aggregator: 
llotypes.AggregatorQuote}}, }, - }, pd.Definitions) + }, loadedDefs[sourceID].Definitions) // does not load erroneously for a different address pd, err = orm.LoadChannelDefinitions(ctx, addr2, donID1) require.NoError(t, err) - assert.Equal(t, llotypes.ChannelDefinitions{}, pd.Definitions) + // Unmarshal empty definitions + var emptyDefs map[uint32]types.SourceDefinition + err = json.Unmarshal(pd.Definitions, &emptyDefs) + require.NoError(t, err) + assert.Empty(t, emptyDefs) assert.Equal(t, expectedBlockNum2, pd.BlockNum) // does not load erroneously for a different don ID @@ -118,20 +147,53 @@ func Test_ORM(t *testing.T) { expectedBlockNum := rand.Int63() cid1 := rand.Uint32() cid2 := rand.Uint32() - defs := llotypes.ChannelDefinitions{ - cid1: llotypes.ChannelDefinition{ - ReportFormat: llotypes.ReportFormatJSON, - Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 2, Aggregator: llotypes.AggregatorMode}}, - Opts: []byte(`{"foo":"bar"}`), + cid3 := rand.Uint32() + cid4 := rand.Uint32() + defs := map[uint32]types.SourceDefinition{ + 1: { + Trigger: types.Trigger{ + Source: 1, + BlockNum: 142, + Version: 42, + }, + Definitions: llotypes.ChannelDefinitions{ + cid1: llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 2, Aggregator: llotypes.AggregatorMode}}, + Opts: []byte(`{"foo":"bar"}`), + }, + cid2: llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 3, Aggregator: llotypes.AggregatorQuote}}, + }, + }, }, - cid2: llotypes.ChannelDefinition{ - ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, - Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 3, Aggregator: llotypes.AggregatorQuote}}, + 2: { + Trigger: types.Trigger{ + Source: 2, + BlockNum: 142, + Version: 42, + }, + Definitions: llotypes.ChannelDefinitions{ + cid3: llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 2, Aggregator: llotypes.AggregatorMode}}, + Opts: []byte(`{"foo":"bar"}`), + }, + cid4: llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{{StreamID: 1, Aggregator: llotypes.AggregatorMedian}, {StreamID: 3, Aggregator: llotypes.AggregatorQuote}}, + }, + }, }, } + // Marshal definitions to json.RawMessage + defsJSON, err := json.Marshal(defs) + require.NoError(t, err) + t.Run("stores channel definitions in the database", func(t *testing.T) { - err := orm.StoreChannelDefinitions(ctx, addr1, donID1, 42, defs, expectedBlockNum) + err := orm.StoreChannelDefinitions(ctx, addr1, donID1, 42, defsJSON, expectedBlockNum, channeldefinitions.MultiChannelDefinitionsFormat) require.NoError(t, err) pd, err := orm.LoadChannelDefinitions(ctx, addr1, donID1) @@ -141,17 +203,30 @@ func Test_ORM(t *testing.T) { assert.Equal(t, expectedBlockNum, pd.BlockNum) assert.Equal(t, donID1, pd.DonID) assert.Equal(t, uint32(42), pd.Version) - assert.Equal(t, defs, pd.Definitions) + assert.Equal(t, channeldefinitions.MultiChannelDefinitionsFormat, pd.Format) + + // Unmarshal and compare + var loadedDefs map[uint32]types.SourceDefinition + err = json.Unmarshal(pd.Definitions, &loadedDefs) + require.NoError(t, err) + assert.Equal(t, defs, loadedDefs) }) t.Run("does not 
update if the version and block number are older than the persisted record", func(t *testing.T) { // try to update with an older version and an older block number (the upsert keeps the stored row unless at least one of them is >= the existing values) - err := orm.StoreChannelDefinitions(ctx, addr1, donID1, 41, llotypes.ChannelDefinitions{}, expectedBlockNum) + emptyDefsJSON, err := json.Marshal(map[uint32]types.SourceDefinition{}) + require.NoError(t, err) + err = orm.StoreChannelDefinitions(ctx, addr1, donID1, 41, emptyDefsJSON, expectedBlockNum-1, channeldefinitions.MultiChannelDefinitionsFormat) require.NoError(t, err) pd, err := orm.LoadChannelDefinitions(ctx, addr1, donID1) require.NoError(t, err) assert.Equal(t, uint32(42), pd.Version) - assert.Equal(t, defs, pd.Definitions) + + // Unmarshal and verify original definitions are still there + var loadedDefs map[uint32]types.SourceDefinition + err = json.Unmarshal(pd.Definitions, &loadedDefs) + require.NoError(t, err) + assert.Equal(t, defs, loadedDefs) }) }) } diff --git a/core/services/llo/types/types.go b/core/services/llo/types/types.go index d1de39ea4d8..71cfa0ba199 100644 --- a/core/services/llo/types/types.go +++ b/core/services/llo/types/types.go @@ -1,6 +1,7 @@ package types import ( + "encoding/json" "strconv" "time" @@ -15,12 +16,32 @@ func ChannelDefinitionCacheFilterName(addr common.Address, donID uint32) string } type PersistedDefinitions struct { - ChainSelector uint64 `db:"chain_selector"` - Address common.Address `db:"addr"` - Definitions llotypes.ChannelDefinitions `db:"definitions"` - // The block number in which the log for this definitions was emitted - BlockNum int64 `db:"block_num"` - DonID uint32 `db:"don_id"` - Version uint32 `db:"version"` - UpdatedAt time.Time `db:"updated_at"` + ChainSelector uint64 `db:"chain_selector"` + Address common.Address `db:"addr"` + Definitions json.RawMessage `db:"definitions"` + BlockNum int64 `db:"block_num"` + DonID uint32 `db:"don_id"` + Version uint32 `db:"version"` + Format uint32 `db:"format"` + UpdatedAt time.Time `db:"updated_at"` +} + +// Trigger contains the information needed to fetch channel definitions from a URL. +// It is created from on-chain events and includes the source, URL, expected SHA hash, +// block number, version (for owner sources), and transaction hash. +type Trigger struct { + Source uint32 `json:"source"` + URL string `json:"url"` + SHA [32]byte `json:"sha"` + BlockNum int64 `json:"block_num"` + LogIndex int64 `json:"log_index"` + Version uint32 `json:"version"` + TxHash [32]byte `json:"tx_hash"` +} + +// SourceDefinition stores the channel definitions fetched from a specific source along with +// the trigger that initiated the fetch. 
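+// As persisted, the definitions column therefore decodes as map[sourceID]SourceDefinition, for example (abridged, values hypothetical):
+//
+//	{"1": {"trigger": {"source": 1, "block_num": 142}, "definitions": {"7": {"reportFormat": 42}}}}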
+type SourceDefinition struct { + Trigger Trigger `json:"trigger"` + Definitions llotypes.ChannelDefinitions `json:"definitions"` } diff --git a/core/services/ocr2/plugins/llo/integration_test.go b/core/services/ocr2/plugins/llo/integration_test.go index 0b62130ac13..85f95d05aa5 100644 --- a/core/services/ocr2/plugins/llo/integration_test.go +++ b/core/services/ocr2/plugins/llo/integration_test.go @@ -36,6 +36,7 @@ import ( ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + "github.com/smartcontractkit/chainlink-common/pkg/utils" datastreamsllo "github.com/smartcontractkit/chainlink-data-streams/llo" lloevm "github.com/smartcontractkit/chainlink-data-streams/llo/reportcodecs/evm" "github.com/smartcontractkit/chainlink-evm/gethwrappers/generated/link_token_interface" @@ -51,7 +52,7 @@ import ( "github.com/smartcontractkit/chainlink-evm/pkg/llo" evmtestutils "github.com/smartcontractkit/chainlink-evm/pkg/testutils" evmtypes "github.com/smartcontractkit/chainlink-evm/pkg/types" - "github.com/smartcontractkit/chainlink-evm/pkg/utils" + evmutils "github.com/smartcontractkit/chainlink-evm/pkg/utils" ubig "github.com/smartcontractkit/chainlink-evm/pkg/utils/big" "github.com/smartcontractkit/chainlink/v2/core/config" @@ -72,7 +73,7 @@ var ( reportTimeout = time.Second * 60 ) -func setupBlockchain(t *testing.T) ( +func setupBlockchain(t *testing.T, adders ...*bind.TransactOpts) ( *bind.TransactOpts, evmtypes.Backend, *configurator.Configurator, @@ -90,6 +91,9 @@ func setupBlockchain(t *testing.T) ( ) { steve := evmtestutils.MustNewSimTransactor(t) // config contract deployer and owner genesisData := gethtypes.GenesisAlloc{steve.From: {Balance: assets.Ether(1000).ToInt()}} + for _, adder := range adders { + genesisData[adder.From] = gethtypes.Account{Balance: assets.Ether(1000).ToInt()} + } backend := cltest.NewSimulatedBackend(t, genesisData, ethconfig.Defaults.Miner.GasCeil) backend.Commit() backend.Commit() // ensure starting block number at least 1 @@ -756,11 +760,11 @@ lloConfigMode = "bluegreen" const sampleTimestampsStockPriceChannelID = 7 const sampleTimestampedStreamValueChannelID = 8 - dexBasedAssetFeedID := utils.NewHash() - rwaFeedID := utils.NewHash() - benchmarkPriceFeedID := utils.NewHash() - timestampedStreamValueFeedID := utils.NewHash() - fundingRateFeedID := utils.NewHash() + dexBasedAssetFeedID := evmutils.NewHash() + rwaFeedID := evmutils.NewHash() + benchmarkPriceFeedID := evmutils.NewHash() + timestampedStreamValueFeedID := evmutils.NewHash() + fundingRateFeedID := evmutils.NewHash() simpleStreamlinedFeedID := pad32bytes(simpleStreamlinedChannelID) complexStreamlinedFeedID := pad32bytes(complexStreamlinedChannelID) sampleTimestampsStockPriceFeedID := pad32bytes(sampleTimestampsStockPriceChannelID) @@ -2173,6 +2177,521 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi }) } +func TestIntegration_LLO_channel_merging_owners_adders(t *testing.T) { + t.Parallel() + + offchainConfig := datastreamsllo.OffchainConfig{ + ProtocolVersion: 1, + DefaultMinReportIntervalNanoseconds: uint64(1 * time.Second), + EnableObservationCompression: true, + } + + clientCSAKeys := make([]csakey.KeyV2, nNodes) + clientPubKeys := make([]ed25519.PublicKey, nNodes) + + const salt = 400 + + for i := range nNodes { + k := big.NewInt(int64(salt + i)) + key := csakey.MustNewV2XXXTestingOnly(k) + clientCSAKeys[i] = key + clientPubKeys[i] = key.PublicKey + } + + // Create adder 
accounts before creating backend + adder1 := evmtestutils.MustNewSimTransactor(t) + adder2 := evmtestutils.MustNewSimTransactor(t) + + steve, backend, configurator, configuratorAddress, _, _, _, _, configStore, configStoreAddress, _, _, _, _ := setupBlockchain(t, adder1, adder2) + fromBlock := 1 + + // Setup bootstrap + bootstrapCSAKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(salt - 1)) + bootstrapNodePort := freeport.GetOne(t) + appBootstrap, bootstrapPeerID, _, bootstrapKb, _ := setupNode(t, bootstrapNodePort, "bootstrap_llo", backend, bootstrapCSAKey, nil) + bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb} + + t.Run("Channel merging lifecycle with owners and adders", func(t *testing.T) { + packetCh := make(chan *packet, 100000) + serverKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(salt - 2)) + serverPubKey := serverKey.PublicKey + srv := NewMercuryServer(t, serverKey, packetCh) + + serverURL := startMercuryServer(t, srv, clientPubKeys) + + donID := uint32(999888) + streams := []Stream{ethStream, linkStream} + streamMap := make(map[uint32]Stream) + for _, strm := range streams { + streamMap[strm.id] = strm + } + + // Setup oracle nodes + oracles, nodes := setupNodes(t, nNodes, backend, clientCSAKeys, func(c *chainlink.Config) { + c.Mercury.Transmitter.Protocol = ptr(config.MercuryTransmitterProtocolGRPC) + }) + + chainID := testutils.SimulatedChainID + relayType := "evm" + relayConfig := fmt.Sprintf(` +chainID = "%s" +fromBlock = %d +lloDonID = %d +lloConfigMode = "bluegreen" +`, chainID, fromBlock, donID) + addBootstrapJob(t, bootstrapNode, configuratorAddress, "job-channel-merge", relayType, relayConfig) + + // Configure adders on the contract + // Adder IDs start from 1000 + adder1ID := uint32(1001) + adder2ID := uint32(1002) + + require.NoError(t, utils.JustError(configStore.SetChannelAdderAddress(steve, adder1ID, adder1.From))) + backend.Commit() + require.NoError(t, utils.JustError(configStore.SetChannelAdderAddress(steve, adder2ID, adder2.From))) + backend.Commit() + + // Enable adders + require.NoError(t, utils.JustError(configStore.SetChannelAdder(steve, donID, adder1ID, true))) + backend.Commit() + require.NoError(t, utils.JustError(configStore.SetChannelAdder(steve, donID, adder2ID, true))) + backend.Commit() + + pluginConfig := fmt.Sprintf(`servers = { "%s" = "%x" } +donID = %d +channelDefinitionsContractAddress = "0x%x" +channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, configStoreAddress, fromBlock) + + // Add stream specs and LLO jobs to all nodes + for i, node := range nodes { + addMemoStreamSpecs(t, node, streams) + addLLOJob( + t, + node, + configuratorAddress, + bootstrapPeerID, + bootstrapNodePort, + clientPubKeys[i], + "channel-merge-test", + pluginConfig, + relayType, + relayConfig, + ) + } + + // Set initial OCR config + digest := setProductionConfig( + t, donID, steve, backend, configurator, configuratorAddress, nodes, WithOracles(oracles), WithOffchainConfig(offchainConfig), + ) + + // Track reports by channel ID + reportsByChannel := make(map[uint32][]datastreamsllo.Report) + lastReportTimeByChannel := make(map[uint32]time.Time) + + // Helper function to wait for reports from specific channels + waitForReportsFromChannels := func(t *testing.T, expectedChannels map[uint32]bool, timeout time.Duration) { + seenChannels := make(map[uint32]bool) + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) && len(seenChannels) < len(expectedChannels) { + pckt, err := receiveWithTimeout(t, packetCh, 
2*time.Second) + if err != nil { + // Timed out waiting for this packet; keep polling until the overall deadline. + continue + } + req := pckt.req + if req.ReportFormat != uint32(llotypes.ReportFormatJSON) { + continue + } + _, _, r, _, err := (datastreamsllo.JSONReportCodec{}).UnpackDecode(req.Payload) + if err != nil { + continue + } + + if expectedChannels[r.ChannelID] { + reportsByChannel[r.ChannelID] = append(reportsByChannel[r.ChannelID], r) + lastReportTimeByChannel[r.ChannelID] = time.Now() + seenChannels[r.ChannelID] = true + } + } + require.Len(t, seenChannels, len(expectedChannels), "expected reports from all channels: got %v, expected %v", seenChannels, expectedChannels) + } + + // Scenario 1: Owner adds initial channels + t.Run("Owner adds initial channels", func(t *testing.T) { + channelDefinitions := llotypes.ChannelDefinitions{ + 1: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: ethStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + 2: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: ethStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + 3: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: linkStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + } + url, sha := newChannelDefinitionsServer(t, channelDefinitions) + + // Set channel definitions + _, err := configStore.SetChannelDefinitions(steve, donID, url, sha) + require.NoError(t, err) + backend.Commit() + + // Wait for channel definitions to be processed (give time for log polling and fetching) + time.Sleep(3 * time.Second) + + // Wait for reports from all owner channels + expectedChannels := map[uint32]bool{1: true, 2: true, 3: true} + waitForReportsFromChannels(t, expectedChannels, reportTimeout) + + // Verify reports were generated + require.NotEmpty(t, reportsByChannel[1], "channel 1 should have reports") + require.NotEmpty(t, reportsByChannel[2], "channel 2 should have reports") + require.NotEmpty(t, reportsByChannel[3], "channel 3 should have reports") + + // Verify report content + for channelID := range expectedChannels { + report := reportsByChannel[channelID][0] + assert.Equal(t, digest, report.ConfigDigest) + assert.False(t, report.Specimen) + if channelID == 3 { + assert.Equal(t, "13.25", report.Values[0].(*datastreamsllo.Decimal).String()) + } else { + assert.Equal(t, "2976.39", report.Values[0].(*datastreamsllo.Decimal).String()) + } + } + }) + + // Scenario 2: Adders add new channels + t.Run("Adders add new channels", func(t *testing.T) { + // Adder1 adds channels + adder1Definitions := llotypes.ChannelDefinitions{ + 10: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: ethStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + Source: adder1ID, + Tombstone: false, + }, + 11: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: linkStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + Source: adder1ID, + Tombstone: false, + }, + } + + adder1DefinitionsJSON, err := json.MarshalIndent(adder1Definitions, "", " ") + require.NoError(t, err) + adder1DefinitionsSHA := sha3.Sum256(adder1DefinitionsJSON) + + adder1Server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Headers must be set before the first Write: once the body is written the 200 status is locked in and later Header().Set or WriteHeader calls are silently ignored. + w.Header().Set("Content-Type", "application/json") + _, errWrite := w.Write(adder1DefinitionsJSON) + assert.NoError(t, errWrite) + })) + t.Cleanup(adder1Server.Close) + + _, err = configStore.AddChannelDefinitions(adder1, donID, adder1ID, adder1Server.URL, adder1DefinitionsSHA) + require.NoError(t, err) + backend.Commit() + + // Adder2 adds channels + adder2Definitions := llotypes.ChannelDefinitions{ + 20: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: ethStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + 21: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: linkStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + } + + adder2DefinitionsJSON, err := json.MarshalIndent(adder2Definitions, "", " ") + require.NoError(t, err) + adder2DefinitionsSHA := sha3.Sum256(adder2DefinitionsJSON) + + adder2Server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, errWrite := w.Write(adder2DefinitionsJSON) + assert.NoError(t, errWrite) + })) + t.Cleanup(adder2Server.Close) + + _, err = configStore.AddChannelDefinitions(adder2, donID, adder2ID, adder2Server.URL, adder2DefinitionsSHA) + require.NoError(t, err) + backend.Commit() + + // Wait for channel definitions to be processed (give time for log polling and fetching) + time.Sleep(3 * time.Second) + + // Wait for reports from all channels (owner + adders) + expectedChannels := map[uint32]bool{1: true, 2: true, 3: true, 10: true, 11: true, 20: true, 21: true} + waitForReportsFromChannels(t, expectedChannels, reportTimeout) + + // Verify all channels have reports + for channelID := range expectedChannels { + require.NotEmpty(t, reportsByChannel[channelID], "channel %d should have reports", channelID) + } + }) + + // Scenario 3: Owner tombstones some channels + t.Run("Owner tombstones channels", func(t *testing.T) { + // Owner updates definitions, adding tombstones to channels 2 and 21 + channelDefinitions := llotypes.ChannelDefinitions{ + 1: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: ethStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + 2: { + ReportFormat: llotypes.ReportFormatJSON, + Tombstone: true, + Streams: []llotypes.Stream{ + { + StreamID: ethStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + 3: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: linkStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + 21: { + ReportFormat: llotypes.ReportFormatJSON, + Tombstone: true, + Streams: []llotypes.Stream{ + { + StreamID: linkStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + } + url, sha := newChannelDefinitionsServer(t, channelDefinitions) + + // Set channel definitions with tombstoned channels 2 and 21 + _, err := configStore.SetChannelDefinitions(steve, donID, url, sha) + require.NoError(t, err) + backend.Commit() + + // Verify that channels 2 and 21 stop producing reports after tombstoning + // We wait for a period where we don't see reports from these channels + tombstonedChannels := map[uint32]bool{2: true, 21: true} + checkPeriod := 5 * time.Second + + require.Eventually(t, func() bool { + // Collect reports for a period and verify tombstoned 
channels don't appear + startTime := time.Now() + seenTombstonedChannels := make(map[uint32]bool) + + for time.Since(startTime) < checkPeriod { + pckt, err := receiveWithTimeout(t, packetCh, 1*time.Second) + if err != nil { + // Timeout is okay, continue checking + continue + } + req := pckt.req + if req.ReportFormat == uint32(llotypes.ReportFormatJSON) { + _, _, r, _, err := (datastreamsllo.JSONReportCodec{}).UnpackDecode(req.Payload) + if err == nil && tombstonedChannels[r.ChannelID] { + seenTombstonedChannels[r.ChannelID] = true + } + } + } + + // Success if we didn't see any reports from tombstoned channels + return len(seenTombstonedChannels) == 0 + }, 30*time.Second, 100*time.Millisecond, "channels 2 and 21 should stop producing reports after tombstoning") + }) + + // Scenario 4: Owner overwrites adder channel + t.Run("Owner overwrites adder channel", func(t *testing.T) { + // Owner sets a channel definition with same ID as adder1's channel 10 + channelDefinitions := llotypes.ChannelDefinitions{ + 1: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: ethStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + 3: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: linkStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + 10: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: linkStreamID, // Changed from ethStreamID to linkStreamID + Aggregator: llotypes.AggregatorMedian, + }, + }, + }, + } + url, sha := newChannelDefinitionsServer(t, channelDefinitions) + + // Set channel definitions + _, err := configStore.SetChannelDefinitions(steve, donID, url, sha) + require.NoError(t, err) + backend.Commit() + + // Wait for channel definitions to be processed + time.Sleep(10 * time.Second) + + // Wait for reports from channel 10 and verify it eventually uses owner's configuration (linkStreamID) + // The owner's definition should take precedence over the adder's definition + foundOwnerReport := false + deadline := time.Now().Add(reportTimeout) + for time.Now().Before(deadline) && !foundOwnerReport { + pckt, err := receiveWithTimeout(t, packetCh, 2*time.Second) + if err != nil { + continue + } + req := pckt.req + if req.ReportFormat == uint32(llotypes.ReportFormatJSON) { + _, _, r, _, err := (datastreamsllo.JSONReportCodec{}).UnpackDecode(req.Payload) + if err == nil && r.ChannelID == 10 { + // Check if it has linkStream value (13.25) - owner's configuration + // It might still have ethStream value (2976.39) initially, but should eventually switch + value := r.Values[0].(*datastreamsllo.Decimal).String() + if value == "13.25" { + foundOwnerReport = true + } + } + } + } + assert.True(t, foundOwnerReport, "should eventually receive report from channel 10 with owner's configuration (linkStream=13.25) after overwrite") + }) + + // Scenario 5: Verify adder cannot remove channels + t.Run("Adder cannot remove channels", func(t *testing.T) { + // Adder1 tries to set definitions that exclude channel 11 (which they previously added) + adder1NewDefinitions := llotypes.ChannelDefinitions{ + 10: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: ethStreamID, + Aggregator: llotypes.AggregatorMedian, + }, + }, + Source: adder1ID, + Tombstone: false, + }, + // Channel 11 is intentionally omitted + } + + adder1NewDefinitionsJSON, err := json.MarshalIndent(adder1NewDefinitions, "", " ") + require.NoError(t, err) + 
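+ // The on-chain record is the pair (url, sha): sha is the SHA3-256 of the exact JSON bytes served at url,
+ // and nodes re-hash the fetched body, rejecting it on mismatch (the "SHA3 mismatch" cases exercised in the channel definition cache tests below).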
adder1NewDefinitionsSHA := sha3.Sum256(adder1NewDefinitionsJSON) + + adder1NewServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Set headers before writing the body; assert (not require) is used because this runs on the server goroutine. + w.Header().Set("Content-Type", "application/json") + _, errWrite := w.Write(adder1NewDefinitionsJSON) + assert.NoError(t, errWrite) + })) + t.Cleanup(adder1NewServer.Close) + + _, err = configStore.AddChannelDefinitions(adder1, donID, adder1ID, adder1NewServer.URL, adder1NewDefinitionsSHA) + require.NoError(t, err) + backend.Commit() + + // Wait for processing + time.Sleep(3 * time.Second) + + // Verify channel 11 still produces reports (adder cannot remove it) + foundChannel11Report := false + deadline := time.Now().Add(5 * time.Second) + for time.Now().Before(deadline) && !foundChannel11Report { + pckt, err := receiveWithTimeout(t, packetCh, 1*time.Second) + if err != nil { + continue + } + req := pckt.req + if req.ReportFormat == uint32(llotypes.ReportFormatJSON) { + _, _, r, _, err := (datastreamsllo.JSONReportCodec{}).UnpackDecode(req.Payload) + if err == nil && r.ChannelID == 11 { + foundChannel11Report = true + } + } + } + assert.True(t, foundChannel11Report, "channel 11 should still produce reports (adder cannot remove)") + }) + }) +} + func setupNodes(t *testing.T, nNodes int, backend evmtypes.Backend, clientCSAKeys []csakey.KeyV2, f func(*chainlink.Config)) (oracles []confighelper.OracleIdentityExtra, nodes []Node) { ports := freeport.GetN(t, nNodes) for i := range nNodes { @@ -2205,10 +2724,11 @@ func newChannelDefinitionsServer(t *testing.T, channelDefinitions llotypes.Chann channelDefinitionsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "GET", r.Method) assert.Equal(t, "application/json", r.Header.Get("Content-Type")) w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - _, err := w.Write(channelDefinitionsJSON) - require.NoError(t, err) + // Write after the headers have been sent; assert (not require) on the server goroutine. + _, errWrite := w.Write(channelDefinitionsJSON) + assert.NoError(t, errWrite) })) t.Cleanup(channelDefinitionsServer.Close) return channelDefinitionsServer.URL, channelDefinitionsSHA diff --git a/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go b/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go index dc410f58d5f..3a495a6f466 100644 --- a/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go +++ b/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go @@ -9,6 +9,7 @@ import ( "io" "math/rand" "net/http" + "strconv" "sync" "testing" "time" @@ -37,25 +38,50 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/llo" "github.com/smartcontractkit/chainlink/v2/core/services/llo/channeldefinitions" + llotypes2 "github.com/smartcontractkit/chainlink/v2/core/services/llo/types" ) type mockHTTPClient struct { - resp *http.Response - err error - mu sync.Mutex + responses map[string]*http.Response + errors map[string]error + mu sync.Mutex } func (h *mockHTTPClient) Do(req *http.Request) (*http.Response, error) { h.mu.Lock() defer h.mu.Unlock() - return h.resp, h.err + url := req.URL.String() + // Check for URL-specific response first + if err, ok := h.errors[url]; ok { + return nil, err + } + if resp, ok := h.responses[url]; ok { + return resp, nil + } + // Fall 
back to default response (for backward compatibility with old tests) + if err, ok := h.errors[""]; ok { + return nil, err + } + if resp, ok := h.responses[""]; ok { + return resp, nil + } + return nil, fmt.Errorf("no response configured for URL: %s", url) } -func (h *mockHTTPClient) SetResponse(resp *http.Response, err error) { +func (h *mockHTTPClient) SetResponseForURL(url string, resp *http.Response, err error) { h.mu.Lock() defer h.mu.Unlock() - h.resp = resp - h.err = err + if h.responses == nil { + h.responses = make(map[string]*http.Response) + h.errors = make(map[string]error) + } + if err != nil { + h.errors[url] = err + delete(h.responses, url) + } else { + h.responses[url] = resp + delete(h.errors, url) + } } type MockReadCloser struct { @@ -86,7 +112,36 @@ func (m *MockReadCloser) Close() error { return err } +// extractChannelDefinitions unmarshals json.RawMessage and merges all channel definitions from source definitions into a single map +func extractChannelDefinitions(defsJSON json.RawMessage) llotypes.ChannelDefinitions { + var sourceDefs map[uint32]llotypes2.SourceDefinition + if err := json.Unmarshal(defsJSON, &sourceDefs); err != nil { + return make(llotypes.ChannelDefinitions) + } + result := make(llotypes.ChannelDefinitions) + for _, sourceDef := range sourceDefs { + for channelID, def := range sourceDef.Definitions { + result[channelID] = def + } + } + return result +} + +// countChannels unmarshals json.RawMessage and counts the total number of channels across all source definitions +func countChannels(defsJSON json.RawMessage) int { + var sourceDefs map[uint32]llotypes2.SourceDefinition + if err := json.Unmarshal(defsJSON, &sourceDefs); err != nil { + return 0 + } + count := 0 + for _, sourceDef := range sourceDefs { + count += len(sourceDef.Definitions) + } + return count +} + func Test_ChannelDefinitionCache_Integration(t *testing.T) { + t.Parallel() var ( invalidDefinitions = []byte(`{{{`) invalidDefinitionsSHA = sha3.Sum256(invalidDefinitions) @@ -104,6 +159,8 @@ func Test_ChannelDefinitionCache_Integration(t *testing.T) { Aggregator: llotypes.AggregatorMode, }, }, + Tombstone: false, + Source: channeldefinitions.SourceOwner, }, 2: { ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, @@ -121,7 +178,9 @@ func Test_ChannelDefinitionCache_Integration(t *testing.T) { Aggregator: llotypes.AggregatorQuote, }, }, - Opts: llotypes.ChannelOpts([]byte(`{"baseUSDFee":"0.1","expirationWindow":86400,"feedId":"0x0003aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","multiplier":"1000000000000000000"}`)), + Opts: llotypes.ChannelOpts([]byte(`{"baseUSDFee":"0.1","expirationWindow":86400,"feedId":"0x0003aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","multiplier":"1000000000000000000"}`)), + Tombstone: false, + Source: channeldefinitions.SourceOwner, }, } ) @@ -166,209 +225,1247 @@ func Test_ChannelDefinitionCache_Integration(t *testing.T) { servicetest.Run(t, cdc) t.Run("before any logs, returns empty Definitions", func(t *testing.T) { - assert.Empty(t, cdc.Definitions()) + assert.Empty(t, cdc.Definitions(llotypes.ChannelDefinitions{})) + }) + + t.Run("with sha mismatch, should not update", func(t *testing.T) { + // clear the log messages + t.Cleanup(func() { observedLogs.TakeAll() }) + + { + url := "http://example.com/foo" + rc := NewMockReadCloser(invalidDefinitions) + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, 
url, sampleDefinitionsSHA))) + + backend.Commit() + } + + testutils.WaitForLogMessageWithField(t, observedLogs, + "Error while fetching channel definitions", + "err", "SHA3 mismatch for channel definitions") + + assert.Empty(t, cdc.Definitions(llotypes.ChannelDefinitions{})) + }) + + t.Run("after correcting sha with new channel definitions set on-chain, but with invalid JSON at url, should not update", func(t *testing.T) { + // clear the log messages before waiting for new ones + observedLogs.TakeAll() + + { + url := "http://example.com/foo" + rc := NewMockReadCloser(invalidDefinitions) + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, invalidDefinitionsSHA))) + backend.Commit() + } + + testutils.WaitForLogMessageWithField(t, observedLogs, + "Error while fetching channel definitions", + "err", "failed to fetch channel definitions: failed to decode channel definitions JSON from http://example.com/foo: invalid character '{' looking for beginning of object key string") + assert.Empty(t, cdc.Definitions(llotypes.ChannelDefinitions{})) + }) + + t.Run("if server returns 404, should not update", func(t *testing.T) { + // clear the log messages before waiting for new ones + observedLogs.TakeAll() + + { + rc := NewMockReadCloser([]byte("not found")) + url := "http://example.com/foo3" + client.SetResponseForURL(url, &http.Response{ + StatusCode: 404, + Body: rc, + }, nil) + + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, sampleDefinitionsSHA))) + backend.Commit() + } + + testutils.WaitForLogMessageWithField(t, observedLogs, + "Error while fetching channel definitions", + "err", "(status 404): not found") + }) + + t.Run("if server starts returning empty body, still does not update", func(t *testing.T) { + // clear the log messages before waiting for new ones + observedLogs.TakeAll() + + { + rc := NewMockReadCloser([]byte{}) + url := "http://example.com/foo3" + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + } + + testutils.WaitForLogMessageWithField(t, observedLogs, + "Error while fetching channel definitions", "err", "failed to fetch channel definitions: SHA3 mismatch for channel definitions") + }) + + t.Run("when URL starts returning valid JSON, updates even without needing new logs", func(t *testing.T) { + // clear the log messages before waiting for new ones + observedLogs.TakeAll() + + { + rc := NewMockReadCloser(sampleDefinitionsJSON) + url := "http://example.com/foo3" + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + } + + // Wait for the log trigger to be processed + le := testutils.WaitForLogMessage(t, observedLogs, "Set channel definitions for source") + fields := le.ContextMap() + assert.Contains(t, fields, "source") + assert.Contains(t, fields, "url") + assert.Contains(t, fields, "sha") + assert.Contains(t, fields, "blockNum") + assert.NotContains(t, fields, "err") + + assert.Equal(t, channeldefinitions.SourceOwner, fields["source"]) + assert.Equal(t, "http://example.com/foo3", fields["url"]) + assert.Equal(t, hex.EncodeToString(sampleDefinitionsSHA[:]), fields["sha"]) + + // Wait for definitions to be fetched and merged + require.Eventually(t, func() bool { + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + return len(defs) > 0 + }, 5*time.Second, 100*time.Millisecond, "definitions should be available") + + 
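+ // Definitions(prev) overlays the per-source definitions onto prev; with only the owner source configured, the merged view equals the owner's definitions exactly.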
assert.Equal(t, sampleDefinitions, cdc.Definitions(llotypes.ChannelDefinitions{})) + + t.Run("latest channel definitions are persisted", func(t *testing.T) { + // Wait for initial persistence to complete (persistLoop periodically persists source definitions) + var prevOutcome *llotypes2.PersistedDefinitions + require.Eventually(t, func() bool { + loaded, err := orm.LoadChannelDefinitions(testutils.Context(t), configStoreAddress, donID) + if err != nil || loaded == nil { + return false + } + // Check if we have the expected number of channels across all sources + if countChannels(loaded.Definitions) != len(sampleDefinitions) { + return false + } + prevOutcome = loaded + return true + }, 5*time.Second, 100*time.Millisecond, "channel definitions should be persisted") + require.NotNil(t, prevOutcome, "previous outcome should be loaded from database") + + // Simulate plugin behavior: call Definitions() with merged channel definitions from previous outcome + // Definitions() merges source definitions with prev and returns the result + // Persistence happens separately via persistLoop, which stores c.definitions.Sources + _ = cdc.Definitions(extractChannelDefinitions(prevOutcome.Definitions)) + + // Wait for persistence to complete after calling Definitions() with previous outcome + var pd *llotypes2.PersistedDefinitions + require.Eventually(t, func() bool { + loaded, err := orm.LoadChannelDefinitions(testutils.Context(t), configStoreAddress, donID) + if err != nil || loaded == nil { + return false + } + // Check if we have the expected number of channels across all sources + if countChannels(loaded.Definitions) != len(sampleDefinitions) { + return false + } + pd = loaded + return true + }, 5*time.Second, 100*time.Millisecond, "channel definitions should be persisted after calling Definitions() with previous outcome") + require.NotNil(t, pd) + assert.Equal(t, ETHMainnetChainSelector, pd.ChainSelector) + assert.Equal(t, configStoreAddress, pd.Address) + // Verify the structure matches - extract and compare channel definitions + extractedDefs := extractChannelDefinitions(pd.Definitions) + assert.Len(t, extractedDefs, len(sampleDefinitions)) + for channelID, expectedDef := range sampleDefinitions { + actualDef, exists := extractedDefs[channelID] + assert.True(t, exists, "channel %d should exist", channelID) + assert.Equal(t, expectedDef.ReportFormat, actualDef.ReportFormat) + assert.Equal(t, expectedDef.Streams, actualDef.Streams) + } + assert.Equal(t, donID, pd.DonID) + // persist() stores c.definitions.Sources (source definitions) to the database. + // The version comes from c.definitions.Version which is set from the latest owner trigger in the source definitions. 
+ assert.GreaterOrEqual(t, pd.Version, prevOutcome.Version, "version should be >= previous outcome version") + }) + + t.Run("new cdc with same config should load from DB", func(t *testing.T) { + // fromBlock far in the future to ensure logs are not used + cdc2 := channeldefinitions.NewChannelDefinitionCache(logger.NullLogger, orm, client, lp, configStoreAddress, donID, 1000) + servicetest.Run(t, cdc2) + // Load the persisted source definitions from DB + // The cache loads source definitions (map[uint32]types.SourceDefinition) from the database + // Definitions(prev) merges the loaded source definitions from c.definitions.Sources with prev + // Since source definitions are loaded from DB for a new cache, it should merge them with prev + loaded, err := orm.LoadChannelDefinitions(testutils.Context(t), configStoreAddress, donID) + require.NoError(t, err) + require.NotNil(t, loaded) + require.Equal(t, sampleDefinitions, extractChannelDefinitions(loaded.Definitions)) + }) + }) + + t.Run("new log with invalid channel definitions URL does not affect old channel definitions", func(t *testing.T) { + // clear the log messages + observedLogs.TakeAll() + { + url := "not a real URL" + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, sampleDefinitionsSHA))) + client.SetResponseForURL(url, nil, errors.New("failed; not a real URL")) + backend.Commit() + } + + testutils.WaitForLogMessageWithField(t, observedLogs, "Error while fetching channel definitions", "err", "invalid URI for request") + }) + + t.Run("new valid definitions set on-chain, should update", func(t *testing.T) { + // clear the log messages before waiting for new ones + observedLogs.TakeAll() + + { + // add a new definition, it should get loaded + sampleDefinitions[3] = llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + { + StreamID: 6, + Aggregator: llotypes.AggregatorMedian, + }, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + } + var err error + sampleDefinitionsJSON, err = json.MarshalIndent(sampleDefinitions, "", " ") + require.NoError(t, err) + sampleDefinitionsSHA = sha3.Sum256(sampleDefinitionsJSON) + rc := NewMockReadCloser(sampleDefinitionsJSON) + url := "http://example.com/foo5" + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, sampleDefinitionsSHA))) + + backend.Commit() + } + + // Wait for the log trigger to be processed + le := testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", "http://example.com/foo5") + fields := le.ContextMap() + assert.Contains(t, fields, "source") + assert.Contains(t, fields, "url") + assert.Contains(t, fields, "sha") + assert.Contains(t, fields, "blockNum") + assert.NotContains(t, fields, "err") + + assert.Equal(t, channeldefinitions.SourceOwner, fields["source"]) + assert.Equal(t, "http://example.com/foo5", fields["url"]) + assert.Equal(t, hex.EncodeToString(sampleDefinitionsSHA[:]), fields["sha"]) + + // Wait for definitions to be fetched and merged + require.Eventually(t, func() bool { + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + return len(defs) == len(sampleDefinitions) + }, 5*time.Second, 100*time.Millisecond, "definitions should be updated") + + assert.Equal(t, sampleDefinitions, cdc.Definitions(llotypes.ChannelDefinitions{})) + }) + + t.Run("latest channel definitions are persisted and overwrite 
previous value", func(t *testing.T) { + // Wait for initial persistence to complete (persistLoop periodically persists source definitions) + var prev *llotypes2.PersistedDefinitions + require.Eventually(t, func() bool { + loaded, err := orm.LoadChannelDefinitions(testutils.Context(t), configStoreAddress, donID) + if err != nil || loaded == nil { + return false + } + // Check if we have the expected number of channels across all sources + // Definitions is a map[uint32]types.SourceDefinition, so we need to count channels across all sources + if countChannels(loaded.Definitions) != len(sampleDefinitions) { + return false + } + prev = loaded + return true + }, 5*time.Second, 100*time.Millisecond, "latest channel definitions should be loaded from database") + require.NotNil(t, prev, "latest channel definitions should be loaded from database") + + // Simulate plugin behavior: call Definitions() with merged channel definitions from previous outcome + // Definitions() merges source definitions with prev and returns the result + // Persistence happens separately via persistLoop, which stores c.definitions.Sources + _ = cdc.Definitions(extractChannelDefinitions(prev.Definitions)) + + // Wait for persistence to complete after calling Definitions() with previous outcome + var pd *llotypes2.PersistedDefinitions + require.Eventually(t, func() bool { + loaded, err := orm.LoadChannelDefinitions(testutils.Context(t), configStoreAddress, donID) + if err != nil || loaded == nil { + return false + } + // Check if we have the expected number of channels across all sources + if countChannels(loaded.Definitions) != len(sampleDefinitions) { + return false + } + pd = loaded + return true + }, 5*time.Second, 100*time.Millisecond, "channel definitions should be persisted after calling Definitions() with previous outcome") + require.NotNil(t, pd) + assert.Equal(t, ETHMainnetChainSelector, pd.ChainSelector) + assert.Equal(t, configStoreAddress, pd.Address) + // Verify the structure matches - extract channel definitions from persisted source definitions + extractedDefs := extractChannelDefinitions(pd.Definitions) + assert.Len(t, extractedDefs, len(sampleDefinitions)) + for channelID, expectedDef := range sampleDefinitions { + actualDef, exists := extractedDefs[channelID] + assert.True(t, exists, "channel %d should exist", channelID) + assert.Equal(t, expectedDef.ReportFormat, actualDef.ReportFormat) + assert.Equal(t, expectedDef.Streams, actualDef.Streams) + } + assert.Equal(t, donID, pd.DonID) + // persist() stores c.definitions.Sources (source definitions) to the database. + // The version comes from c.definitions.Version which is set from the latest owner trigger in the source definitions. 
+ assert.GreaterOrEqual(t, pd.Version, prev.Version, "version should be >= previous outcome version") }) - { - rc := NewMockReadCloser(invalidDefinitions) - client.SetResponse(&http.Response{ + t.Run("migration from SingleChannelDefinitionsFormat to MultiChannelDefinitionsFormat preserves metadata", func(t *testing.T) { + migrationDonID := rand.Uint32() + migrationVersion := uint32(1) + migrationBlockNum := int64(1) + migrationChainSelector := ETHMainnetChainSelector + + // Create old format definitions (just ChannelDefinitions, no source wrapper) + oldFormatDefs := llotypes.ChannelDefinitions{ + 1: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + {StreamID: 2, Aggregator: llotypes.AggregatorMode}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + 2: { + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{ + {StreamID: 3, Aggregator: llotypes.AggregatorQuote}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + } + + oldFormatJSON, err := json.Marshal(oldFormatDefs) + require.NoError(t, err) + + pgtest.MustExec(t, db, ` + INSERT INTO channel_definitions(addr, chain_selector, don_id, definitions, block_num, version, updated_at, format) + VALUES ($1, $2, $3, $4, $5, $6, NOW(), $7) + `, configStoreAddress, migrationChainSelector, migrationDonID, oldFormatJSON, migrationBlockNum, migrationVersion, channeldefinitions.SingleChannelDefinitionsFormat) + + // Verify old format data in database + oldPD, err := orm.LoadChannelDefinitions(testutils.Context(t), configStoreAddress, migrationDonID) + require.NoError(t, err) + require.NotNil(t, oldPD) + assert.Equal(t, migrationChainSelector, oldPD.ChainSelector) + assert.Equal(t, configStoreAddress, oldPD.Address) + assert.Equal(t, migrationDonID, oldPD.DonID) + assert.Equal(t, migrationVersion, oldPD.Version) + assert.Equal(t, migrationBlockNum, oldPD.BlockNum) + assert.Equal(t, channeldefinitions.SingleChannelDefinitionsFormat, oldPD.Format) + + // Create a new cache - it should load the metadata but not the definitions + cdcMigration := channeldefinitions.NewChannelDefinitionCache(logger.NullLogger, orm, client, lp, configStoreAddress, migrationDonID, 0, channeldefinitions.WithLogPollInterval(100*time.Millisecond)) + servicetest.Run(t, cdcMigration) + + // Verify that metadata was loaded but definitions are empty + // The cache should have loaded Version and BlockNum from the old format data + defs := cdcMigration.Definitions(llotypes.ChannelDefinitions{}) + assert.Empty(t, defs, "definitions should be empty when format is SingleChannelDefinitionsFormat") + + // Now trigger new definitions to be persisted (this will migrate to new format) + // Set up new definitions that will be fetched + newDefinitions := llotypes.ChannelDefinitions{ + 1: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + {StreamID: 2, Aggregator: llotypes.AggregatorMode}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + 2: { + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{ + {StreamID: 3, Aggregator: llotypes.AggregatorQuote}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + 3: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 4, Aggregator: llotypes.AggregatorMedian}, + }, + Source: channeldefinitions.SourceOwner, + 
Tombstone: false, + }, + } + + newDefinitionsJSON, err := json.MarshalIndent(newDefinitions, "", " ") + require.NoError(t, err) + newDefinitionsSHA := sha3.Sum256(newDefinitionsJSON) + + // Set up HTTP client to return new definitions + rc := NewMockReadCloser(newDefinitionsJSON) + url := "http://example.com/migration-test.json" + client.SetResponseForURL(url, &http.Response{ StatusCode: 200, Body: rc, }, nil) - url := "http://example.com/foo" - require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, sampleDefinitionsSHA))) - + // Trigger new channel definitions on-chain + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, migrationDonID, url, newDefinitionsSHA))) backend.Commit() + + // Wait for definitions to be fetched and persisted + require.Eventually(t, func() bool { + defs := cdcMigration.Definitions(llotypes.ChannelDefinitions{}) + return len(defs) == len(newDefinitions) + }, 5*time.Second, 100*time.Millisecond, "new definitions should be available") + + // Wait for persistence to complete + var migratedPD *llotypes2.PersistedDefinitions + require.Eventually(t, func() bool { + var loaded *llotypes2.PersistedDefinitions + if loaded, err = orm.LoadChannelDefinitions(testutils.Context(t), configStoreAddress, migrationDonID); err != nil || loaded == nil { + return false + } + // Check that format has been migrated + if loaded.Format != channeldefinitions.MultiChannelDefinitionsFormat { + return false + } + migratedPD = loaded + return true + }, 5*time.Second, 100*time.Millisecond, "definitions should be migrated to MultiChannelDefinitionsFormat") + + require.NotNil(t, migratedPD) + + // Verify that all metadata is preserved + assert.Equal(t, migrationChainSelector, migratedPD.ChainSelector, "ChainSelector should be preserved") + assert.Equal(t, configStoreAddress, migratedPD.Address, "Address should be preserved") + assert.Equal(t, migrationDonID, migratedPD.DonID, "DonID should be preserved") + // Version should be preserved or updated (not reset to 0) + assert.GreaterOrEqual(t, migratedPD.Version, migrationVersion, "Version should be preserved or updated, not reset") + // BlockNum should be preserved or updated (not reset to 0) + assert.GreaterOrEqual(t, migratedPD.BlockNum, migrationBlockNum, "BlockNum should be preserved or updated, not reset") + + // Verify format has been migrated + assert.Equal(t, channeldefinitions.MultiChannelDefinitionsFormat, migratedPD.Format, "Format should be migrated to MultiChannelDefinitionsFormat") + + // Verify definitions are in new format (map[uint32]SourceDefinition) + var newFormatDefs map[uint32]llotypes2.SourceDefinition + err = json.Unmarshal(migratedPD.Definitions, &newFormatDefs) + require.NoError(t, err) + require.NotEmpty(t, newFormatDefs, "definitions should be in new format") + + // Verify the definitions contain the expected channels + extractedDefs := extractChannelDefinitions(migratedPD.Definitions) + assert.Len(t, extractedDefs, len(newDefinitions)) + for channelID, expectedDef := range newDefinitions { + actualDef, exists := extractedDefs[channelID] + assert.True(t, exists, "channel %d should exist", channelID) + assert.Equal(t, expectedDef.ReportFormat, actualDef.ReportFormat) + assert.Equal(t, expectedDef.Streams, actualDef.Streams) + } + }) +} + +func Test_ChannelDefinitionCache_OwnerAndAdderMerging(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.DebugLevel) + db := pgtest.NewSqlxDB(t) + const 
ETHMainnetChainSelector uint64 = 5009297550715157269 + orm := llo.NewChainScopedORM(db, ETHMainnetChainSelector) + + steve := evmtestutils.MustNewSimTransactor(t) // config contract deployer and owner + // Create adder accounts before creating backend + adder1 := evmtestutils.MustNewSimTransactor(t) + adder2 := evmtestutils.MustNewSimTransactor(t) + genesisData := types.GenesisAlloc{ + steve.From: {Balance: assets.Ether(1000).ToInt()}, + adder1.From: {Balance: assets.Ether(1000).ToInt()}, + adder2.From: {Balance: assets.Ether(1000).ToInt()}, } + backend := cltest.NewSimulatedBackend(t, genesisData, ethconfig.Defaults.Miner.GasCeil) + backend.Commit() // ensure starting block number at least 1 - t.Run("with sha mismatch, should not update", func(t *testing.T) { - // clear the log messages - t.Cleanup(func() { observedLogs.TakeAll() }) + ethClient := client.NewSimulatedBackendClient(t, backend, testutils.SimulatedChainID) - testutils.WaitForLogMessage(t, observedLogs, "Got new channel definitions from chain") - le := testutils.WaitForLogMessage(t, observedLogs, "Error while fetching channel definitions") - fields := le.ContextMap() - assert.Contains(t, fields, "err") - assert.Equal(t, fmt.Sprintf("SHA3 mismatch: expected %x, got %x", sampleDefinitionsSHA, invalidDefinitionsSHA), fields["err"]) + configStoreAddress, _, configStoreContract, err := channel_config_store.DeployChannelConfigStore(steve, backend.Client()) + require.NoError(t, err) + + backend.Commit() + + lpOpts := logpoller.Opts{ + PollPeriod: 100 * time.Millisecond, + FinalityDepth: 1, + BackfillBatchSize: 3, + RPCBatchSize: 2, + KeepFinalizedBlocksDepth: 1000, + } + ht := headstest.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller( + logpoller.NewORM(testutils.SimulatedChainID, db, lggr), ethClient, lggr, ht, lpOpts) + servicetest.Run(t, lp) + + client := &mockHTTPClient{} + donID := rand.Uint32() + + cdc := channeldefinitions.NewChannelDefinitionCache(lggr, orm, client, lp, configStoreAddress, donID, 0, channeldefinitions.WithLogPollInterval(100*time.Millisecond)) + servicetest.Run(t, cdc) + + // Configure adders on the contract + // Adder IDs start from 1000 + adder1ID := uint32(1001) + adder2ID := uint32(1002) + + require.NoError(t, utils.JustError(configStoreContract.SetChannelAdderAddress(steve, adder1ID, adder1.From))) + backend.Commit() + require.NoError(t, utils.JustError(configStoreContract.SetChannelAdderAddress(steve, adder2ID, adder2.From))) + backend.Commit() + + // Enable adders + require.NoError(t, utils.JustError(configStoreContract.SetChannelAdder(steve, donID, adder1ID, true))) + backend.Commit() + require.NoError(t, utils.JustError(configStoreContract.SetChannelAdder(steve, donID, adder2ID, true))) + backend.Commit() + + t.Run("adder can add new channels", func(t *testing.T) { + observedLogs.TakeAll() + + adder1Definitions := llotypes.ChannelDefinitions{ + 100: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + }, + Source: adder1ID, + Tombstone: false, + }, + 101: { + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{ + {StreamID: 2, Aggregator: llotypes.AggregatorMode}, + }, + Source: adder1ID, + Tombstone: false, + }, + } + + adder1DefinitionsJSON, err := json.MarshalIndent(adder1Definitions, "", " ") + require.NoError(t, err) + adder1DefinitionsSHA := sha3.Sum256(adder1DefinitionsJSON) + + url := "http://example.com/adder1-defs.json" 
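+ // Editorial note: SetResponseForURL stubs the mock HTTP client per URL; + // the cache fetches this URL only after seeing the on-chain log, and + // rejects any body whose SHA3-256 differs from the hash committed on-chain.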
+ rc := NewMockReadCloser(adder1DefinitionsJSON) + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + _, err = configStoreContract.AddChannelDefinitions(adder1, donID, adder1ID, url, adder1DefinitionsSHA) + require.NoError(t, err) + backend.Commit() + + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url) - assert.Empty(t, cdc.Definitions()) + require.Eventually(t, func() bool { + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + return len(defs) >= 2 + }, 5*time.Second, 100*time.Millisecond, "adder definitions should be available") + + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + assert.Equal(t, adder1Definitions[100], defs[100]) + assert.Equal(t, adder1Definitions[101], defs[101]) }) - { - rc := NewMockReadCloser(invalidDefinitions) - client.SetResponse(&http.Response{ + t.Run("adder cannot overwrite existing owner channels", func(t *testing.T) { + observedLogs.TakeAll() + + // Owner sets channel definitions first + ownerDefinitions := llotypes.ChannelDefinitions{ + 200: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + } + + ownerDefinitionsJSON, err := json.MarshalIndent(ownerDefinitions, "", " ") + require.NoError(t, err) + ownerDefinitionsSHA := sha3.Sum256(ownerDefinitionsJSON) + + url := "http://example.com/owner-defs.json" + rc := NewMockReadCloser(ownerDefinitionsJSON) + client.SetResponseForURL(url, &http.Response{ StatusCode: 200, Body: rc, }, nil) + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, ownerDefinitionsSHA))) + backend.Commit() + + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url) + + require.Eventually(t, func() bool { + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + return len(defs) >= 1 && defs[200].Source == channeldefinitions.SourceOwner + }, 5*time.Second, 100*time.Millisecond, "owner definitions should be available") + + // Now adder tries to add the same channel ID + observedLogs.TakeAll() - url := "http://example.com/foo" - require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, invalidDefinitionsSHA))) + adderAttemptDefinitions := llotypes.ChannelDefinitions{ + 200: { + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{ + {StreamID: 999, Aggregator: llotypes.AggregatorQuote}, + }, + Source: adder1ID, + Tombstone: false, + }, + } + + adderAttemptDefinitionsJSON, err := json.MarshalIndent(adderAttemptDefinitions, "", " ") + require.NoError(t, err) + adderAttemptDefinitionsSHA := sha3.Sum256(adderAttemptDefinitionsJSON) + + url2 := "http://example.com/adder-attempt.json" + rc = NewMockReadCloser(adderAttemptDefinitionsJSON) + client.SetResponseForURL(url2, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + _, err = configStoreContract.AddChannelDefinitions(adder1, donID, adder1ID, url2, adderAttemptDefinitionsSHA) + require.NoError(t, err) backend.Commit() - } - t.Run("after correcting sha with new channel definitions set on-chain, but with invalid JSON at url, should not update", func(t *testing.T) { - // clear the log messages - t.Cleanup(func() { observedLogs.TakeAll() }) + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url2) - testutils.WaitForLogMessage(t, observedLogs, "Got new channel definitions from chain") - 
testutils.WaitForLogMessageWithField(t, observedLogs, "Error while fetching channel definitions", "err", "failed to decode JSON: invalid character '{' looking for beginning of object key string") + // Wait a bit for processing + time.Sleep(500 * time.Millisecond) - assert.Empty(t, cdc.Definitions()) + // Verify adder's definition was skipped - owner's definition should still be there + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + assert.Equal(t, channeldefinitions.SourceOwner, defs[200].Source, "channel 200 should still be from owner") + assert.Equal(t, ownerDefinitions[200].Streams, defs[200].Streams, "channel 200 should have owner's streams") + + // Check for conflict warning log + testutils.WaitForLogMessageWithField(t, observedLogs, "channel adder conflict", + "channelID", "200") }) - { - rc := NewMockReadCloser([]byte("not found")) - client.SetResponse(&http.Response{ - StatusCode: 404, + t.Run("adder cannot overwrite existing adder channels", func(t *testing.T) { + observedLogs.TakeAll() + + // First adder adds a channel + adder1Defs := llotypes.ChannelDefinitions{ + 300: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + }, + Source: adder1ID, + Tombstone: false, + }, + } + + adder1DefsJSON, err := json.MarshalIndent(adder1Defs, "", " ") + require.NoError(t, err) + adder1DefsSHA := sha3.Sum256(adder1DefsJSON) + + url := "http://example.com/adder1-channel300.json" + rc := NewMockReadCloser(adder1DefsJSON) + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, Body: rc, }, nil) + _, err = configStoreContract.AddChannelDefinitions(adder1, donID, adder1ID, url, adder1DefsSHA) + require.NoError(t, err) + backend.Commit() + + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url) + + require.Eventually(t, func() bool { + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + return defs[300].Source == adder1ID + }, 5*time.Second, 100*time.Millisecond, "adder1 channel 300 should be available") + + // Second adder tries to add the same channel ID + observedLogs.TakeAll() - url := "http://example.com/foo3" - require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, sampleDefinitionsSHA))) + adder2Defs := llotypes.ChannelDefinitions{ + 300: { + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{ + {StreamID: 999, Aggregator: llotypes.AggregatorQuote}, + }, + Source: adder2ID, + Tombstone: false, + }, + } + + adder2DefsJSON, err := json.MarshalIndent(adder2Defs, "", " ") + require.NoError(t, err) + adder2DefsSHA := sha3.Sum256(adder2DefsJSON) + + url2 := "http://example.com/adder2-channel300.json" + rc = NewMockReadCloser(adder2DefsJSON) + client.SetResponseForURL(url2, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + _, err = configStoreContract.AddChannelDefinitions(adder2, donID, adder2ID, url2, adder2DefsSHA) + require.NoError(t, err) backend.Commit() - } - t.Run("if server returns 404, should not update", func(t *testing.T) { - // clear the log messages - t.Cleanup(func() { observedLogs.TakeAll() }) + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url2) - testutils.WaitForLogMessageWithField(t, observedLogs, "Error while fetching channel definitions", "err", "got error from http://example.com/foo3: (status code: 404, response body: not found)") + // Wait a bit for processing + time.Sleep(500 * time.Millisecond) + + // Verify second adder's 
definition was skipped + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + assert.Equal(t, adder1ID, defs[300].Source, "channel 300 should still be from adder1") + assert.Equal(t, adder1Defs[300].Streams, defs[300].Streams, "channel 300 should have adder1's streams") + + // Check for conflict warning log + testutils.WaitForLogMessageWithField(t, observedLogs, "channel adder conflict", + "channelID", "300") }) - { - rc := NewMockReadCloser([]byte{}) - client.SetResponse(&http.Response{ + t.Run("adder cannot tombstone channels", func(t *testing.T) { + observedLogs.TakeAll() + + // Adder tries to add a channel with Tombstone: true + adderTombstoneDefs := llotypes.ChannelDefinitions{ + 400: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + }, + Source: adder1ID, + Tombstone: true, // Adders cannot tombstone + }, + } + + adderTombstoneDefsJSON, err := json.MarshalIndent(adderTombstoneDefs, "", " ") + require.NoError(t, err) + adderTombstoneDefsSHA := sha3.Sum256(adderTombstoneDefsJSON) + + url := "http://example.com/adder-tombstone.json" + rc := NewMockReadCloser(adderTombstoneDefsJSON) + client.SetResponseForURL(url, &http.Response{ StatusCode: 200, Body: rc, }, nil) - } + _, err = configStoreContract.AddChannelDefinitions(adder1, donID, adder1ID, url, adderTombstoneDefsSHA) + require.NoError(t, err) + backend.Commit() - t.Run("if server starts returning empty body, still does not update", func(t *testing.T) { - // clear the log messages - t.Cleanup(func() { observedLogs.TakeAll() }) + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url) + + // Wait a bit for processing + time.Sleep(500 * time.Millisecond) - testutils.WaitForLogMessageWithField(t, observedLogs, "Error while fetching channel definitions", "err", fmt.Sprintf("SHA3 mismatch: expected %x, got %x", sampleDefinitionsSHA, sha3.Sum256([]byte{}))) + // Verify tombstone channel was skipped + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + _, exists := defs[400] + assert.False(t, exists, "channel 400 should not exist (tombstone skipped)") + + // Check for tombstone warning log + testutils.WaitForLogMessageWithField(t, observedLogs, "invalid channel tombstone", + "channelID", "400") }) - { - rc := NewMockReadCloser(sampleDefinitionsJSON) - client.SetResponse(&http.Response{ + t.Run("owner can overwrite adder channels", func(t *testing.T) { + observedLogs.TakeAll() + + // Adder adds a channel first + adderDefs := llotypes.ChannelDefinitions{ + 500: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + }, + Source: adder1ID, + Tombstone: false, + }, + } + + adderDefsJSON, err := json.MarshalIndent(adderDefs, "", " ") + require.NoError(t, err) + adderDefsSHA := sha3.Sum256(adderDefsJSON) + + url := "http://example.com/adder-channel500.json" + rc := NewMockReadCloser(adderDefsJSON) + client.SetResponseForURL(url, &http.Response{ StatusCode: 200, Body: rc, }, nil) - } + _, err = configStoreContract.AddChannelDefinitions(adder1, donID, adder1ID, url, adderDefsSHA) + require.NoError(t, err) + backend.Commit() - t.Run("when URL starts returning valid JSON, updates even without needing new logs", func(t *testing.T) { - // clear the log messages - t.Cleanup(func() { observedLogs.TakeAll() }) + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url) - le := testutils.WaitForLogMessage(t, observedLogs, "Set new 
channel definitions") - fields := le.ContextMap() - assert.Contains(t, fields, "version") - assert.Contains(t, fields, "url") - assert.Contains(t, fields, "sha") - assert.Contains(t, fields, "donID") - assert.NotContains(t, fields, "err") + require.Eventually(t, func() bool { + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + return defs[500].Source == adder1ID + }, 5*time.Second, 100*time.Millisecond, "adder channel 500 should be available") - assert.Equal(t, uint32(3), fields["version"]) - assert.Equal(t, "http://example.com/foo3", fields["url"]) - assert.Equal(t, hex.EncodeToString(sampleDefinitionsSHA[:]), fields["sha"]) - assert.Equal(t, donID, fields["donID"]) + // Owner sets new definitions that include the same channel ID with different values + observedLogs.TakeAll() - assert.Equal(t, sampleDefinitions, cdc.Definitions()) + ownerDefs := llotypes.ChannelDefinitions{ + 500: { + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{ + {StreamID: 999, Aggregator: llotypes.AggregatorQuote}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + } - t.Run("latest channel definitions are persisted", func(t *testing.T) { - pd, err := orm.LoadChannelDefinitions(testutils.Context(t), configStoreAddress, donID) - require.NoError(t, err) - assert.Equal(t, ETHMainnetChainSelector, pd.ChainSelector) - assert.Equal(t, configStoreAddress, pd.Address) - assert.Equal(t, sampleDefinitions, pd.Definitions) - assert.Equal(t, donID, pd.DonID) - assert.Equal(t, uint32(3), pd.Version) - }) + ownerDefsJSON, err := json.MarshalIndent(ownerDefs, "", " ") + require.NoError(t, err) + ownerDefsSHA := sha3.Sum256(ownerDefsJSON) - t.Run("new cdc with same config should load from DB", func(t *testing.T) { - // fromBlock far in the future to ensure logs are not used - cdc2 := channeldefinitions.NewChannelDefinitionCache(lggr, orm, client, lp, configStoreAddress, donID, 1000) - servicetest.Run(t, cdc2) + url2 := "http://example.com/owner-overwrite.json" + rc = NewMockReadCloser(ownerDefsJSON) + client.SetResponseForURL(url2, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url2, ownerDefsSHA))) + backend.Commit() - assert.Equal(t, sampleDefinitions, cdc.Definitions()) - }) + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url2) + + require.Eventually(t, func() bool { + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + return defs[500].Source == channeldefinitions.SourceOwner + }, 5*time.Second, 100*time.Millisecond, "owner should have overwritten channel 500") + + // Verify owner's definition overwrote the adder's + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + assert.Equal(t, channeldefinitions.SourceOwner, defs[500].Source, "channel 500 should be from owner") + assert.Equal(t, ownerDefs[500].Streams, defs[500].Streams, "channel 500 should have owner's streams") }) - { - url := "not a real URL" - require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, sampleDefinitionsSHA))) + t.Run("owner cannot implicitly remove channels", func(t *testing.T) { + observedLogs.TakeAll() + + // Start with channels from owner and adders + ownerDefs := llotypes.ChannelDefinitions{ + 600: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + 601: 
{ + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 2, Aggregator: llotypes.AggregatorMode}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + } + + ownerDefsJSON, err := json.MarshalIndent(ownerDefs, "", " ") + require.NoError(t, err) + ownerDefsSHA := sha3.Sum256(ownerDefsJSON) + url := "http://example.com/owner-channels600-601.json" + rc := NewMockReadCloser(ownerDefsJSON) + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, ownerDefsSHA))) backend.Commit() - client.SetResponse(nil, errors.New("failed; not a real URL")) - } + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url) - t.Run("new log with invalid channel definitions URL does not affect old channel definitions", func(t *testing.T) { - // clear the log messages - t.Cleanup(func() { observedLogs.TakeAll() }) + // Adder adds a channel + observedLogs.TakeAll() - le := testutils.WaitForLogMessage(t, observedLogs, "Error while fetching channel definitions") - fields := le.ContextMap() - assert.Contains(t, fields, "err") - assert.Equal(t, "error making http request: failed; not a real URL", fields["err"]) + adderDefs := llotypes.ChannelDefinitions{ + 602: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 3, Aggregator: llotypes.AggregatorMedian}, + }, + Source: adder1ID, + Tombstone: false, + }, + } + + adderDefsJSON, err := json.MarshalIndent(adderDefs, "", " ") + require.NoError(t, err) + adderDefsSHA := sha3.Sum256(adderDefsJSON) + + url2 := "http://example.com/adder-channel602.json" + rc = NewMockReadCloser(adderDefsJSON) + client.SetResponseForURL(url2, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + _, err = configStoreContract.AddChannelDefinitions(adder1, donID, adder1ID, url2, adderDefsSHA) + require.NoError(t, err) + backend.Commit() + + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url2) + + require.Eventually(t, func() bool { + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + return len(defs) >= 3 + }, 5*time.Second, 100*time.Millisecond, "all channels should be available") + + // Owner sets new definitions that exclude channel 600 + observedLogs.TakeAll() + + ownerDefsUpdated := llotypes.ChannelDefinitions{ + 601: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 2, Aggregator: llotypes.AggregatorMode}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + // Channel 600 is excluded - but it must NOT be implicitly removed + } + + ownerDefsUpdatedJSON, err := json.MarshalIndent(ownerDefsUpdated, "", " ") + require.NoError(t, err) + ownerDefsUpdatedSHA := sha3.Sum256(ownerDefsUpdatedJSON) + + url3 := "http://example.com/owner-removed-600.json" + rc = NewMockReadCloser(ownerDefsUpdatedJSON) + client.SetResponseForURL(url3, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url3, ownerDefsUpdatedSHA))) + backend.Commit() + + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url3) + + require.Eventually(t, func() bool { + defs := cdc.Definitions(ownerDefs) + _, has600 := defs[600] + _, has601 := defs[601] + _, has602 := defs[602] + return has600 && has601 && has602 + }, 5*time.Second, 100*time.Millisecond, "channels 600, 601 and 602 should still be present") })
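+ // Editorial note: omission from a later owner payload is not deletion - + // Definitions() merges fetched updates over the caller-supplied base set, + // so a channel only disappears via an explicit owner tombstone (next subtest).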
present") }) - { - // add a new definition, it should get loaded - sampleDefinitions[3] = llotypes.ChannelDefinition{ - ReportFormat: llotypes.ReportFormatJSON, - Streams: []llotypes.Stream{ - { - StreamID: 6, - Aggregator: llotypes.AggregatorMedian, + t.Run("owner can remove channels explicitly", func(t *testing.T) { + observedLogs.TakeAll() + + // Start with channels from owner and adders + ownerDefs := llotypes.ChannelDefinitions{ + 600: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + 601: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 2, Aggregator: llotypes.AggregatorMode}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + } + + // Owner sets new definitions that exclude channel 600 + ownerDefsUpdated := llotypes.ChannelDefinitions{ + 600: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, }, + Source: channeldefinitions.SourceOwner, + Tombstone: true, }, } - var err error - sampleDefinitionsJSON, err = json.MarshalIndent(sampleDefinitions, "", " ") + + ownerDefsUpdatedJSON, err := json.MarshalIndent(ownerDefsUpdated, "", " ") require.NoError(t, err) - sampleDefinitionsSHA = sha3.Sum256(sampleDefinitionsJSON) - rc := NewMockReadCloser(sampleDefinitionsJSON) - client.SetResponse(&http.Response{ + ownerDefsUpdatedSHA := sha3.Sum256(ownerDefsUpdatedJSON) + + url3 := "http://example.com/owner-removed-600.json" + rc := NewMockReadCloser(ownerDefsUpdatedJSON) + client.SetResponseForURL(url3, &http.Response{ StatusCode: 200, Body: rc, }, nil) + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url3, ownerDefsUpdatedSHA))) + backend.Commit() + + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url3) + + require.Eventually(t, func() bool { + defs := cdc.Definitions(ownerDefs) + def600 := defs[600] + _, has601 := defs[601] + _, has602 := defs[602] + return def600.Tombstone && has601 && has602 + }, 5*time.Second, 100*time.Millisecond, "channel 600 should be removed, 601 and 602 should still be present") + }) + + t.Run("multiple adders can add different channels", func(t *testing.T) { + observedLogs.TakeAll() + + // Adder1 adds channels + adder1Defs := llotypes.ChannelDefinitions{ + 700: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + }, + Source: adder1ID, + Tombstone: false, + }, + } - url := "http://example.com/foo5" - require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url, sampleDefinitionsSHA))) + adder1DefsJSON, err := json.MarshalIndent(adder1Defs, "", " ") + require.NoError(t, err) + adder1DefsSHA := sha3.Sum256(adder1DefsJSON) + url := "http://example.com/adder1-channel700.json" + rc := NewMockReadCloser(adder1DefsJSON) + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + _, err = configStoreContract.AddChannelDefinitions(adder1, donID, adder1ID, url, adder1DefsSHA) + require.NoError(t, err) backend.Commit() - } - t.Run("successfully updates to new channel definitions with new log", func(t *testing.T) { - t.Cleanup(func() { observedLogs.TakeAll() }) + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url) - le := 
testutils.WaitForLogMessage(t, observedLogs, "Set new channel definitions") - fields := le.ContextMap() - assert.Contains(t, fields, "version") - assert.Contains(t, fields, "url") - assert.Contains(t, fields, "sha") - assert.Contains(t, fields, "donID") - assert.NotContains(t, fields, "err") + // Adder2 adds different channels + observedLogs.TakeAll() - assert.Equal(t, uint32(5), fields["version"]) - assert.Equal(t, "http://example.com/foo5", fields["url"]) - assert.Equal(t, hex.EncodeToString(sampleDefinitionsSHA[:]), fields["sha"]) - assert.Equal(t, donID, fields["donID"]) + adder2Defs := llotypes.ChannelDefinitions{ + 701: { + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{ + {StreamID: 2, Aggregator: llotypes.AggregatorMode}, + }, + Source: adder2ID, + Tombstone: false, + }, + } + + adder2DefsJSON, err := json.MarshalIndent(adder2Defs, "", " ") + require.NoError(t, err) + adder2DefsSHA := sha3.Sum256(adder2DefsJSON) + + url2 := "http://example.com/adder2-channel701.json" + rc = NewMockReadCloser(adder2DefsJSON) + client.SetResponseForURL(url2, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + _, err = configStoreContract.AddChannelDefinitions(adder2, donID, adder2ID, url2, adder2DefsSHA) + require.NoError(t, err) + backend.Commit() + + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url2) - assert.Equal(t, sampleDefinitions, cdc.Definitions()) + require.Eventually(t, func() bool { + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + _, has700 := defs[700] + _, has701 := defs[701] + return has700 && has701 + }, 5*time.Second, 100*time.Millisecond, "both adder channels should be available") + + // Verify all channels from both adders are present + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + assert.Equal(t, adder1ID, defs[700].Source, "channel 700 should be from adder1") + assert.Equal(t, adder2ID, defs[701].Source, "channel 701 should be from adder2") + assert.Equal(t, adder1Defs[700].Streams, defs[700].Streams, "channel 700 should have adder1's streams") + assert.Equal(t, adder2Defs[701].Streams, defs[701].Streams, "channel 701 should have adder2's streams") }) - t.Run("latest channel definitions are persisted and overwrite previous value", func(t *testing.T) { - pd, err := orm.LoadChannelDefinitions(testutils.Context(t), configStoreAddress, donID) + t.Run("adder limit enforcement", func(t *testing.T) { + observedLogs.TakeAll() + + // Create definitions with more than MaxChannelsPerAdder (100) channels + tooManyDefs := make(llotypes.ChannelDefinitions) + for i := uint32(800); i < 800+channeldefinitions.MaxChannelsPerAdder+1; i++ { + tooManyDefs[i] = llotypes.ChannelDefinition{ + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: i, Aggregator: llotypes.AggregatorMedian}, + }, + Source: adder1ID, + Tombstone: false, + } + } + + tooManyDefsJSON, err := json.MarshalIndent(tooManyDefs, "", " ") require.NoError(t, err) - assert.Equal(t, ETHMainnetChainSelector, pd.ChainSelector) - assert.Equal(t, configStoreAddress, pd.Address) - assert.Equal(t, sampleDefinitions, pd.Definitions) - assert.Equal(t, donID, pd.DonID) - assert.Equal(t, uint32(5), pd.Version) + tooManyDefsSHA := sha3.Sum256(tooManyDefsJSON) + + url := "http://example.com/too-many-channels.json" + rc := NewMockReadCloser(tooManyDefsJSON) + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + _, err = configStoreContract.AddChannelDefinitions(adder1, donID, adder1ID, url, 
tooManyDefsSHA) + require.NoError(t, err) + backend.Commit() + + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url) + + // Wait a bit for processing + time.Sleep(500 * time.Millisecond) + + // Call Definitions() to trigger the merge and error logging + _ = cdc.Definitions(llotypes.ChannelDefinitions{}) + + // Verify error is logged and channels are not merged + testutils.WaitForLogMessageWithField(t, observedLogs, "adder limit exceeded, skipping remaining definitions for source", + "source", strconv.FormatUint(uint64(adder1ID), 10)) + + // Verify no channels above the limit were added + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + var addedDefinitionsCount int + for _, def := range defs { + if def.Source == adder1ID { + addedDefinitionsCount++ + } + } + require.Equal(t, channeldefinitions.MaxChannelsPerAdder, addedDefinitionsCount) + }) + + t.Run("deterministic processing order", func(t *testing.T) { + observedLogs.TakeAll() + + // Add definitions from owner and adders at different block numbers + // We'll add them in a specific order and verify the final result respects block/log ordering + + // First, adder1 adds channel 900 + adder1Defs := llotypes.ChannelDefinitions{ + 900: { + ReportFormat: llotypes.ReportFormatJSON, + Streams: []llotypes.Stream{ + {StreamID: 1, Aggregator: llotypes.AggregatorMedian}, + }, + Source: adder1ID, + Tombstone: false, + }, + } + + adder1DefsJSON, err := json.MarshalIndent(adder1Defs, "", " ") + require.NoError(t, err) + adder1DefsSHA := sha3.Sum256(adder1DefsJSON) + + url := "http://example.com/adder1-channel900.json" + rc := NewMockReadCloser(adder1DefsJSON) + client.SetResponseForURL(url, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + _, err = configStoreContract.AddChannelDefinitions(adder1, donID, adder1ID, url, adder1DefsSHA) + require.NoError(t, err) + backend.Commit() + + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url) + + // Then owner adds channel 900 (should overwrite) + observedLogs.TakeAll() + + ownerDefs := llotypes.ChannelDefinitions{ + 900: { + ReportFormat: llotypes.ReportFormatEVMPremiumLegacy, + Streams: []llotypes.Stream{ + {StreamID: 999, Aggregator: llotypes.AggregatorQuote}, + }, + Source: channeldefinitions.SourceOwner, + Tombstone: false, + }, + } + + ownerDefsJSON, err := json.MarshalIndent(ownerDefs, "", " ") + require.NoError(t, err) + ownerDefsSHA := sha3.Sum256(ownerDefsJSON) + + url2 := "http://example.com/owner-channel900.json" + rc = NewMockReadCloser(ownerDefsJSON) + client.SetResponseForURL(url2, &http.Response{ + StatusCode: 200, + Body: rc, + }, nil) + require.NoError(t, utils.JustError(configStoreContract.SetChannelDefinitions(steve, donID, url2, ownerDefsSHA))) + backend.Commit() + + testutils.WaitForLogMessageWithField(t, observedLogs, "Got new logs", + "url", url2) + + require.Eventually(t, func() bool { + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + return defs[900].Source == channeldefinitions.SourceOwner + }, 5*time.Second, 100*time.Millisecond, "owner should have overwritten channel 900") + + // Verify final result respects ordering (owner's definition should win) + defs := cdc.Definitions(llotypes.ChannelDefinitions{}) + assert.Equal(t, channeldefinitions.SourceOwner, defs[900].Source, "channel 900 should be from owner (processed later)") + assert.Equal(t, ownerDefs[900].Streams, defs[900].Streams, "channel 900 should have owner's streams") }) } diff --git 
a/core/store/migrate/migrations/0285_llo_channel_definitions_add_format.sql b/core/store/migrate/migrations/0285_llo_channel_definitions_add_format.sql new file mode 100644 index 00000000000..cbf058d539a --- /dev/null +++ b/core/store/migrate/migrations/0285_llo_channel_definitions_add_format.sql @@ -0,0 +1,12 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE + channel_definitions +ADD + COLUMN format bigint; +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +ALTER TABLE + channel_definitions DROP COLUMN format; +-- +goose StatementEnd diff --git a/deployment/go.mod b/deployment/go.mod index 564e7e68b54..ca1fc86b614 100644 --- a/deployment/go.mod +++ b/deployment/go.mod @@ -42,10 +42,10 @@ require ( github.com/smartcontractkit/chainlink-ccip v0.1.1-solana.0.20251128020529-88d93b01d749 github.com/smartcontractkit/chainlink-ccip/chains/solana v0.0.0-20250912190424-fd2e35d7deb5 github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings v0.0.0-20250912190424-fd2e35d7deb5 - github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 + github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 github.com/smartcontractkit/chainlink-deployments-framework v0.70.0 github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec - github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b + github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20251021173435-e86785845942 github.com/smartcontractkit/chainlink-protos/cre/go v0.0.0-20251124151448-0448aefdaab9 github.com/smartcontractkit/chainlink-protos/job-distributor v0.17.0 @@ -416,7 +416,7 @@ require ( github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb09e5363e // indirect github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 // indirect github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 // indirect - github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 // indirect + github.com/smartcontractkit/chainlink-data-streams v0.1.7 // indirect github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 // indirect github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 // indirect github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20251210101658-1c5c8e4c4f15 // indirect diff --git a/deployment/go.sum b/deployment/go.sum index a1c9b1fdcad..197115f00fb 100644 --- a/deployment/go.sum +++ b/deployment/go.sum @@ -1358,20 +1358,20 @@ github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb09e5363e/go.mod h1:IaoLCQE1miX3iUlQNxOPcVrXrshcO/YsFpxnFuhG9DM= github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 h1:0IMjHpzI9mgvGGtmsr1NdRhoXp++gU805f/f9oN94ls= github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599/go.mod h1:Ysd/qkofD0bepk29RS7Q4ZlVDd4yAHXucYsp5gAy6AE= -github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 h1:b8aHhus/+bikhHlR5+Ll4Z0hsczJPA0hEj68/8+xyXs= -github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk= +github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 
h1:i1Vi+c3Zptqgpvj99IOJZcux6OdYl/2X1QM/fudALP8= +github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 h1:FJAFgXS9oqASnkS03RE1HQwYQQxrO4l46O5JSzxqLgg= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10/go.mod h1:oiDa54M0FwxevWwyAX773lwdWvFYYlYHHQV1LQ5HpWY= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7 h1:9wh1G+WbXwPVqf0cfSRSgwIcaXTQgvYezylEAfwmrbw= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7/go.mod h1:yaDOAZF6MNB+NGYpxGCUc+owIdKrjvFW0JODdTcQ3V0= -github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 h1:gU4suSMid2uQVSxdtPhqGR9s9w3ViclcGtwkQaNbrtM= -github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19/go.mod h1:GPsn6PKJvPe1UfRYyVxsDzOWq6NILzBstiiLq/w+kG0= +github.com/smartcontractkit/chainlink-data-streams v0.1.7 h1:Mwb69azs8hsKyxw93zmLMLDK0QpkF7mZa9PK9eGsG3g= +github.com/smartcontractkit/chainlink-data-streams v0.1.7/go.mod h1:8rUcGhjeXBoTFx2MynWgXiBWzVSB+LXd9JR6m8y2FfQ= github.com/smartcontractkit/chainlink-deployments-framework v0.70.0 h1:wo2KL2viGZK/LhHLM8F88sRyhZF9wwWh+YDzW8hS00g= github.com/smartcontractkit/chainlink-deployments-framework v0.70.0/go.mod h1:Cp7PuO7HUDugp7bWGP/TcDAvvvkFLdKOVrSm0zXlnhg= github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec h1:K8sLjgwPgozQb86LH+aWXqBUJak6VGwSt5YiKbCI/uY= github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec/go.mod h1:9VcrUs+H/f9ekkqAdfUd70Pk2dA1Zc3KykJVFBfJNHs= -github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b h1:Dqhm/67Sb1ohgce8FW6tnK1CRXo2zoLCbV+EGyew5sg= -github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI= +github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a h1:kVKWRGrSCioMY2lEVIEblerv/KkINIQS2hLUOw2wKOg= +github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI= github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 h1:8u9xUrC+yHrTDexOKDd+jrA6LCzFFHeX1G82oj2fsSI= github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135/go.mod h1:NkvE4iQgiT7dMCP6U3xPELHhWhN5Xr6rHC0axRebyMU= github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 h1:ACpDbAxG4fa4sA83dbtYcrnlpE/y7thNIZfHxTv2ZLs= diff --git a/go.mod b/go.mod index f38a2980c27..da2e45813ba 100644 --- a/go.mod +++ b/go.mod @@ -85,11 +85,11 @@ require ( github.com/smartcontractkit/chainlink-ccip/chains/solana v0.0.0-20250912190424-fd2e35d7deb5 github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings v0.0.0-20250912190424-fd2e35d7deb5 github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 - github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 + github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 - github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 + github.com/smartcontractkit/chainlink-data-streams v0.1.7 github.com/smartcontractkit/chainlink-evm 
v0.3.4-0.20251210110629-10c56e8d2cec - github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b + github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20251210101658-1c5c8e4c4f15 diff --git a/go.sum b/go.sum index 042782b34ce..87cf6b86c93 100644 --- a/go.sum +++ b/go.sum @@ -1164,18 +1164,18 @@ github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings v0.0.0-20250 github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings v0.0.0-20250912190424-fd2e35d7deb5/go.mod h1:xtZNi6pOKdC3sLvokDvXOhgHzT+cyBqH/gWwvxTxqrg= github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 h1:0IMjHpzI9mgvGGtmsr1NdRhoXp++gU805f/f9oN94ls= github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599/go.mod h1:Ysd/qkofD0bepk29RS7Q4ZlVDd4yAHXucYsp5gAy6AE= -github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 h1:b8aHhus/+bikhHlR5+Ll4Z0hsczJPA0hEj68/8+xyXs= -github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk= +github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 h1:i1Vi+c3Zptqgpvj99IOJZcux6OdYl/2X1QM/fudALP8= +github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 h1:FJAFgXS9oqASnkS03RE1HQwYQQxrO4l46O5JSzxqLgg= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10/go.mod h1:oiDa54M0FwxevWwyAX773lwdWvFYYlYHHQV1LQ5HpWY= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7 h1:9wh1G+WbXwPVqf0cfSRSgwIcaXTQgvYezylEAfwmrbw= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7/go.mod h1:yaDOAZF6MNB+NGYpxGCUc+owIdKrjvFW0JODdTcQ3V0= -github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 h1:gU4suSMid2uQVSxdtPhqGR9s9w3ViclcGtwkQaNbrtM= -github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19/go.mod h1:GPsn6PKJvPe1UfRYyVxsDzOWq6NILzBstiiLq/w+kG0= +github.com/smartcontractkit/chainlink-data-streams v0.1.7 h1:Mwb69azs8hsKyxw93zmLMLDK0QpkF7mZa9PK9eGsG3g= +github.com/smartcontractkit/chainlink-data-streams v0.1.7/go.mod h1:8rUcGhjeXBoTFx2MynWgXiBWzVSB+LXd9JR6m8y2FfQ= github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec h1:K8sLjgwPgozQb86LH+aWXqBUJak6VGwSt5YiKbCI/uY= github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec/go.mod h1:9VcrUs+H/f9ekkqAdfUd70Pk2dA1Zc3KykJVFBfJNHs= -github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b h1:Dqhm/67Sb1ohgce8FW6tnK1CRXo2zoLCbV+EGyew5sg= -github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI= +github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a h1:kVKWRGrSCioMY2lEVIEblerv/KkINIQS2hLUOw2wKOg= +github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI= github.com/smartcontractkit/chainlink-feeds 
v0.1.2-0.20250227211209-7cd000095135 h1:8u9xUrC+yHrTDexOKDd+jrA6LCzFFHeX1G82oj2fsSI= github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135/go.mod h1:NkvE4iQgiT7dMCP6U3xPELHhWhN5Xr6rHC0axRebyMU= github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 h1:ACpDbAxG4fa4sA83dbtYcrnlpE/y7thNIZfHxTv2ZLs= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index c9a9a01932c..2c929144d13 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -51,10 +51,10 @@ require ( github.com/smartcontractkit/chainlink-ccip v0.1.1-solana.0.20251128020529-88d93b01d749 github.com/smartcontractkit/chainlink-ccip/chains/solana v0.0.0-20250912190424-fd2e35d7deb5 github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings v0.0.0-20250912190424-fd2e35d7deb5 - github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 + github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 github.com/smartcontractkit/chainlink-deployments-framework v0.70.0 github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec - github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b + github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a github.com/smartcontractkit/chainlink-protos/job-distributor v0.17.0 github.com/smartcontractkit/chainlink-sui v0.0.0-20251205161630-88314452254c github.com/smartcontractkit/chainlink-sui/deployment v0.0.0-20251205161630-88314452254c @@ -498,7 +498,7 @@ require ( github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb09e5363e // indirect github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 // indirect github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 // indirect - github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 // indirect + github.com/smartcontractkit/chainlink-data-streams v0.1.7 // indirect github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 // indirect github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 // indirect github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20251210101658-1c5c8e4c4f15 // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 7ce4fdb5c27..972961d5a92 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1601,20 +1601,20 @@ github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb09e5363e/go.mod h1:IaoLCQE1miX3iUlQNxOPcVrXrshcO/YsFpxnFuhG9DM= github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 h1:0IMjHpzI9mgvGGtmsr1NdRhoXp++gU805f/f9oN94ls= github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599/go.mod h1:Ysd/qkofD0bepk29RS7Q4ZlVDd4yAHXucYsp5gAy6AE= -github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 h1:b8aHhus/+bikhHlR5+Ll4Z0hsczJPA0hEj68/8+xyXs= -github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk= +github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 h1:i1Vi+c3Zptqgpvj99IOJZcux6OdYl/2X1QM/fudALP8= +github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4/go.mod 
h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 h1:FJAFgXS9oqASnkS03RE1HQwYQQxrO4l46O5JSzxqLgg= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10/go.mod h1:oiDa54M0FwxevWwyAX773lwdWvFYYlYHHQV1LQ5HpWY= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7 h1:9wh1G+WbXwPVqf0cfSRSgwIcaXTQgvYezylEAfwmrbw= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7/go.mod h1:yaDOAZF6MNB+NGYpxGCUc+owIdKrjvFW0JODdTcQ3V0= -github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 h1:gU4suSMid2uQVSxdtPhqGR9s9w3ViclcGtwkQaNbrtM= -github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19/go.mod h1:GPsn6PKJvPe1UfRYyVxsDzOWq6NILzBstiiLq/w+kG0= +github.com/smartcontractkit/chainlink-data-streams v0.1.7 h1:Mwb69azs8hsKyxw93zmLMLDK0QpkF7mZa9PK9eGsG3g= +github.com/smartcontractkit/chainlink-data-streams v0.1.7/go.mod h1:8rUcGhjeXBoTFx2MynWgXiBWzVSB+LXd9JR6m8y2FfQ= github.com/smartcontractkit/chainlink-deployments-framework v0.70.0 h1:wo2KL2viGZK/LhHLM8F88sRyhZF9wwWh+YDzW8hS00g= github.com/smartcontractkit/chainlink-deployments-framework v0.70.0/go.mod h1:Cp7PuO7HUDugp7bWGP/TcDAvvvkFLdKOVrSm0zXlnhg= github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec h1:K8sLjgwPgozQb86LH+aWXqBUJak6VGwSt5YiKbCI/uY= github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec/go.mod h1:9VcrUs+H/f9ekkqAdfUd70Pk2dA1Zc3KykJVFBfJNHs= -github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b h1:Dqhm/67Sb1ohgce8FW6tnK1CRXo2zoLCbV+EGyew5sg= -github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI= +github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a h1:kVKWRGrSCioMY2lEVIEblerv/KkINIQS2hLUOw2wKOg= +github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI= github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 h1:8u9xUrC+yHrTDexOKDd+jrA6LCzFFHeX1G82oj2fsSI= github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135/go.mod h1:NkvE4iQgiT7dMCP6U3xPELHhWhN5Xr6rHC0axRebyMU= github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 h1:ACpDbAxG4fa4sA83dbtYcrnlpE/y7thNIZfHxTv2ZLs= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 50b1cc68d6e..94686e81d96 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -32,10 +32,10 @@ require ( github.com/smartcontractkit/chainlink-ccip v0.1.1-solana.0.20251128020529-88d93b01d749 github.com/smartcontractkit/chainlink-ccip/chains/solana v0.0.0-20250912190424-fd2e35d7deb5 github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings v0.0.0-20250912190424-fd2e35d7deb5 - github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 + github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 github.com/smartcontractkit/chainlink-deployments-framework v0.70.0 github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec - github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b + github.com/smartcontractkit/chainlink-evm/gethwrappers 
v0.0.0-20251211123524-f0c4fe7cfc0a github.com/smartcontractkit/chainlink-testing-framework/framework v0.12.1 github.com/smartcontractkit/chainlink-testing-framework/havoc v1.50.5 github.com/smartcontractkit/chainlink-testing-framework/lib v1.54.7 @@ -483,7 +483,7 @@ require ( github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb09e5363e // indirect github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 // indirect github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 // indirect - github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 // indirect + github.com/smartcontractkit/chainlink-data-streams v0.1.7 // indirect github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 // indirect github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 // indirect github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20251210101658-1c5c8e4c4f15 // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 15dd050f49b..cd29b47fc8e 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1580,20 +1580,20 @@ github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb09e5363e/go.mod h1:IaoLCQE1miX3iUlQNxOPcVrXrshcO/YsFpxnFuhG9DM= github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 h1:0IMjHpzI9mgvGGtmsr1NdRhoXp++gU805f/f9oN94ls= github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599/go.mod h1:Ysd/qkofD0bepk29RS7Q4ZlVDd4yAHXucYsp5gAy6AE= -github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 h1:b8aHhus/+bikhHlR5+Ll4Z0hsczJPA0hEj68/8+xyXs= -github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk= +github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 h1:i1Vi+c3Zptqgpvj99IOJZcux6OdYl/2X1QM/fudALP8= +github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 h1:FJAFgXS9oqASnkS03RE1HQwYQQxrO4l46O5JSzxqLgg= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10/go.mod h1:oiDa54M0FwxevWwyAX773lwdWvFYYlYHHQV1LQ5HpWY= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7 h1:9wh1G+WbXwPVqf0cfSRSgwIcaXTQgvYezylEAfwmrbw= github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7/go.mod h1:yaDOAZF6MNB+NGYpxGCUc+owIdKrjvFW0JODdTcQ3V0= -github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 h1:gU4suSMid2uQVSxdtPhqGR9s9w3ViclcGtwkQaNbrtM= -github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19/go.mod h1:GPsn6PKJvPe1UfRYyVxsDzOWq6NILzBstiiLq/w+kG0= +github.com/smartcontractkit/chainlink-data-streams v0.1.7 h1:Mwb69azs8hsKyxw93zmLMLDK0QpkF7mZa9PK9eGsG3g= +github.com/smartcontractkit/chainlink-data-streams v0.1.7/go.mod h1:8rUcGhjeXBoTFx2MynWgXiBWzVSB+LXd9JR6m8y2FfQ= github.com/smartcontractkit/chainlink-deployments-framework v0.70.0 h1:wo2KL2viGZK/LhHLM8F88sRyhZF9wwWh+YDzW8hS00g= github.com/smartcontractkit/chainlink-deployments-framework v0.70.0/go.mod h1:Cp7PuO7HUDugp7bWGP/TcDAvvvkFLdKOVrSm0zXlnhg= 
 github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec h1:K8sLjgwPgozQb86LH+aWXqBUJak6VGwSt5YiKbCI/uY=
 github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec/go.mod h1:9VcrUs+H/f9ekkqAdfUd70Pk2dA1Zc3KykJVFBfJNHs=
-github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b h1:Dqhm/67Sb1ohgce8FW6tnK1CRXo2zoLCbV+EGyew5sg=
-github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI=
+github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a h1:kVKWRGrSCioMY2lEVIEblerv/KkINIQS2hLUOw2wKOg=
+github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI=
 github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 h1:8u9xUrC+yHrTDexOKDd+jrA6LCzFFHeX1G82oj2fsSI=
 github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135/go.mod h1:NkvE4iQgiT7dMCP6U3xPELHhWhN5Xr6rHC0axRebyMU=
 github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 h1:ACpDbAxG4fa4sA83dbtYcrnlpE/y7thNIZfHxTv2ZLs=
diff --git a/plugins/plugins.public.yaml b/plugins/plugins.public.yaml
index b3a11f8b1c5..da232a980fe 100644
--- a/plugins/plugins.public.yaml
+++ b/plugins/plugins.public.yaml
@@ -45,7 +45,7 @@ plugins:
 
   streams:
     - moduleURI: "github.com/smartcontractkit/chainlink-data-streams"
-      gitRef: "v0.1.7-0.20251209111830-ccd12a5b2a19"
+      gitRef: "v0.1.7"
       installPath: "./mercury/cmd/chainlink-mercury"
 
   ton:
diff --git a/system-tests/lib/go.mod b/system-tests/lib/go.mod
index 6d8cc772397..8857b2a7cc2 100644
--- a/system-tests/lib/go.mod
+++ b/system-tests/lib/go.mod
@@ -33,10 +33,10 @@ require (
 	github.com/sethvargo/go-retry v0.3.0
 	github.com/smartcontractkit/chain-selectors v1.0.85
 	github.com/smartcontractkit/chainlink-ccip/chains/solana v0.0.0-20250912190424-fd2e35d7deb5
-	github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8
+	github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4
 	github.com/smartcontractkit/chainlink-deployments-framework v0.70.0
 	github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec
-	github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b
+	github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a
 	github.com/smartcontractkit/chainlink-protos/cre/go v0.0.0-20251124151448-0448aefdaab9
 	github.com/smartcontractkit/chainlink-protos/job-distributor v0.17.0
 	github.com/smartcontractkit/chainlink-solana v1.1.2-0.20251121223352-370eb61346d6
@@ -459,7 +459,7 @@ require (
 	github.com/smartcontractkit/chainlink-ccip/chains/solana/gobindings v0.0.0-20250912190424-fd2e35d7deb5 // indirect
 	github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 // indirect
 	github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 // indirect
-	github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 // indirect
+	github.com/smartcontractkit/chainlink-data-streams v0.1.7 // indirect
 	github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 // indirect
 	github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 // indirect
 	github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20251210101658-1c5c8e4c4f15 // indirect
diff --git a/system-tests/lib/go.sum b/system-tests/lib/go.sum
index 31f13b851df..1049998e0fc 100644
--- a/system-tests/lib/go.sum
+++ b/system-tests/lib/go.sum
@@ -1602,20 +1602,20 @@ github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb
 github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb09e5363e/go.mod h1:IaoLCQE1miX3iUlQNxOPcVrXrshcO/YsFpxnFuhG9DM=
 github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 h1:0IMjHpzI9mgvGGtmsr1NdRhoXp++gU805f/f9oN94ls=
 github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599/go.mod h1:Ysd/qkofD0bepk29RS7Q4ZlVDd4yAHXucYsp5gAy6AE=
-github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 h1:b8aHhus/+bikhHlR5+Ll4Z0hsczJPA0hEj68/8+xyXs=
-github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk=
+github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 h1:i1Vi+c3Zptqgpvj99IOJZcux6OdYl/2X1QM/fudALP8=
+github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk=
 github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 h1:FJAFgXS9oqASnkS03RE1HQwYQQxrO4l46O5JSzxqLgg=
 github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10/go.mod h1:oiDa54M0FwxevWwyAX773lwdWvFYYlYHHQV1LQ5HpWY=
 github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7 h1:9wh1G+WbXwPVqf0cfSRSgwIcaXTQgvYezylEAfwmrbw=
 github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7/go.mod h1:yaDOAZF6MNB+NGYpxGCUc+owIdKrjvFW0JODdTcQ3V0=
-github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 h1:gU4suSMid2uQVSxdtPhqGR9s9w3ViclcGtwkQaNbrtM=
-github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19/go.mod h1:GPsn6PKJvPe1UfRYyVxsDzOWq6NILzBstiiLq/w+kG0=
+github.com/smartcontractkit/chainlink-data-streams v0.1.7 h1:Mwb69azs8hsKyxw93zmLMLDK0QpkF7mZa9PK9eGsG3g=
+github.com/smartcontractkit/chainlink-data-streams v0.1.7/go.mod h1:8rUcGhjeXBoTFx2MynWgXiBWzVSB+LXd9JR6m8y2FfQ=
 github.com/smartcontractkit/chainlink-deployments-framework v0.70.0 h1:wo2KL2viGZK/LhHLM8F88sRyhZF9wwWh+YDzW8hS00g=
 github.com/smartcontractkit/chainlink-deployments-framework v0.70.0/go.mod h1:Cp7PuO7HUDugp7bWGP/TcDAvvvkFLdKOVrSm0zXlnhg=
 github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec h1:K8sLjgwPgozQb86LH+aWXqBUJak6VGwSt5YiKbCI/uY=
 github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec/go.mod h1:9VcrUs+H/f9ekkqAdfUd70Pk2dA1Zc3KykJVFBfJNHs=
-github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b h1:Dqhm/67Sb1ohgce8FW6tnK1CRXo2zoLCbV+EGyew5sg=
-github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI=
+github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a h1:kVKWRGrSCioMY2lEVIEblerv/KkINIQS2hLUOw2wKOg=
+github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI=
 github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 h1:8u9xUrC+yHrTDexOKDd+jrA6LCzFFHeX1G82oj2fsSI=
 github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135/go.mod h1:NkvE4iQgiT7dMCP6U3xPELHhWhN5Xr6rHC0axRebyMU=
 github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 h1:ACpDbAxG4fa4sA83dbtYcrnlpE/y7thNIZfHxTv2ZLs=
diff --git a/system-tests/tests/go.mod b/system-tests/tests/go.mod
index 781497d7617..0250c85c94f 100644
--- a/system-tests/tests/go.mod
+++ b/system-tests/tests/go.mod
@@ -46,10 +46,10 @@ require (
 	github.com/rs/zerolog v1.34.0
 	github.com/shopspring/decimal v1.4.0
 	github.com/smartcontractkit/chain-selectors v1.0.85
-	github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8
-	github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19
+	github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4
+	github.com/smartcontractkit/chainlink-data-streams v0.1.7
 	github.com/smartcontractkit/chainlink-deployments-framework v0.70.0
-	github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b
+	github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a
 	github.com/smartcontractkit/chainlink-protos/cre/go v0.0.0-20251124151448-0448aefdaab9
 	github.com/smartcontractkit/chainlink-protos/job-distributor v0.17.0
 	github.com/smartcontractkit/chainlink-protos/workflows/go v0.0.0-20251025021331-aa7746850cc4
diff --git a/system-tests/tests/go.sum b/system-tests/tests/go.sum
index 7c2c73a42cf..071be514210 100644
--- a/system-tests/tests/go.sum
+++ b/system-tests/tests/go.sum
@@ -1799,20 +1799,20 @@ github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb
 github.com/smartcontractkit/chainlink-ccip/deployment v0.0.0-20251027185542-babb09e5363e/go.mod h1:IaoLCQE1miX3iUlQNxOPcVrXrshcO/YsFpxnFuhG9DM=
 github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599 h1:0IMjHpzI9mgvGGtmsr1NdRhoXp++gU805f/f9oN94ls=
 github.com/smartcontractkit/chainlink-ccv v0.0.0-20251210114515-e8434089d599/go.mod h1:Ysd/qkofD0bepk29RS7Q4ZlVDd4yAHXucYsp5gAy6AE=
-github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8 h1:b8aHhus/+bikhHlR5+Ll4Z0hsczJPA0hEj68/8+xyXs=
-github.com/smartcontractkit/chainlink-common v0.9.6-0.20251210225051-4659b78ac2a8/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk=
+github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4 h1:i1Vi+c3Zptqgpvj99IOJZcux6OdYl/2X1QM/fudALP8=
+github.com/smartcontractkit/chainlink-common v0.9.6-0.20251211140724-319861e514c4/go.mod h1:uRnGLHKo56QYaPk93z0NRAIgv115lh72rzG40CiE1Mk=
 github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 h1:FJAFgXS9oqASnkS03RE1HQwYQQxrO4l46O5JSzxqLgg=
 github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10/go.mod h1:oiDa54M0FwxevWwyAX773lwdWvFYYlYHHQV1LQ5HpWY=
 github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7 h1:9wh1G+WbXwPVqf0cfSRSgwIcaXTQgvYezylEAfwmrbw=
 github.com/smartcontractkit/chainlink-common/pkg/monitoring v0.0.0-20250415235644-8703639403c7/go.mod h1:yaDOAZF6MNB+NGYpxGCUc+owIdKrjvFW0JODdTcQ3V0=
-github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19 h1:gU4suSMid2uQVSxdtPhqGR9s9w3ViclcGtwkQaNbrtM=
-github.com/smartcontractkit/chainlink-data-streams v0.1.7-0.20251209111830-ccd12a5b2a19/go.mod h1:GPsn6PKJvPe1UfRYyVxsDzOWq6NILzBstiiLq/w+kG0=
+github.com/smartcontractkit/chainlink-data-streams v0.1.7 h1:Mwb69azs8hsKyxw93zmLMLDK0QpkF7mZa9PK9eGsG3g=
+github.com/smartcontractkit/chainlink-data-streams v0.1.7/go.mod h1:8rUcGhjeXBoTFx2MynWgXiBWzVSB+LXd9JR6m8y2FfQ=
 github.com/smartcontractkit/chainlink-deployments-framework v0.70.0 h1:wo2KL2viGZK/LhHLM8F88sRyhZF9wwWh+YDzW8hS00g=
 github.com/smartcontractkit/chainlink-deployments-framework v0.70.0/go.mod h1:Cp7PuO7HUDugp7bWGP/TcDAvvvkFLdKOVrSm0zXlnhg=
 github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec h1:K8sLjgwPgozQb86LH+aWXqBUJak6VGwSt5YiKbCI/uY=
 github.com/smartcontractkit/chainlink-evm v0.3.4-0.20251210110629-10c56e8d2cec/go.mod h1:9VcrUs+H/f9ekkqAdfUd70Pk2dA1Zc3KykJVFBfJNHs=
-github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b h1:Dqhm/67Sb1ohgce8FW6tnK1CRXo2zoLCbV+EGyew5sg=
-github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022075638-49d961001d1b/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI=
+github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a h1:kVKWRGrSCioMY2lEVIEblerv/KkINIQS2hLUOw2wKOg=
+github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI=
 github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135 h1:8u9xUrC+yHrTDexOKDd+jrA6LCzFFHeX1G82oj2fsSI=
 github.com/smartcontractkit/chainlink-feeds v0.1.2-0.20250227211209-7cd000095135/go.mod h1:NkvE4iQgiT7dMCP6U3xPELHhWhN5Xr6rHC0axRebyMU=
 github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 h1:ACpDbAxG4fa4sA83dbtYcrnlpE/y7thNIZfHxTv2ZLs=
diff --git a/system-tests/tests/load/cre/workflow_don_load_test.go b/system-tests/tests/load/cre/workflow_don_load_test.go
index 3ad69cc0bba..61034a71e4f 100644
--- a/system-tests/tests/load/cre/workflow_don_load_test.go
+++ b/system-tests/tests/load/cre/workflow_don_load_test.go
@@ -703,7 +703,7 @@ func createFeedReport(lggr logger.Logger, price decimal.Decimal, timestamp uint6
 
 	reportBytes, err := reportCodec.Encode(report, llotypes.ChannelDefinition{
 		Streams: streams,
-	})
+	}, nil)
 	if err != nil {
 		return nil, "", err
 	}
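Reviewer note: the Encode hunks in this diff all make the same mechanical change, appending a nil third argument to reportCodec.Encode. A minimal migration sketch follows for anyone updating other call sites. It is illustrative only: the reportEncoder interface, the encodeWithNilOpts helper, and the use of `any` for the report and channel-definition parameters are assumptions made to keep the sketch self-contained; the real types and the meaning of the new parameter live in chainlink-data-streams and are not shown in this diff.

// Hypothetical sketch of the call-site migration, not code from this PR.
package sketch

import "fmt"

// reportEncoder captures only the shape this diff relies on: Encode now
// takes a third argument, which every updated call site passes as nil.
// The concrete types are stand-ins (`any`), not the real interface.
type reportEncoder interface {
	Encode(report any, channelDef any, opts any) ([]byte, error)
}

// encodeWithNilOpts shows the mechanical change applied in the hunks above:
// append nil to an existing two-argument Encode call, leaving everything
// else, including error handling, unchanged.
func encodeWithNilOpts(codec reportEncoder, report, channelDef any) ([]byte, error) {
	reportBytes, err := codec.Encode(report, channelDef, nil) // was: codec.Encode(report, channelDef)
	if err != nil {
		return nil, fmt.Errorf("failed to encode report: %w", err)
	}
	return reportBytes, nil
}

Passing nil at existing call sites is what this PR does in both the keystone integration test and the load test, which suggests the new parameter is optional and nil preserves the previous encoding behavior; callers that need the new channel-adder functionality would presumably pass a non-nil value.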