diff --git a/.circleci/config.yml b/.circleci/config.yml index e9a4ee478..b4dadc809 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -235,6 +235,7 @@ jobs: - attach_workspace: at: ~/ - run: go install golang.org/x/tools/cmd/goimports + - run: go install github.com/hannahhoward/cbor-gen-for - run: make gen - run: git --no-pager diff && git --no-pager diff --quiet @@ -278,7 +279,7 @@ workflows: name: test-all requires: - build - suite: test-all target: "`go list ./... | grep -v curio/itests`" + suite: test-all get-params: true - resource_class: 2xlarge \ No newline at end of file + resource_class: 2xlarge diff --git a/Makefile b/Makefile index 89242c264..47c7d6a2b 100644 --- a/Makefile +++ b/Makefile @@ -83,7 +83,7 @@ ifeq ($(shell uname),Linux) batchdep: build/.supraseal-install batchdep: $(BUILD_DEPS) -,PHONY: batchdep +.PHONY: batchdep batch: GOFLAGS+=-tags=supraseal batch: CGO_LDFLAGS_ALLOW='.*' diff --git a/api/api_chain.go b/api/api_chain.go index 973342012..fa924af2a 100644 --- a/api/api_chain.go +++ b/api/api_chain.go @@ -98,6 +98,7 @@ type CurioChainRPC interface { StateVerifiedClientStatus(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) StateCirculatingSupply(context.Context, types.TipSetKey) (big.Int, error) + StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) } var _ CurioChainRPC = api.FullNode(nil) diff --git a/api/api_curio.go b/api/api_curio.go index a96b9a956..f5d07059a 100644 --- a/api/api_curio.go +++ b/api/api_curio.go @@ -8,10 +8,11 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + storiface "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/lotus/api" lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer/fsutil" - 
"github.com/filecoin-project/lotus/storage/sealer/storiface" ) type Curio interface { diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 4ddba3ce8..7e50f8cbc 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -23,13 +23,14 @@ import ( "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" + storiface "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var _ = reflect.TypeOf([]byte(nil)) @@ -123,6 +124,8 @@ type CurioChainRPCMethods struct { StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `` + StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) `` + StateCirculatingSupply func(p0 context.Context, p1 types.TipSetKey) (big.Int, error) `` StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) `` @@ -622,6 +625,17 @@ func (s *CurioChainRPCStub) StateAccountKey(p0 context.Context, p1 address.Addre return *new(address.Address), ErrNotSupported } +func (s *CurioChainRPCStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) { + if s.Internal.StateCall == nil { + return nil, ErrNotSupported + } + return s.Internal.StateCall(p0, p1, p2) +} + +func (s *CurioChainRPCStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) { + return nil, ErrNotSupported +} + func (s *CurioChainRPCStruct) StateCirculatingSupply(p0 context.Context, p1 types.TipSetKey) 
(big.Int, error) { if s.Internal.StateCirculatingSupply == nil { return *new(big.Int), ErrNotSupported diff --git a/build/openrpc/curio.json b/build/openrpc/curio.json index 5a93c8069..306158354 100644 --- a/build/openrpc/curio.json +++ b/build/openrpc/curio.json @@ -312,7 +312,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L311" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L314" } }, { @@ -348,7 +348,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L322" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L325" } }, { @@ -402,7 +402,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L333" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L336" } }, { @@ -425,7 +425,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L344" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L347" } }, { @@ -464,7 +464,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L355" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L358" } }, { @@ -503,7 +503,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L366" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L369" } }, { @@ -697,7 +697,7 @@ "deprecated": false, "externalDocs": { "description": 
"Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L377" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L380" } }, { @@ -829,7 +829,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L388" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L391" } }, { @@ -963,7 +963,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L399" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L402" } }, { @@ -1017,7 +1017,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L410" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L413" } }, { @@ -1051,7 +1051,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L421" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L424" } }, { @@ -1128,7 +1128,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L432" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L435" } }, { @@ -1166,7 +1166,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L443" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L446" } } ] diff --git a/cmd/curio/config_test.go b/cmd/curio/config_test.go index 5d5e10e17..cd49ae54c 100644 --- 
a/cmd/curio/config_test.go +++ b/cmd/curio/config_test.go @@ -139,6 +139,16 @@ var baseText = ` # type: bool #EnableSendCommitMsg = false + # Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3). + # + # type: bool + #RequireActivationSuccess = true + + # Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3). + # + # type: bool + #RequireNotificationSuccess = true + # EnableMoveStorage enables the move-into-long-term-storage task to run on this curio instance. # This tasks should only be enabled on nodes with long-term storage. # @@ -155,6 +165,55 @@ var baseText = ` # type: int #MoveStorageMaxTasks = 0 + # EnableUpdateEncode enables the encoding step of the SnapDeal process on this curio instance. + # This step involves encoding the data into the sector and computing updated TreeR (uses gpu). + # + # type: bool + #EnableUpdateEncode = false + + # EnableUpdateProve enables the proving step of the SnapDeal process on this curio instance. + # This step generates the snark proof for the updated sector. + # + # type: bool + #EnableUpdateProve = false + + # EnableUpdateSubmit enables the submission of SnapDeal proofs to the blockchain from this curio instance. + # This step submits the generated proofs to the chain. + # + # type: bool + #EnableUpdateSubmit = false + + # UpdateEncodeMaxTasks sets the maximum number of concurrent SnapDeal encoding tasks that can run on this instance. + # + # type: int + #UpdateEncodeMaxTasks = 0 + + # UpdateProveMaxTasks sets the maximum number of concurrent SnapDeal proving tasks that can run on this instance. + # + # type: int + #UpdateProveMaxTasks = 0 + + # BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests. + # This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations. 
+ # Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. We recommend using a private IP. + # Example: "f0123:127.0.0.1:32100". Multiple addresses can be specified. + # + # When a market node like boost gives Curio's market RPC a deal to placing into a sector, Curio will first store the + # deal data in a temporary location "Piece Park" before assigning it to a sector. This requires that at least one + # node in the cluster has the EnableParkPiece option enabled and has sufficient scratch space to store the deal data. + # This is different from lotus-miner which stored the deal data into an "unsealed" sector as soon as the deal was + # received. Deal data in PiecePark is accessed when the sector TreeD and TreeR are computed, but isn't needed for + # the initial SDR layers computation. Pieces in PiecePark are removed after all sectors referencing the piece are + # sealed. + # + # To get API info for boost configuration run 'curio market rpc-info' + # + # NOTE: All deal data will flow through this service, so it should be placed on a machine running boost or on + # a machine which handles ParkPiece tasks. + # + # type: []string + #BoostAdapters = [] + # EnableWebGui enables the web GUI on this curio instance. The UI has minimal local overhead, but it should # only need to be run on a single machine in the cluster. # @@ -164,7 +223,24 @@ var baseText = ` # The address that should listen for Web GUI requests. # # type: string - #GuiAddress = ":4701" + #GuiAddress = "0.0.0.0:4701" + + # UseSyntheticPoRep enables the synthetic PoRep for all new sectors. When set to true, will reduce the amount of + # cache data held on disk after the completion of TreeRC task to 11GiB. + # + # type: bool + #UseSyntheticPoRep = false + + # The maximum amount of SyntheticPoRep tasks that can run simultaneously. Note that the maximum number of tasks will + # also be bounded by resources available on the machine. 
+ # + # type: int + #SyntheticPoRepMaxTasks = 0 + + # EnableDealMarket + # + # type: bool + #EnableDealMarket = false [Fees] @@ -188,6 +264,16 @@ var baseText = ` # type: types.FIL #MaxPublishDealsFee = "0.05 FIL" + # Whether to use available miner balance for sector collateral instead of sending it with each message + # + # type: bool + #CollateralFromMinerBalance = false + + # Don't send collateral with messages even if there is no available balance in the miner actor + # + # type: bool + #DisableCollateralFallback = false + [Fees.MaxPreCommitBatchGasFee] # type: types.FIL #Base = "0 FIL" @@ -202,19 +288,6 @@ var baseText = ` # type: types.FIL #PerSector = "0.03 FIL" -[[Addresses]] - #PreCommitControl = [] - - #CommitControl = [] - - #TerminateControl = [] - - #DisableOwnerFallback = false - - #DisableWorkerFallback = false - - MinerAddresses = ["t01013"] - [[Addresses]] #PreCommitControl = [] @@ -230,20 +303,6 @@ var baseText = ` #MinerAddresses = [] -[[Addresses]] - #PreCommitControl = [] - - #CommitControl = [] - - #TerminateControl = [] - - #DisableOwnerFallback = false - - #DisableWorkerFallback = false - - MinerAddresses = ["t01006"] - - [Proving] # Maximum number of sector checks to run in parallel. (0 = unlimited) # @@ -278,25 +337,6 @@ var baseText = ` # type: Duration #PartitionCheckTimeout = "20m0s" - # Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present. - # - # WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need - # to be recovered. Before enabling this option, make sure your PoSt workers work correctly. - # - # After changing this option, confirm that the new value works in your setup by invoking - # 'lotus-miner proving compute window-post 0' - # - # type: bool - #DisableBuiltinWindowPoSt = false - - # Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present. 
- # - # WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards. - # Before enabling this option, make sure your PoSt workers work correctly. - # - # type: bool - #DisableBuiltinWinningPoSt = false - # Disable WindowPoSt provable sector readability checks. # # In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges @@ -359,26 +399,156 @@ var baseText = ` #SingleRecoveringPartitionPerPostMessage = false -[Journal] - # Events of the form: "system1:event1,system1:event2[,...]" +[Market] + [Market.DealMarketConfig] + + [[Market.DealMarketConfig.PieceLocator]] + #URL = "https://localhost:9999" + + [Market.DealMarketConfig.PieceLocator.Headers] + #Authorization = ["Basic YWRtaW46c2VjcmV0"] + + [Market.DealMarketConfig.MK12] + # Miners is a list of miner to enable MK12 deals for + # + # type: []string + #Miners = ["t01000"] + + # When a deal is ready to publish, the amount of time to wait for more + # deals to be ready to publish before publishing them all as a batch + # + # type: Duration + #PublishMsgPeriod = "0s" + + # The maximum number of deals to include in a single PublishStorageDeals + # message + # + # type: uint64 + #MaxDealsPerPublishMsg = 0 + + # The maximum collateral that the provider will put up against a deal, + # as a multiplier of the minimum collateral bound + # The maximum fee to pay when sending the PublishStorageDeals message + # + # type: types.FIL + #MaxPublishDealsFee = "0 FIL" + + # ExpectedSealDuration is the expected time it would take to seal the deal sector + # This will be used to fail the deals which cannot be sealed on time. + # Please make sure to update this to shorter duration for snap deals + # + # type: Duration + #ExpectedSealDuration = "0s" + + +[Ingest] + # Maximum number of sectors that can be queued waiting for deals to start processing. 
+ # 0 = unlimited + # Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. + # The DealSector queue includes deals which are ready to enter the sealing pipeline but are not yet part of it - + # size of this queue will also impact the maximum number of ParkPiece tasks which can run concurrently. + # DealSector queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism. # - # type: string - #DisabledEvents = "" + # type: int + #MaxQueueDealSector = 8 + + # Maximum number of sectors that can be queued waiting for SDR to start processing. + # 0 = unlimited + # Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. + # The SDR queue includes deals which are in the process of entering the sealing pipeline. In case of the SDR tasks it is + # possible that this queue grows more than this limit(CC sectors), the backpressure is only applied to sectors + # entering the pipeline. + # + # type: int + #MaxQueueSDR = 8 + # Maximum number of sectors that can be queued waiting for SDRTrees to start processing. + # 0 = unlimited + # Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. + # In case of the trees tasks it is possible that this queue grows more than this limit, the backpressure is only + # applied to sectors entering the pipeline. + # + # type: int + #MaxQueueTrees = 0 -[Apis] - # ChainApiInfo is the API endpoint for the Lotus daemon. + # Maximum number of sectors that can be queued waiting for PoRep to start processing. + # 0 = unlimited + # Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. + # Like with the trees tasks, it is possible that this queue grows more than this limit, the backpressure is only + # applied to sectors entering the pipeline. 
# - # type: []string - ChainApiInfo = ["eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.T_jmG4DTs9Zjd7rr78862lT7D2U63uz-zqcUKHwcqaU:/dns/localhost/tcp/1234/http"] + # type: int + #MaxQueuePoRep = 0 + + # Maximum time an open deal sector should wait for more deal before it starts sealing + # + # type: Duration + #MaxDealWaitTime = "1h0m0s" + + # DoSnap enables the snap deal process for deals ingested by this instance. Unlike in lotus-miner there is no + # fallback to porep when no sectors are available to snap into. When enabled all deals will be snap deals. + # + # type: bool + #DoSnap = false + +[Apis] # RPC Secret for the storage subsystem. # If integrating with lotus-miner this must match the value from # cat ~/.lotusminer/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU | jq -r .PrivateKey # # type: string - StorageRPCSecret = "HxHe8YLHiY0LjHVw/WT/4XQkPGgRyCEYk+xiFi0Ob0o=" + #StorageRPCSecret = "" + +[Alerting] + # MinimumWalletBalance is the minimum balance all active wallets. If the balance is below this value, an + # alerts will be triggered for the wallet + # + # type: types.FIL + #MinimumWalletBalance = "5 FIL" + + [Alerting.PagerDuty] + # Enable is a flag to enable or disable the PagerDuty integration. + # + # type: bool + #Enable = false + + # PagerDutyEventURL is URL for PagerDuty.com Events API v2 URL. Events sent to this API URL are ultimately + # routed to a PagerDuty.com service and processed. + # The default is sufficient for integration with the stock commercial PagerDuty.com company's service. + # + # type: string + #PagerDutyEventURL = "https://events.pagerduty.com/v2/enqueue" + + # PageDutyIntegrationKey is the integration key for a PagerDuty.com service. You can find this unique service + # identifier in the integration page for the service. + # + # type: string + #PageDutyIntegrationKey = "" + + [Alerting.PrometheusAlertManager] + # Enable is a flag to enable or disable the Prometheus AlertManager integration. 
+ # + # type: bool + #Enable = false + + # AlertManagerURL is the URL for the Prometheus AlertManager API v2 URL. + # + # type: string + #AlertManagerURL = "http://localhost:9093/api/v2/alerts" + + [Alerting.SlackWebhook] + # Enable is a flag to enable or disable the Prometheus AlertManager integration. + # + # type: bool + #Enable = false + + # WebHookURL is the URL for the URL for slack Webhook. + # Example: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX + # + # type: string + #WebHookURL = "" ` func TestConfig(t *testing.T) { diff --git a/cmd/curio/guidedsetup/gen/main.go b/cmd/curio/guidedsetup/gen/main.go deleted file mode 100644 index 6e5765d1b..000000000 --- a/cmd/curio/guidedsetup/gen/main.go +++ /dev/null @@ -1,20 +0,0 @@ -package main - -import ( - "fmt" - "os" - - gen "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/curio/cmd/curio/guidedsetup" -) - -func main() { - err := gen.WriteMapEncodersToFile("./cbor_gen.go", "guidedsetup", - guidedsetup.SectorInfo{}, - ) - if err != nil { - fmt.Println(err) - os.Exit(1) - } -} diff --git a/cmd/curio/guidedsetup/shared.go b/cmd/curio/guidedsetup/shared.go index 51fec2d14..7b7584948 100644 --- a/cmd/curio/guidedsetup/shared.go +++ b/cmd/curio/guidedsetup/shared.go @@ -26,13 +26,15 @@ import ( "github.com/filecoin-project/curio/deps" "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/lib/types/sector" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) +//go:generate cbor-gen-for --map-encoding SectorInfo + const ( FlagMinerRepo = "miner-repo" ) diff --git a/cmd/curio/guidedsetup/cbor_gen.go b/cmd/curio/guidedsetup/shared_cbor_gen.go similarity index 99% rename from cmd/curio/guidedsetup/cbor_gen.go rename to 
cmd/curio/guidedsetup/shared_cbor_gen.go index 9a2beae43..c88b9e31f 100644 --- a/cmd/curio/guidedsetup/cbor_gen.go +++ b/cmd/curio/guidedsetup/shared_cbor_gen.go @@ -8,15 +8,12 @@ import ( "math" "sort" + storiface "github.com/filecoin-project/curio/lib/storiface" + sector "github.com/filecoin-project/curio/lib/types/sector" + abi "github.com/filecoin-project/go-state-types/abi" cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" - - abi "github.com/filecoin-project/go-state-types/abi" - - sector "github.com/filecoin-project/curio/lib/types/sector" - - storiface "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var _ = xerrors.Errorf diff --git a/cmd/curio/market.go b/cmd/curio/market.go index 070cd375a..cc11b58b4 100644 --- a/cmd/curio/market.go +++ b/cmd/curio/market.go @@ -1,10 +1,15 @@ package main import ( + "bufio" + "encoding/json" "fmt" - "sort" + "net/http" + "os" "strconv" + "strings" + "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -13,71 +18,17 @@ import ( "github.com/filecoin-project/curio/deps" "github.com/filecoin-project/curio/lib/reqcontext" - "github.com/filecoin-project/curio/market" - "github.com/filecoin-project/curio/market/lmrpc" + "github.com/filecoin-project/curio/market/storageingest" ) var marketCmd = &cli.Command{ Name: "market", Subcommands: []*cli.Command{ - marketRPCInfoCmd, marketSealCmd, + marketAddOfflineURLCmd, }, } -var marketRPCInfoCmd = &cli.Command{ - Flags: []cli.Flag{ - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). 
Default: base", - }, - }, - Action: func(cctx *cli.Context) error { - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - layers := cctx.StringSlice("layers") - - cfg, err := deps.GetConfig(cctx.Context, layers, db) - if err != nil { - return xerrors.Errorf("get config: %w", err) - } - - ts, err := lmrpc.MakeTokens(cfg) - if err != nil { - return xerrors.Errorf("make tokens: %w", err) - } - - var addrTokens []struct { - Address string - Token string - } - - for address, s := range ts { - addrTokens = append(addrTokens, struct { - Address string - Token string - }{ - Address: address.String(), - Token: s, - }) - } - - sort.Slice(addrTokens, func(i, j int) bool { - return addrTokens[i].Address < addrTokens[j].Address - }) - - for _, at := range addrTokens { - fmt.Printf("[lotus-miner/boost compatible] %s %s\n", at.Address, at.Token) - } - - return nil - }, - Name: "rpc-info", -} - var marketSealCmd = &cli.Command{ Name: "seal", Usage: "start sealing a deal sector early", @@ -117,6 +68,162 @@ var marketSealCmd = &cli.Command{ return err } - return market.SealNow(ctx, dep.Chain, dep.DB, act, abi.SectorNumber(sector), cctx.Bool("synthetic")) + return storageingest.SealNow(ctx, dep.Chain, dep.DB, act, abi.SectorNumber(sector), cctx.Bool("synthetic")) + }, +} + +var marketAddOfflineURLCmd = &cli.Command{ + Name: "add-url", + Usage: "Add URL to fetch data for offline deals", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "file", + Usage: "CSV file location to use for multiple deal input. 
Each line in the file should be in the format 'uuid,raw size,url,header1,header2...'\"", + }, + &cli.StringSliceFlag{ + Name: "header", + Aliases: []string{"H"}, + Usage: "Custom `HEADER` to include in the HTTP request", + }, + &cli.StringFlag{ + Name: "url", + Aliases: []string{"u"}, + Usage: "`URL` to send the request to", + Required: true, + }, + }, + ArgsUsage: " ", + Action: func(cctx *cli.Context) error { + if !cctx.IsSet("file") && cctx.Args().Len() != 2 { + return xerrors.Errorf("incorrect number of arguments") + } + + ctx := reqcontext.ReqContext(cctx) + dep, err := deps.GetDepsCLI(ctx, cctx) + if err != nil { + return err + } + + if cctx.IsSet("file") { + // Read file line by line + fileStr := cctx.String("file") + loc, err := homedir.Expand(fileStr) + if err != nil { + return err + } + file, err := os.Open(loc) + if err != nil { + return err + } + defer file.Close() + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + // Extract pieceCid, pieceSize and MinerAddr from line + parts := strings.SplitN(line, ",", 4) + if len(parts) < 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" { + return fmt.Errorf("empty column value in the input file at %s", line) + } + + uuid := parts[0] + size, err := strconv.ParseInt(parts[1], 10, 64) + if err != nil { + return fmt.Errorf("failed to parse size %w", err) + } + + url := parts[2] + + if len(parts) == 4 && parts[3] != "" { + header := http.Header{} + for _, s := range strings.Split(parts[3], ",") { + key, value, found := strings.Cut(s, ":") + if !found { + return fmt.Errorf("invalid header format, expected key:value") + } + header.Set(strings.TrimSpace(key), strings.TrimSpace(value)) + } + + hdr, err := json.Marshal(header) + if err != nil { + return xerrors.Errorf("marshalling headers: %w", err) + } + _, err = dep.DB.Exec(ctx, `INSERT INTO market_offline_urls ( + uuid, + url, + headers, + raw_size + ) VALUES ($1, $2, $3, $4);`, + uuid, url, hdr, size) + if err != nil { + return xerrors.Errorf("adding details to DB: 
%w", err) + } + } else { + _, err = dep.DB.Exec(ctx, `INSERT INTO market_offline_urls ( + uuid, + url, + raw_size + ) VALUES ($1, $2, $3);`, + uuid, url, size) + if err != nil { + return xerrors.Errorf("adding details to DB: %w", err) + } + } + + if err := scanner.Err(); err != nil { + return err + } + } + } + + url := cctx.String("url") + + uuid := cctx.Args().First() + + sizeStr := cctx.Args().Get(1) + size, err := strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return xerrors.Errorf("parsing size: %w", err) + } + + if cctx.IsSet("header") { + // Split the header into key-value + header := http.Header{} + headerValue := cctx.StringSlice("header") + for _, s := range headerValue { + key, value, found := strings.Cut(s, ":") + if !found { + return fmt.Errorf("invalid header format, expected key:value") + } + header.Set(strings.TrimSpace(key), strings.TrimSpace(value)) + } + + hdr, err := json.Marshal(header) + if err != nil { + return xerrors.Errorf("marshalling headers: %w", err) + } + + _, err = dep.DB.Exec(ctx, `INSERT INTO market_offline_urls ( + uuid, + url, + headers, + raw_size + ) VALUES ($1, $2, $3, $4);`, + uuid, url, hdr, size) + if err != nil { + return xerrors.Errorf("adding details to DB: %w", err) + } + } else { + _, err = dep.DB.Exec(ctx, `INSERT INTO market_offline_urls ( + uuid, + url, + raw_size + ) VALUES ($1, $2, $3);`, + uuid, url, size) + if err != nil { + return xerrors.Errorf("adding details to DB: %w", err) + } + } + + return nil }, } diff --git a/cmd/curio/pipeline.go b/cmd/curio/pipeline.go index 86fc06454..54d8741a9 100644 --- a/cmd/curio/pipeline.go +++ b/cmd/curio/pipeline.go @@ -143,17 +143,33 @@ var sealStartCmd = &cli.Command{ } } - num, err := seal.AllocateSectorNumbers(ctx, dep.Chain, dep.DB, act, cctx.Int("count"), func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) { - for _, n := range numbers { - _, err := tx.Exec("insert into sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof, 
user_sector_duration_epochs) values ($1, $2, $3, $4)", mid, n, spt, userDuration) + var num []abi.SectorNumber + + comm, err := dep.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + num, err = seal.AllocateSectorNumbers(ctx, dep.Chain, tx, act, cctx.Int("count")) + if err != nil { + return false, err + } + + for _, n := range num { + _, err := tx.Exec("insert into sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) values ($1, $2, $3)", mid, n, spt) if err != nil { return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err) } } + + if err != nil { + return false, xerrors.Errorf("allocating sector numbers: %w", err) + } return true, nil }) + if err != nil { - return xerrors.Errorf("allocating sector numbers: %w", err) + return xerrors.Errorf("failed to allocate new sectors: %w", err) + } + + if !comm { + return xerrors.Errorf("failed to commit the transaction") } for _, number := range num { diff --git a/cmd/curio/rpc/rpc.go b/cmd/curio/rpc/rpc.go index ddd1c6e00..11d97050a 100644 --- a/cmd/curio/rpc/rpc.go +++ b/cmd/curio/rpc/rpc.go @@ -33,6 +33,7 @@ import ( "github.com/filecoin-project/curio/lib/metrics" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/repo" + storiface "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/web" lapi "github.com/filecoin-project/lotus/api" @@ -42,7 +43,6 @@ import ( "github.com/filecoin-project/lotus/metrics/proxy" "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) const metaFile = "sectorstore.json" diff --git a/cmd/curio/run.go b/cmd/curio/run.go index 9a359cddf..dedc78d3c 100644 --- a/cmd/curio/run.go +++ b/cmd/curio/run.go @@ -16,7 +16,6 @@ import ( "github.com/filecoin-project/curio/cmd/curio/tasks" "github.com/filecoin-project/curio/deps" 
"github.com/filecoin-project/curio/lib/shutdown" - "github.com/filecoin-project/curio/market/lmrpc" "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" @@ -131,10 +130,6 @@ var runCmd = &cli.Command{ } defer taskEngine.GracefullyTerminate() - if err := lmrpc.ServeCurioMarketRPCFromConfig(dependencies.DB, dependencies.Chain, dependencies.Cfg); err != nil { - return xerrors.Errorf("starting market RPCs: %w", err) - } - err = rpc.ListenAndServe(ctx, dependencies, shutdownChan) // Monitor for shutdown. if err != nil { return err diff --git a/cmd/curio/storage.go b/cmd/curio/storage.go index 09fb57a6c..ca15f0ebb 100644 --- a/cmd/curio/storage.go +++ b/cmd/curio/storage.go @@ -20,10 +20,10 @@ import ( "github.com/filecoin-project/curio/cmd/curio/rpc" "github.com/filecoin-project/curio/lib/reqcontext" + storiface "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var storageCmd = &cli.Command{ @@ -438,7 +438,7 @@ var storageFindCmd = &cli.Command{ } sectorTypes := []storiface.SectorFileType{ - storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache, + storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache, storiface.FTPiece, } byId := make(map[storiface.ID]*storedSector) diff --git a/cmd/curio/tasks/tasks.go b/cmd/curio/tasks/tasks.go index 94592c447..1e54dde78 100644 --- a/cmd/curio/tasks/tasks.go +++ b/cmd/curio/tasks/tasks.go @@ -30,13 +30,17 @@ import ( "github.com/filecoin-project/curio/lib/multictladdr" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/slotmgr" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/market/libp2p" "github.com/filecoin-project/curio/tasks/gc" + 
"github.com/filecoin-project/curio/tasks/indexing" "github.com/filecoin-project/curio/tasks/message" "github.com/filecoin-project/curio/tasks/metadata" piece2 "github.com/filecoin-project/curio/tasks/piece" "github.com/filecoin-project/curio/tasks/seal" "github.com/filecoin-project/curio/tasks/sealsupra" "github.com/filecoin-project/curio/tasks/snap" + storage_market "github.com/filecoin-project/curio/tasks/storage-market" window2 "github.com/filecoin-project/curio/tasks/window" "github.com/filecoin-project/curio/tasks/winning" @@ -44,7 +48,6 @@ import ( "github.com/filecoin-project/lotus/lib/lazy" "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var log = logging.Logger("curio/deps") @@ -87,6 +90,9 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task si := dependencies.Si bstore := dependencies.Bstore machine := dependencies.ListenAddr + iStore := dependencies.IndexStore + pp := dependencies.PieceProvider + var activeTasks []harmonytask.TaskInterface sender, sendTask := message.NewSender(full, full, db) @@ -192,14 +198,53 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.Task activeTasks = append(activeTasks, sealingTasks...) 
} - amTask := alertmanager.NewAlertTask(full, db, cfg.Alerting, dependencies.Al) - activeTasks = append(activeTasks, amTask) - minerAddresses := make([]string, 0, len(maddrs)) + miners := make([]address.Address, 0, len(maddrs)) for k := range maddrs { + miners = append(miners, address.Address(k)) minerAddresses = append(minerAddresses, address.Address(k).String()) } + { + // Market tasks + sc, err := slrLazy.Val() + if err != nil { + return nil, err + } + + if cfg.Subsystems.EnableDealMarket { + // Main market poller should run on all nodes + dm := storage_market.NewCurioStorageDealMarket(miners, db, cfg, sc, full) + err = dm.StartMarket(ctx) + if err != nil { + return nil, err + } + + if cfg.Subsystems.EnableCommP { + commpTask := storage_market.NewCommpTask(dm, db, must.One(slrLazy.Val()), full, cfg.Subsystems.CommPMaxTasks) + activeTasks = append(activeTasks, commpTask) + } + + // PSD and Deal find task do not require many resources. They can run on all machines + psdTask := storage_market.NewPSDTask(dm, db, sender, as, &cfg.Market.StorageMarketConfig.MK12, full) + dealFindTask := storage_market.NewFindDealTask(dm, db, full, &cfg.Market.StorageMarketConfig.MK12) + activeTasks = append(activeTasks, psdTask, dealFindTask) + + // Start libp2p hosts and handle streams + err = libp2p.NewDealProvider(ctx, db, cfg, dm.MK12Handler, full, machine) + if err != nil { + return nil, err + } + } + + indexingTask := indexing.NewIndexingTask(db, sc, iStore, pp, cfg) + activeTasks = append(activeTasks, indexingTask) + + } + + amTask := alertmanager.NewAlertTask(full, db, cfg.Alerting, dependencies.Al) + activeTasks = append(activeTasks, amTask) + log.Infow("This Curio instance handles", "miner_addresses", minerAddresses, "tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name })) diff --git a/deps/config/doc_gen.go b/deps/config/doc_gen.go index 3776faf02..2d0a0dc19 100644 --- a/deps/config/doc_gen.go +++ 
b/deps/config/doc_gen.go @@ -133,6 +133,12 @@ alerts will be triggered for the wallet`, Comment: ``, }, + { + Name: "Market", + Type: "MarketConfig", + + Comment: ``, + }, { Name: "Ingest", Type: "CurioIngestConfig", @@ -662,28 +668,6 @@ This step submits the generated proofs to the chain.`, Comment: `UpdateProveMaxTasks sets the maximum number of concurrent SnapDeal proving tasks that can run on this instance.`, }, - { - Name: "BoostAdapters", - Type: "[]string", - - Comment: `BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests. -This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations. -Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. We recommend using a private IP. -Example: "f0123:127.0.0.1:32100". Multiple addresses can be specified. - -When a market node like boost gives Curio's market RPC a deal to placing into a sector, Curio will first store the -deal data in a temporary location "Piece Park" before assigning it to a sector. This requires that at least one -node in the cluster has the EnableParkPiece option enabled and has sufficient scratch space to store the deal data. -This is different from lotus-miner which stored the deal data into an "unsealed" sector as soon as the deal was -received. Deal data in PiecePark is accessed when the sector TreeD and TreeR are computed, but isn't needed for -the initial SDR layers computation. Pieces in PiecePark are removed after all sectors referencing the piece are -sealed. 
- -To get API info for boost configuration run 'curio market rpc-info' - -NOTE: All deal data will flow through this service, so it should be placed on a machine running boost or on -a machine which handles ParkPiece tasks.`, - }, { Name: "EnableWebGui", Type: "bool", @@ -717,6 +701,33 @@ also be bounded by resources available on the machine.`, Comment: `Batch Seal`, }, + { + Name: "EnableDealMarket", + Type: "bool", + + Comment: `EnableDealMarket enables the deal market on the node. This would also enable libp2p on the node, if configured.`, + }, + { + Name: "EnableCommP", + Type: "bool", + + Comment: `EnableCommP enables the commP task on the node. CommP is calculated before sending PublishDealMessage for a Mk12 deal +Must have EnableDealMarket = True`, + }, + { + Name: "CommPMaxTasks", + Type: "int", + + Comment: `The maximum amount of CommP tasks that can run simultaneously. Note that the maximum number of tasks will +also be bounded by resources available on the machine.`, + }, + { + Name: "EnableLibp2p", + Type: "bool", + + Comment: `EnableLibp2p enables the libp2p module for the market. Must have EnableDealMarket set to true and must only be enabled +on a single node. Enabling on multiple nodes will cause issues with libp2p deals.`, + }, }, "Duration time.Duration": { { @@ -738,6 +749,108 @@ also be bounded by resources available on the machine.`, Comment: ``, }, }, + "IndexingConfig": { + { + Name: "InsertBatchSize", + Type: "int", + + Comment: `Number of records per insert batch`, + }, + { + Name: "InsertConcurrency", + Type: "int", + + Comment: `Number of concurrent inserts to split AddIndex calls to`, + }, + }, + "Libp2pConfig": { + { + Name: "DisabledMiners", + Type: "[]string", + + Comment: `Miners ID for which MK12 deals (boosts) should be disabled`, + }, + { + Name: "ListenAddresses", + Type: "[]string", + + Comment: `Binding address for the libp2p host - 0 means random port. 
+Format: multiaddress; see https://multiformats.io/multiaddr/`, + }, + { + Name: "AnnounceAddresses", + Type: "[]string", + + Comment: `Addresses to explicitly announce to other peers. If not specified, +all interface addresses are announced +Format: multiaddress`, + }, + { + Name: "NoAnnounceAddresses", + Type: "[]string", + + Comment: `Addresses to not announce +Format: multiaddress`, + }, + }, + "MK12Config": { + { + Name: "Libp2p", + Type: "Libp2pConfig", + + Comment: `Libp2p is a list of libp2p config for all miner IDs.`, + }, + { + Name: "PublishMsgPeriod", + Type: "Duration", + + Comment: `When a deal is ready to publish, the amount of time to wait for more +deals to be ready to publish before publishing them all as a batch`, + }, + { + Name: "MaxDealsPerPublishMsg", + Type: "uint64", + + Comment: `The maximum number of deals to include in a single PublishStorageDeals +message`, + }, + { + Name: "MaxPublishDealFee", + Type: "types.FIL", + + Comment: `The maximum fee to pay per deal when sending the PublishStorageDeals message`, + }, + { + Name: "ExpectedPoRepSealDuration", + Type: "Duration", + + Comment: `ExpectedPoRepSealDuration is the expected time it would take to seal the deal sector +This will be used to fail the deals which cannot be sealed on time.`, + }, + { + Name: "ExpectedSnapSealDuration", + Type: "Duration", + + Comment: `ExpectedSnapSealDuration is the expected time it would take to snap the deal sector +This will be used to fail the deals which cannot be sealed on time.`, + }, + { + Name: "SkipCommP", + Type: "bool", + + Comment: `SkipCommP can be used to skip doing a commP check before PublishDealMessage is sent on chain +Warning: If this check is skipped and there is a commP mismatch, all deals in the +sector will need to be sent again`, + }, + }, + "MarketConfig": { + { + Name: "StorageMarketConfig", + Type: "StorageMarketConfig", + + Comment: `StorageMarketConfig houses all the deal related market configuration`, + }, + }, 
"PagerDutyConfig": { { Name: "Enable", @@ -761,6 +874,20 @@ The default is sufficient for integration with the stock commercial PagerDuty.co identifier in the integration page for the service.`, }, }, + "PieceLocatorConfig": { + { + Name: "URL", + Type: "string", + + Comment: ``, + }, + { + Name: "Headers", + Type: "http.Header", + + Comment: ``, + }, + }, "PrometheusAlertManagerConfig": { { Name: "Enable", @@ -790,4 +917,28 @@ identifier in the integration page for the service.`, Example: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX`, }, }, + "StorageMarketConfig": { + { + Name: "PieceLocator", + Type: "[]PieceLocatorConfig", + + Comment: `PieceLocator is a list of HTTP url and headers combination to query for a piece for offline deals +User can run a remote file server which can host all the pieces over the HTTP and supply a reader when requested. +The server must have 2 endpoints +1. /pieces?id=pieceCID responds with 200 if found or 404 if not. Must send header "Content-Length" with file size as value +2. /data?id=pieceCID must provide a reader for the requested piece`, + }, + { + Name: "Indexing", + Type: "IndexingConfig", + + Comment: `Indexing configuration for deal indexing`, + }, + { + Name: "MK12", + Type: "MK12Config", + + Comment: `MK12 encompasses all configuration related to deal protocol mk1.2.0 and mk1.2.1 (i.e. 
Boost deals)`, + }, + }, } diff --git a/deps/config/types.go b/deps/config/types.go index 18386ae84..fd8513d92 100644 --- a/deps/config/types.go +++ b/deps/config/types.go @@ -1,6 +1,7 @@ package config import ( + "net/http" "time" "github.com/filecoin-project/lotus/chain/types" @@ -10,7 +11,6 @@ func DefaultCurioConfig() *CurioConfig { return &CurioConfig{ Subsystems: CurioSubsystemsConfig{ GuiAddress: "0.0.0.0:4701", - BoostAdapters: []string{}, RequireActivationSuccess: true, RequireNotificationSuccess: true, }, @@ -70,6 +70,28 @@ func DefaultCurioConfig() *CurioConfig { AlertManagerURL: "http://localhost:9093/api/v2/alerts", }, }, + Market: MarketConfig{ + StorageMarketConfig: StorageMarketConfig{ + PieceLocator: []PieceLocatorConfig{}, + Indexing: IndexingConfig{ + InsertConcurrency: 8, + InsertBatchSize: 15000, + }, + MK12: MK12Config{ + Libp2p: Libp2pConfig{ + DisabledMiners: []string{}, + ListenAddresses: []string{"/ip4/0.0.0.0/tcp/12200", "/ip4/0.0.0.0/udp/12280/quic-v1/webtransport"}, + AnnounceAddresses: []string{}, + NoAnnounceAddresses: []string{}, + }, + PublishMsgPeriod: Duration(5 * time.Minute), + MaxDealsPerPublishMsg: 8, + MaxPublishDealFee: types.MustParseFIL("0.5 FIL"), + ExpectedPoRepSealDuration: Duration(8 * time.Hour), + ExpectedSnapSealDuration: Duration(2 * time.Hour), + }, + }, + }, } } @@ -81,6 +103,7 @@ type CurioConfig struct { // Addresses of wallets per MinerAddress (one of the fields). Addresses []CurioAddresses Proving CurioProvingConfig + Market MarketConfig Ingest CurioIngestConfig Seal CurioSealConfig Apis ApisConfig @@ -234,25 +257,6 @@ type CurioSubsystemsConfig struct { // UpdateProveMaxTasks sets the maximum number of concurrent SnapDeal proving tasks that can run on this instance. UpdateProveMaxTasks int - // BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests. 
- // This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations. - // Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. We recommend using a private IP. - // Example: "f0123:127.0.0.1:32100". Multiple addresses can be specified. - // - // When a market node like boost gives Curio's market RPC a deal to placing into a sector, Curio will first store the - // deal data in a temporary location "Piece Park" before assigning it to a sector. This requires that at least one - // node in the cluster has the EnableParkPiece option enabled and has sufficient scratch space to store the deal data. - // This is different from lotus-miner which stored the deal data into an "unsealed" sector as soon as the deal was - // received. Deal data in PiecePark is accessed when the sector TreeD and TreeR are computed, but isn't needed for - // the initial SDR layers computation. Pieces in PiecePark are removed after all sectors referencing the piece are - // sealed. - // - // To get API info for boost configuration run 'curio market rpc-info' - // - // NOTE: All deal data will flow through this service, so it should be placed on a machine running boost or on - // a machine which handles ParkPiece tasks. - BoostAdapters []string - // EnableWebGui enables the web GUI on this curio instance. The UI has minimal local overhead, but it should // only need to be run on a single machine in the cluster. EnableWebGui bool @@ -270,6 +274,21 @@ type CurioSubsystemsConfig struct { // Batch Seal EnableBatchSeal bool + + // EnableDealMarket enables the deal market on the node. This would also enable libp2p on the node, if configured. + EnableDealMarket bool + + // EnableCommP enables the commP task on the node. CommP is calculated before sending PublishDealMessage for a Mk12 deal + // Must have EnableDealMarket = True + EnableCommP bool + + // The maximum amount of CommP tasks that can run simultaneously. 
Note that the maximum number of tasks will + // also be bounded by resources available on the machine. + CommPMaxTasks int + + // EnableLibp2p enables the libp2p module for the market. Must have EnableDealMarket set to true and must only be enabled + // on a single node. Enabling on multiple nodes will cause issues with libp2p deals. + EnableLibp2p bool } type CurioFees struct { DefaultMaxFee types.FIL @@ -548,3 +567,80 @@ type ApisConfig struct { // cat ~/.lotusminer/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU | jq -r .PrivateKey StorageRPCSecret string } + +type MarketConfig struct { + // StorageMarketConfig houses all the deal related market configuration + StorageMarketConfig StorageMarketConfig +} + +type StorageMarketConfig struct { + // PieceLocator is a list of HTTP url and headers combination to query for a piece for offline deals + // User can run a remote file server which can host all the pieces over the HTTP and supply a reader when requested. + // The server must have 2 endpoints + // 1. /pieces?id=pieceCID responds with 200 if found or 404 if not. Must send header "Content-Length" with file size as value + // 2. /data?id=pieceCID must provide a reader for the requested piece + PieceLocator []PieceLocatorConfig + + // Indexing configuration for deal indexing + Indexing IndexingConfig + + // MK12 encompasses all configuration related to deal protocol mk1.2.0 and mk1.2.1 (i.e. Boost deals) + MK12 MK12Config +} + +type MK12Config struct { + // Libp2p is a list of libp2p config for all miner IDs. 
+ Libp2p Libp2pConfig + + // When a deal is ready to publish, the amount of time to wait for more + // deals to be ready to publish before publishing them all as a batch + PublishMsgPeriod Duration + + // The maximum number of deals to include in a single PublishStorageDeals + // message + MaxDealsPerPublishMsg uint64 + + // The maximum fee to pay per deal when sending the PublishStorageDeals message + MaxPublishDealFee types.FIL + + // ExpectedPoRepSealDuration is the expected time it would take to seal the deal sector + // This will be used to fail the deals which cannot be sealed on time. + ExpectedPoRepSealDuration Duration + + // ExpectedSnapSealDuration is the expected time it would take to snap the deal sector + // This will be used to fail the deals which cannot be sealed on time. + ExpectedSnapSealDuration Duration + + // SkipCommP can be used to skip doing a commP check before PublishDealMessage is sent on chain + // Warning: If this check is skipped and there is a commP mismatch, all deals in the + // sector will need to be sent again + SkipCommP bool +} + +type PieceLocatorConfig struct { + URL string + Headers http.Header +} + +type IndexingConfig struct { + // Number of records per insert batch + InsertBatchSize int + + // Number of concurrent inserts to split AddIndex calls to + InsertConcurrency int +} + +type Libp2pConfig struct { + // Miners ID for which MK12 deals (boosts) should be disabled + DisabledMiners []string + // Binding address for the libp2p host - 0 means random port. + // Format: multiaddress; see https://multiformats.io/multiaddr/ + ListenAddresses []string + // Addresses to explicitly announce to other peers. 
If not specified, + // all interface addresses are announced + // Format: multiaddress + AnnounceAddresses []string + // Addresses to not announce + // Format: multiaddress + NoAnnounceAddresses []string +} diff --git a/deps/deps.go b/deps/deps.go index 50b60d5eb..d84ad5268 100644 --- a/deps/deps.go +++ b/deps/deps.go @@ -37,7 +37,10 @@ import ( "github.com/filecoin-project/curio/lib/curiochain" "github.com/filecoin-project/curio/lib/multictladdr" "github.com/filecoin-project/curio/lib/paths" + "github.com/filecoin-project/curio/lib/pieceprovider" "github.com/filecoin-project/curio/lib/repo" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/market/indexstore" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -46,7 +49,6 @@ import ( lrepo "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var log = logging.Logger("curio/deps") @@ -165,23 +167,25 @@ func GetDeps(ctx context.Context, cctx *cli.Context) (*Deps, error) { } type Deps struct { - Layers []string - Cfg *config.CurioConfig // values - DB *harmonydb.DB // has itest capability - Chain api.Chain - Bstore curiochain.CurioBlockstore - Verif storiface.Verifier - As *multictladdr.MultiAddressSelector - Maddrs map[dtypes.MinerAddress]bool - ProofTypes map[abi.RegisteredSealProof]bool - Stor *paths.Remote - Al *curioalerting.AlertingSystem - Si paths.SectorIndex - LocalStore *paths.Local - LocalPaths *paths.BasicLocalStorage - ListenAddr string - Name string - Alert *alertmanager.AlertNow + Layers []string + Cfg *config.CurioConfig // values + DB *harmonydb.DB // has itest capability + Chain api.Chain + Bstore curiochain.CurioBlockstore + Verif storiface.Verifier + As *multictladdr.MultiAddressSelector + Maddrs map[dtypes.MinerAddress]bool + 
ProofTypes map[abi.RegisteredSealProof]bool + Stor *paths.Remote + Al *curioalerting.AlertingSystem + Si paths.SectorIndex + LocalStore *paths.Local + LocalPaths *paths.BasicLocalStorage + ListenAddr string + Name string + Alert *alertmanager.AlertNow + IndexStore *indexstore.IndexStore + PieceProvider *pieceprovider.PieceProvider } const ( @@ -348,6 +352,17 @@ Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, deps.Name = cctx.String("name") } + if deps.IndexStore == nil { + deps.IndexStore, err = indexstore.NewIndexStore(strings.Split(cctx.String("db-host"), ","), deps.Cfg) + if err != nil { + return xerrors.Errorf("failed to start index store: %w", err) + } + } + + if deps.PieceProvider == nil { + deps.PieceProvider = pieceprovider.NewPieceProvider(deps.Stor, deps.Si) + } + return nil } diff --git a/docker/curio/entrypoint.sh b/docker/curio/entrypoint.sh index 0d93682b0..2ef6294aa 100755 --- a/docker/curio/entrypoint.sh +++ b/docker/curio/entrypoint.sh @@ -36,7 +36,8 @@ if [ ! -f $CURIO_REPO_PATH/.init.curio ]; then echo Initiating a new Curio cluster ... curio config new-cluster $newminer echo Enabling market ... 
- curio config get seal | sed -e $'$a\\\n BoostAdapters = ["'"$newminer"':'"$myip"':32100"]\n EnableParkPiece = true' | curio config set --title seal + curio config get seal | sed -e $'$a\\\n EnableParkPiece = true' | curio config set --title seal + curio config get base | sed -e 's/#Miners = \[\]/Miners = ["'"$newminer"'"]/g' | curio config set --title base touch $CURIO_REPO_PATH/.init.config fi diff --git a/documentation/en/configuration/default-curio-configuration.md b/documentation/en/configuration/default-curio-configuration.md index 24aab3ad5..c113dc8d8 100644 --- a/documentation/en/configuration/default-curio-configuration.md +++ b/documentation/en/configuration/default-curio-configuration.md @@ -194,27 +194,6 @@ description: The default curio configuration # type: int #UpdateProveMaxTasks = 0 - # BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests. - # This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations. - # Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. We recommend using a private IP. - # Example: "f0123:127.0.0.1:32100". Multiple addresses can be specified. - # - # When a market node like boost gives Curio's market RPC a deal to placing into a sector, Curio will first store the - # deal data in a temporary location "Piece Park" before assigning it to a sector. This requires that at least one - # node in the cluster has the EnableParkPiece option enabled and has sufficient scratch space to store the deal data. - # This is different from lotus-miner which stored the deal data into an "unsealed" sector as soon as the deal was - # received. Deal data in PiecePark is accessed when the sector TreeD and TreeR are computed, but isn't needed for - # the initial SDR layers computation. Pieces in PiecePark are removed after all sectors referencing the piece are - # sealed. 
- # - # To get API info for boost configuration run 'curio market rpc-info' - # - # NOTE: All deal data will flow through this service, so it should be placed on a machine running boost or on - # a machine which handles ParkPiece tasks. - # - # type: []string - #BoostAdapters = [] - # EnableWebGui enables the web GUI on this curio instance. The UI has minimal local overhead, but it should # only need to be run on a single machine in the cluster. # @@ -243,6 +222,29 @@ description: The default curio configuration # type: bool #EnableBatchSeal = false + # EnableDealMarket enables the deal market on the node. This would also enable libp2p on the node, if configured. + # + # type: bool + #EnableDealMarket = false + + # EnableCommP enables the commP task on the node. CommP is calculated before sending PublishDealMessage for a Mk12 deal + # Must have EnableDealMarket = True + # + # type: bool + #EnableCommP = false + + # The maximum amount of CommP tasks that can run simultaneously. Note that the maximum number of tasks will + # also be bounded by resources available on the machine. + # + # type: int + #CommPMaxTasks = 0 + + # EnableLibp2p enables the libp2p module for the market. Must have EnableDealMarket set to true and must only be enabled + # on a single node. Enabling on multiple nodes will cause issues with libp2p deals. + # + # type: bool + #EnableLibp2p = false + [Fees] # type: types.FIL @@ -400,6 +402,91 @@ description: The default curio configuration #SingleRecoveringPartitionPerPostMessage = false +[Market] + [Market.StorageMarketConfig] + # PieceLocator is a list of HTTP url and headers combination to query for a piece for offline deals + # User can run a remote file server which can host all the pieces over the HTTP and supply a reader when requested. + # The server must have 2 endpoints + # 1. /pieces?id=pieceCID responds with 200 if found or 404 if not. Must send header "Content-Length" with file size as value + # 2. 
/data?id=pieceCID must provide a reader for the requested piece + # + # type: []PieceLocatorConfig + #PieceLocator = [] + + [Market.StorageMarketConfig.Indexing] + # Number of records per insert batch + # + # type: int + #InsertBatchSize = 15000 + + # Number of concurrent inserts to split AddIndex calls to + # + # type: int + #InsertConcurrency = 8 + + [Market.StorageMarketConfig.MK12] + # When a deal is ready to publish, the amount of time to wait for more + # deals to be ready to publish before publishing them all as a batch + # + # type: Duration + #PublishMsgPeriod = "5m0s" + + # The maximum number of deals to include in a single PublishStorageDeals + # message + # + # type: uint64 + #MaxDealsPerPublishMsg = 8 + + # The maximum fee to pay per deal when sending the PublishStorageDeals message + # + # type: types.FIL + #MaxPublishDealFee = "0.5 FIL" + + # ExpectedPoRepSealDuration is the expected time it would take to seal the deal sector + # This will be used to fail the deals which cannot be sealed on time. + # + # type: Duration + #ExpectedPoRepSealDuration = "8h0m0s" + + # ExpectedSnapSealDuration is the expected time it would take to snap the deal sector + # This will be used to fail the deals which cannot be sealed on time. + # + # type: Duration + #ExpectedSnapSealDuration = "2h0m0s" + + # SkipCommP can be used to skip doing a commP check before PublishDealMessage is sent on chain + # Warning: If this check is skipped and there is a commP mismatch, all deals in the + # sector will need to be sent again + # + # type: bool + #SkipCommP = false + + [Market.StorageMarketConfig.MK12.Libp2p] + # Miners ID for which MK12 deals (boosts) should be disabled + # + # type: []string + #DisabledMiners = [] + + # Binding address for the libp2p host - 0 means random port. 
+ # Format: multiaddress; see https://multiformats.io/multiaddr/ + # + # type: []string + #ListenAddresses = ["/ip4/0.0.0.0/tcp/12200", "/ip4/0.0.0.0/udp/12280/quic-v1/webtransport"] + + # Addresses to explicitly announce to other peers. If not specified, + # all interface addresses are announced + # Format: multiaddress + # + # type: []string + #AnnounceAddresses = [] + + # Addresses to not announce + # Format: multiaddress + # + # type: []string + #NoAnnounceAddresses = [] + + [Ingest] # Maximum number of sectors that can be queued waiting for deals to start processing. # 0 = unlimited diff --git a/documentation/en/curio-cli/curio.md b/documentation/en/curio-cli/curio.md index f8e86d4bb..5d82e1082 100644 --- a/documentation/en/curio-cli/curio.md +++ b/documentation/en/curio-cli/curio.md @@ -547,39 +547,41 @@ curio market command [command options] [arguments...] COMMANDS: - rpc-info - seal start sealing a deal sector early - help, h Shows a list of commands or help for one command + seal start sealing a deal sector early + add-url Add URL to fetch data for offline deals + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help ``` -### curio market rpc-info +### curio market seal ``` NAME: - curio market rpc-info + curio market seal - start sealing a deal sector early USAGE: - curio market rpc-info [command options] + curio market seal [command options] OPTIONS: - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). 
Default: base - --help, -h show help + --actor value Specify actor address to start sealing sectors for + --synthetic Use synthetic PoRep (default: false) + --help, -h show help ``` -### curio market seal +### curio market add-url ``` NAME: - curio market seal - start sealing a deal sector early + curio market add-url - Add URL to fetch data for offline deals USAGE: - curio market seal [command options] + curio market add-url [command options] OPTIONS: - --actor value Specify actor address to start sealing sectors for - --synthetic Use synthetic PoRep (default: false) - --help, -h show help + --file value CSV file location to use for multiple deal input. Each line in the file should be in the format 'uuid,raw size,url,header1,header2...'" + --header HEADER, -H HEADER [ --header HEADER, -H HEADER ] Custom HEADER to include in the HTTP request + --url URL, -u URL URL to send the request to + --help, -h show help ``` ## curio fetch-params diff --git a/go.mod b/go.mod index 0ec897a05..0ed8d835d 100644 --- a/go.mod +++ b/go.mod @@ -19,9 +19,10 @@ require ( github.com/filecoin-project/go-address v1.1.0 github.com/filecoin-project/go-bitfield v0.2.4 github.com/filecoin-project/go-cbor-util v0.0.1 - github.com/filecoin-project/go-commp-utils v0.1.3 - github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 + github.com/filecoin-project/go-commp-utils v0.1.4 + github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20240802040721-2a04ffc8ffe8 github.com/filecoin-project/go-fil-commcid v0.1.0 + github.com/filecoin-project/go-fil-commp-hashhash v0.2.0 github.com/filecoin-project/go-jsonrpc v0.6.1-0.20240820160949-2cfe810e5d2f github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-state-types v0.14.0 @@ -38,24 +39,32 @@ require ( github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/gorilla/websocket v1.5.3 + github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c 
github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 github.com/invopop/jsonschema v0.12.0 + github.com/ipfs/boxo v0.20.0 github.com/ipfs/go-block-format v0.2.0 github.com/ipfs/go-cid v0.4.1 + github.com/ipfs/go-cidutil v0.1.0 github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-fs-lock v0.0.7 github.com/ipfs/go-ipld-cbor v0.1.0 + github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log/v2 v2.5.1 + github.com/ipld/go-car/v2 v2.13.1 + github.com/ipni/go-libipni v0.0.8 github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 github.com/kelseyhightower/envconfig v1.4.0 github.com/libp2p/go-buffer-pool v0.1.0 + github.com/libp2p/go-libp2p v0.35.4 github.com/manifoldco/promptui v0.9.0 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/minio/sha256-simd v1.0.1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-multiaddr v0.12.4 + github.com/multiformats/go-multihash v0.2.3 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.1 @@ -66,7 +75,9 @@ require ( github.com/snadrus/must v0.0.0-20240605044437-98cedd57f8eb github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.25.5 - github.com/whyrusleeping/cbor-gen v0.1.1 + github.com/whyrusleeping/cbor-gen v0.1.2 + github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 + github.com/yugabyte/gocql v1.6.0-yb-1 github.com/yugabyte/pgx/v5 v5.5.3-yb-2 go.opencensus.io v0.24.0 go.uber.org/multierr v1.11.0 @@ -74,10 +85,10 @@ require ( golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 golang.org/x/net v0.26.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.21.0 + golang.org/x/sys v0.23.0 golang.org/x/text v0.16.0 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d - golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 + golang.org/x/xerrors 
v0.0.0-20240716161551-93cc26a95ae9 ) require ( @@ -105,6 +116,7 @@ require ( github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/daaku/go.zipexe v1.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -124,11 +136,11 @@ require ( github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect github.com/filecoin-project/go-amt-ipld/v4 v4.3.0 // indirect github.com/filecoin-project/go-clock v0.1.0 // indirect - github.com/filecoin-project/go-crypto v0.0.1 // indirect - github.com/filecoin-project/go-f3 v0.0.7 // indirect + github.com/filecoin-project/go-crypto v0.1.0 // indirect + github.com/filecoin-project/go-f3 v0.2.0 // indirect github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect - github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/go-hamt-ipld/v3 v3.4.0 // indirect github.com/filecoin-project/go-paramfetch v0.0.4 // indirect github.com/filecoin-project/go-statemachine v1.0.3 // indirect github.com/filecoin-project/go-storedcounter v0.1.0 // indirect @@ -159,6 +171,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240509144519-723abb6459b7 // indirect + github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 // indirect github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -167,7 +180,6 @@ require ( github.com/huin/goupnp v1.3.0 // indirect github.com/iancoleman/orderedmap v0.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/boxo v0.20.0 
// indirect github.com/ipfs/go-blockservice v0.5.2 // indirect github.com/ipfs/go-ds-badger2 v0.1.3 // indirect github.com/ipfs/go-ds-leveldb v0.5.0 // indirect @@ -178,7 +190,6 @@ require ( github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect github.com/ipfs/go-ipfs-util v0.0.3 // indirect - github.com/ipfs/go-ipld-format v0.6.0 // indirect github.com/ipfs/go-ipld-legacy v0.2.1 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-merkledag v0.11.0 // indirect @@ -186,11 +197,8 @@ require ( github.com/ipfs/go-peertaskqueue v0.8.1 // indirect github.com/ipfs/go-verifcid v0.0.3 // indirect github.com/ipld/go-car v0.6.2 // indirect - github.com/ipld/go-car/v2 v2.13.1 // indirect github.com/ipld/go-codec-dagpb v1.6.0 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect - github.com/ipni/go-libipni v0.0.8 // indirect - github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect @@ -201,13 +209,12 @@ require ( github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect - github.com/kilic/bls12-381 v0.1.0 // indirect - github.com/klauspost/compress v1.17.8 // indirect - github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/kilic/bls12-381 v0.1.1-0.20220929213557-ca162e8a70f4 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p v0.35.4 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-kad-dht v0.25.2 // 
indirect github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect @@ -240,7 +247,6 @@ require ( github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect - github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -287,14 +293,16 @@ require ( github.com/valyala/fasttemplate v1.0.1 // indirect github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect + github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect - github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-filecoin-go v0.11.1 // indirect github.com/zondax/ledger-go v0.14.3 // indirect github.com/zyedidia/generic v1.2.1 // indirect + gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect + gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/bridge/opencensus v1.28.0 // indirect go.opentelemetry.io/otel/exporters/jaeger v1.14.0 // indirect @@ -308,15 +316,16 @@ require ( go.uber.org/fx v1.22.1 // indirect go.uber.org/mock v0.4.0 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.24.0 // indirect + golang.org/x/crypto v0.25.0 // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/term v0.21.0 // indirect + golang.org/x/term 
v0.22.0 // indirect golang.org/x/time v0.5.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect google.golang.org/grpc v1.64.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect diff --git a/go.sum b/go.sum index 7e5c89848..aa17e7f25 100644 --- a/go.sum +++ b/go.sum @@ -87,6 +87,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/ardanlabs/darwin/v2 v2.0.0 h1:XCisQMgQ5EG+ZvSEcADEo+pyfIMKyWAGnn5o2TgriYE= github.com/ardanlabs/darwin/v2 v2.0.0/go.mod h1:MubZ2e9DAYGaym0mClSOi183NYahrrfKxvSy1HMhoes= @@ -103,6 +105,10 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 
h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -179,7 +185,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= +github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/daaku/go.zipexe v1.0.2 h1:Zg55YLYTr7M9wjKn8SY/WcpuuEi+kR2u4E8RhvpyXmk= @@ -255,7 +262,6 @@ github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38a github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod 
h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= github.com/filecoin-project/go-amt-ipld/v4 v4.0.0/go.mod h1:gF053YQ4BIpzTNDoEwHZas7U3oAwncDVGvOHyY8oDpE= -github.com/filecoin-project/go-amt-ipld/v4 v4.2.0/go.mod h1:0eDVF7pROvxrsxvLJx+SJZXqRaXXcEPUcgb/rG0zGU4= github.com/filecoin-project/go-amt-ipld/v4 v4.3.0 h1:bY42N1gR0DqrLMCKUPzX1VhYVgXaETQm0Um4ohvyEP8= github.com/filecoin-project/go-amt-ipld/v4 v4.3.0/go.mod h1:39Ep/yBbF6xN94WevLG9qSbglBJepHa5zeEbAE1pYsc= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= @@ -267,28 +273,29 @@ github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CY github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= -github.com/filecoin-project/go-commp-utils v0.1.3 h1:rTxbkNXZU7FLgdkBk8RsQIEOuPONHykEoX3xGk41Fkw= -github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9ANQrY3fDFoXdqyX04J+dWpK30= -github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 h1:4cITW0pwgvqLs86Q9bWQa34+jBfR1V687bDkmv2DgnA= -github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837/go.mod h1:e2YBjSblNVoBckkbv3PPqsq71q98oFkFqL7s1etViGo= +github.com/filecoin-project/go-commp-utils v0.1.4 h1:/WSsrAb0xupo+aRWRyD80lRUXAXJvYoTgDQS1pYZ1Mk= +github.com/filecoin-project/go-commp-utils v0.1.4/go.mod h1:Sekocu5q9b4ECAUFu853GFUbm8I7upAluummHFe2kFo= +github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20240802040721-2a04ffc8ffe8 h1:jAG2g1Fs/qoDSSaI8JaP/KmqR+QQ8IVQ6k9xKONa72M= +github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20240802040721-2a04ffc8ffe8/go.mod h1:kU2KuSPLB+Xz4FEbVE0abzSN4l6irZ8tqgcYWPVDftU= github.com/filecoin-project/go-crypto 
v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= -github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-f3 v0.0.7 h1:dqmxtQXfX1r3hhFZvCszqryg80MZJmfcPFL3nhyHCVA= -github.com/filecoin-project/go-f3 v0.0.7/go.mod h1:ihW5IGLBEuW8pVc9t5MQiAhdzv95EBBfnnrGfMfEbTY= -github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-crypto v0.1.0 h1:Pob2MphoipMbe/ksxZOMcQvmBHAd3sI/WEqcbpIsGI0= +github.com/filecoin-project/go-crypto v0.1.0/go.mod h1:K9UFXvvoyAVvB+0Le7oGlKiT9mgA5FHOJdYQXEE8IhI= +github.com/filecoin-project/go-f3 v0.2.0 h1:Gis44+hOrDjSUEw3IDmU7CudNILi5e+bb1pgZgp680k= +github.com/filecoin-project/go-f3 v0.2.0/go.mod h1:43fBLX0iX0+Nnw4Z91wSrdfDYAd6YEDexy7GcLnIJtk= github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commp-hashhash v0.2.0 h1:HYIUugzjq78YvV3vC6rL95+SfC/aSTVSnZSZiDV5pCk= +github.com/filecoin-project/go-fil-commp-hashhash v0.2.0/go.mod h1:VH3fAFOru4yyWar4626IoS5+VGE8SfZiBODJLUigEo4= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= -github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 
h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= +github.com/filecoin-project/go-hamt-ipld/v3 v3.4.0 h1:nYs6OPUF8KbZ3E8o9p9HJnQaE8iugjHR5WYVMcicDJc= +github.com/filecoin-project/go-hamt-ipld/v3 v3.4.0/go.mod h1:s0qiHRhFyrgW0SvdQMSJFQxNa4xEIG5XvqCBZUEgcbc= github.com/filecoin-project/go-jsonrpc v0.6.1-0.20240820160949-2cfe810e5d2f h1:0FMH/uwBH7RinWrE+TkiOotYoqxSM54teKx/olJ/cWs= github.com/filecoin-project/go-jsonrpc v0.6.1-0.20240820160949-2cfe810e5d2f/go.mod h1:/n/niXcS4ZQua6i37LcVbY1TmlJR0UIK9mDFQq2ICek= -github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-paramfetch v0.0.4 h1:H+Me8EL8T5+79z/KHYQQcT8NVOzYVqXIi7nhb48tdm8= @@ -298,8 +305,6 @@ github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= -github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= -github.com/filecoin-project/go-state-types v0.14.0-rc1/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY= github.com/filecoin-project/go-state-types v0.14.0 h1:JFw8r/LA0/Hvu865Yn2Gz3R5e2woItKeHTgbT4VsXoU= github.com/filecoin-project/go-state-types v0.14.0/go.mod h1:cDbxwjbmVtV+uNi5D/cFtxKlsRqibnQNlz7xQA1EqYg= github.com/filecoin-project/go-statemachine v1.0.3 
h1:N07o6alys+V1tNoSTi4WuuoeNC4erS/6jE74+NsgQuk= @@ -336,7 +341,6 @@ github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -469,9 +473,7 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -520,8 +522,12 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= 
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 h1:BpJ2o0OR5FV7vrkDYfXYVJQeMNWa8RhklZOpW2ITAIQ= github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= +github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c h1:iiD+p+U0M6n/FsO6XIZuOgobnNa48FxtyYFfWwLttUQ= +github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -531,7 +537,6 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= @@ -583,10 +588,10 @@ github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67Fexh 
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= -github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= +github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= @@ -610,7 +615,6 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-fs-lock v0.0.7 h1:6BR3dajORFrFTkb5EpCUFIAypsoxpGpDSVUdFwzgL9U= github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= -github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ= github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= @@ -622,7 +626,6 @@ github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1Y github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= 
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= -github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw= github.com/ipfs/go-ipfs-ds-help v1.1.1/go.mod h1:75vrVCkSdSFidJscs8n4W+77AtTpCIAdDGAwjitJMIo= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= @@ -647,15 +650,12 @@ github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-cbor v0.1.0 h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopoYs= github.com/ipfs/go-ipld-cbor v0.1.0/go.mod h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= -github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= 
github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= @@ -667,7 +667,6 @@ github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= @@ -697,7 +696,6 @@ github.com/ipld/go-car/v2 v2.13.1/go.mod h1:QkdjjFNGit2GIkpQ953KBwowuoukoM75nP/J github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= -github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= @@ -760,8 +758,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod 
h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= -github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= -github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= +github.com/kilic/bls12-381 v0.1.1-0.20220929213557-ca162e8a70f4 h1:xWK4TZ4bRL05WQUU/3x6TG1l+IYAqdXpAeSLt/zZJc4= +github.com/kilic/bls12-381 v0.1.1-0.20220929213557-ca162e8a70f4/go.mod h1:tlkavyke+Ac7h8R3gZIjI5LKBcvMlSWnXNMgT3vZXo8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -769,13 +767,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= -github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 
v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= @@ -785,7 +780,6 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -937,7 +931,6 @@ github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+ github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -959,7 +952,6 @@ github.com/muesli/reflow v0.3.0/go.mod 
h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKt github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= -github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= @@ -985,7 +977,6 @@ github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= @@ -995,8 +986,6 @@ github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= 
-github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= @@ -1004,7 +993,6 @@ github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -1109,7 +1097,6 @@ github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXx github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/prometheus/client_golang v0.8.0/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1166,7 +1153,6 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -1219,7 +1205,6 @@ github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrx github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/snadrus/must v0.0.0-20240605044437-98cedd57f8eb h1:78YgPq3NbWnO4xyNhLsn2zitc7NiZpjQZ560rsxVLm4= @@ -1278,6 +1263,7 @@ github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/cli/v2 v2.25.5 h1:d0NIAyhh5shGscroL7ek/Ya9QYQE0KNabJgiUinIQkc= github.com/urfave/cli/v2 v2.25.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= @@ -1286,12 +1272,10 @@ github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8W github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/weaveworks/common v0.0.0-20230531151736-e2613bee6b73 h1:CMM9+/AgM77vaMXMQedzqPRMuNwjbI0EcdofPqxc9F8= @@ -1306,17 +1290,15 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:x github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= 
github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.1.0/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= -github.com/whyrusleeping/cbor-gen v0.1.1 h1:eKfcJIoxivjMtwfCfmJAqSF56MHcWqyIScXwaC1VBgw= -github.com/whyrusleeping/cbor-gen v0.1.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/whyrusleeping/cbor-gen v0.1.2 h1:WQFlrPhpcQl+M2/3dP5cvlTLWPVsL6LGBb9jJt6l/cA= +github.com/whyrusleeping/cbor-gen v0.1.2/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker 
v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -1335,6 +1317,8 @@ github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZM github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= +github.com/yugabyte/gocql v1.6.0-yb-1 h1:3anNiHsJwKQ8Dn7RdmkTEuIzV1l7e9QJZ8wkOZ87ELg= +github.com/yugabyte/gocql v1.6.0-yb-1/go.mod h1:LAokR6+vevDCrTxk52U7p6ki+4qELu4XU7JUGYa2O2M= github.com/yugabyte/pgx/v5 v5.5.3-yb-2 h1:SDk2waZb2o6dSLYqk+vq0Ur2jnIv+X2A+P+QPR1UThU= github.com/yugabyte/pgx/v5 v5.5.3-yb-2/go.mod h1:2SxizGfDY7UDCRTtbI/xd98C/oGN7S/3YoGF8l9gx/c= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1351,6 +1335,10 @@ github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfU github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XDc= github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis= +gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b h1:CzigHMRySiX3drau9C6Q5CAbNIApmLdat5jPMqChvDA= +gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b/go.mod h1:/y/V339mxv2sZmYYR64O07VuCpdNZqCTwO8ZcouTMI8= +gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 h1:qwDnMxjkyLmAFgcfgTnfJrmYKWhHnci3GjDqcZp1M3Q= +gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02/go.mod h1:JTnUj0mpYiAsuZLmKjTx/ex3AtMowcCgnE7YNyCEP0I= go.dedis.ch/fixbuf v1.0.3 
h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= @@ -1433,21 +1421,16 @@ golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod 
h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1532,11 +1515,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= @@ -1632,11 +1613,9 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1648,13 +1627,11 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1662,28 +1639,24 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term 
v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1691,7 +1664,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= @@ -1765,8 +1737,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= @@ -1879,6 +1851,7 @@ gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1892,7 +1865,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= @@ -1908,8 +1880,6 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/harmony/harmonydb/sql/20240228-piece-park.sql b/harmony/harmonydb/sql/20240228-piece-park.sql index add0a4093..efd529da7 100644 --- a/harmony/harmonydb/sql/20240228-piece-park.sql +++ b/harmony/harmonydb/sql/20240228-piece-park.sql @@ -33,5 +33,8 @@ create table parked_piece_refs ( data_url text, data_headers jsonb not null default '{}', + -- host Added in 202240730-market-migrations.sql + -- host text, + foreign key (piece_id) references parked_pieces(id) on delete cascade ); diff --git a/harmony/harmonydb/sql/20240731-market-migration.sql b/harmony/harmonydb/sql/20240731-market-migration.sql new file mode 100644 index 000000000..7eaef853b --- /dev/null +++ 
b/harmony/harmonydb/sql/20240731-market-migration.sql @@ -0,0 +1,336 @@ +-- Table for Mk12 or Boost deals (Main deal table) +-- Stores the deal received over the network. +-- Entries are created by mk12 module and this will be used +-- by UI to show deal details. Entries should never be removed from this table. +CREATE TABLE market_mk12_deals ( + uuid TEXT NOT NULL, + sp_id BIGINT NOT NULL, + + created_at TIMESTAMPTZ NOT NULL DEFAULT TIMEZONE('UTC', NOW()), + + signed_proposal_cid TEXT NOT NULL, + proposal_signature BYTEA NOT NULL, + proposal jsonb NOT NULL, + + offline BOOLEAN NOT NULL, + verified BOOLEAN NOT NULL, + + start_epoch BIGINT NOT NULL, + end_epoch BIGINT NOT NULL, + + client_peer_id TEXT NOT NULL, + + chain_deal_id BIGINT DEFAULT NULL, + publish_cid TEXT DEFAULT NULL, + + piece_cid TEXT NOT NULL, + piece_size BIGINT NOT NULL, + + fast_retrieval BOOLEAN NOT NULL, + announce_to_ipni BOOLEAN NOT NULL, + + url TEXT DEFAULT NULL, + url_headers jsonb NOT NULL DEFAULT '{}', + + error TEXT DEFAULT NULL, + + primary key (uuid, sp_id, piece_cid, signed_proposal_cid), + unique (uuid), + unique (signed_proposal_cid) +); + +-- This table is used for storing piece metadata (piece indexing). Entries are added by task_indexing. +-- It is also used to track if a piece is indexed or not. +-- Version is used to track changes of how metadata is stored. +-- Cleanup for this table will be created in a later stage. +CREATE TABLE market_piece_metadata ( + piece_cid TEXT NOT NULL PRIMARY KEY, + + version INT NOT NULL DEFAULT 2, -- Boost stored in version 1. This is version 2. + + created_at TIMESTAMPTZ NOT NULL DEFAULT TIMEZONE('UTC', NOW()), + + indexed BOOLEAN NOT NULL DEFAULT FALSE, + indexed_at TIMESTAMPTZ NOT NULL DEFAULT TIMEZONE('UTC', NOW()), + + constraint market_piece_meta_identity_key + unique (piece_cid) +); + +-- This table binds the piece metadata to specific deals (piece indexing). Entries are added by task_indexing. 
+-- This along with market_mk12_deals is used to retrievals as well as +-- deal detail page in UI. +-- Cleanup for this table will be created in a later stage. +CREATE TABLE market_piece_deal ( + id TEXT NOT NULL, -- (UUID for new deals, PropCID for old) + piece_cid TEXT NOT NULL, + + boost_deal BOOLEAN NOT NULL, + legacy_deal BOOLEAN NOT NULL DEFAULT FALSE, + + chain_deal_id BIGINT NOT NULL DEFAULT 0, + + sp_id BIGINT NOT NULL, + sector_num BIGINT NOT NULL, + + piece_offset BIGINT NOT NULL, + piece_length BIGINT NOT NULL, + raw_size BIGINT NOT NULL, + + primary key (sp_id, piece_cid, id), + constraint market_piece_deal_identity_key + unique (sp_id, id) +); + +-- This function is used to insert piece metadata and piece deal (piece indexing) +-- This makes it easy to keep the logic of how table is updated and fast (in DB). +CREATE OR REPLACE FUNCTION process_piece_deal( + _id TEXT, + _piece_cid TEXT, + _boost_deal BOOLEAN, + _sp_id BIGINT, + _sector_num BIGINT, + _piece_offset BIGINT, + _piece_length BIGINT, + _raw_size BIGINT, + _indexed BOOLEAN, + _legacy_deal BOOLEAN DEFAULT FALSE, + _chain_deal_id BIGINT DEFAULT 0 +) +RETURNS VOID AS $$ +BEGIN + -- Insert or update the market_piece_metadata table +INSERT INTO market_piece_metadata (piece_cid, indexed) +VALUES (_piece_cid, _indexed) + ON CONFLICT (piece_cid) DO UPDATE SET + indexed = CASE + WHEN market_piece_metadata.indexed = FALSE THEN EXCLUDED.indexed + ELSE market_piece_metadata.indexed +END; + + -- Insert into the market_piece_deal table +INSERT INTO market_piece_deal ( + id, piece_cid, boost_deal, legacy_deal, chain_deal_id, + sp_id, sector_num, piece_offset, piece_length, raw_size + ) VALUES ( + _id, _piece_cid, _boost_deal, _legacy_deal, _chain_deal_id, + _sp_id, _sector_num, _piece_offset, _piece_length, _raw_size + ) ON CONFLICT (sp_id, piece_cid, id) DO NOTHING; + +END; +$$ LANGUAGE plpgsql; + +-- Storage Ask for ask protocol over libp2p +-- Entries for each MinerID must be present. 
These are updated by SetAsk method in mk12. +CREATE TABLE market_mk12_storage_ask ( + sp_id BIGINT NOT NULL, + + price BIGINT NOT NULL, + verified_price BIGINT NOT NULL, + + min_size BIGINT NOT NULL, + max_size BIGINT NOT NULL, + + created_at BIGINT NOT NULL, + expiry BIGINT NOT NULL, + + sequence BIGINT NOT NULL, + unique (sp_id) +); + +-- Used for processing Mk12 deals. This tables tracks the deal +-- throughout their lifetime. Entries are added ad the same time as market_mk12_deals. +-- Cleanup is done for complete deals by GC task. +CREATE TABLE market_mk12_deal_pipeline ( + uuid TEXT NOT NULL, + sp_id BIGINT NOT NULL, + + started BOOLEAN DEFAULT FALSE, + + piece_cid TEXT NOT NULL, + piece_size BIGINT NOT NULL, + raw_size BIGINT DEFAULT NULL, + + offline BOOLEAN NOT NULL, + + url TEXT DEFAULT NULL, + headers jsonb NOT NULL DEFAULT '{}', + + commp_task_id BIGINT DEFAULT NULL, + after_commp BOOLEAN DEFAULT FALSE, + + psd_task_id BIGINT DEFAULT NULL, + after_psd BOOLEAN DEFAULT FALSE, + + psd_wait_time TIMESTAMPTZ, + + find_deal_task_id BIGINT DEFAULT NULL, + after_find_deal BOOLEAN DEFAULT FALSE, + + sector BIGINT DEFAULT NULL, + reg_seal_proof INT DEFAULT NULL, + sector_offset BIGINT DEFAULT NULL, + + sealed BOOLEAN DEFAULT FALSE, + + should_index BOOLEAN DEFAULT FALSE, + indexing_created_at TIMESTAMPTZ, + indexing_task_id BIGINT DEFAULT NULL, + indexed BOOLEAN DEFAULT FALSE, + + complete BOOLEAN NOT NULL DEFAULT FALSE, + + constraint market_mk12_deal_pipeline_identity_key unique (uuid) +); + +-- This function creates indexing task based from move_storage tasks +CREATE OR REPLACE FUNCTION create_indexing_task(task_id BIGINT, sealing_table TEXT) +RETURNS VOID AS $$ +DECLARE +query TEXT; -- Holds the dynamic SQL query + pms RECORD; -- Holds each row returned by the query in the loop +BEGIN + -- Construct the dynamic SQL query based on the sealing_table + IF sealing_table = 'sectors_sdr_pipeline' THEN + query := format( + 'SELECT + dp.uuid, + ssp.reg_seal_proof + 
FROM + %I ssp + JOIN + market_mk12_deal_pipeline dp ON ssp.sp_id = dp.sp_id AND ssp.sector_num = dp.sector + WHERE + ssp.task_id_move_storage = $1', sealing_table); + ELSIF sealing_table = 'sectors_snap_pipeline' THEN + query := format( + 'SELECT + dp.uuid, + (SELECT reg_seal_proof FROM sectors_meta WHERE sp_id = ssp.sp_id AND sector_num = ssp.sector_num) AS reg_seal_proof + FROM + %I ssp + JOIN + market_mk12_deal_pipeline dp ON ssp.sp_id = dp.sp_id AND ssp.sector_num = dp.sector + WHERE + ssp.task_id_move_storage = $1', sealing_table); +ELSE + RAISE EXCEPTION 'Invalid sealing_table name: %', sealing_table; +END IF; + + -- Execute the dynamic SQL query with the task_id parameter +FOR pms IN EXECUTE query USING task_id + LOOP + -- Update the market_mk12_deal_pipeline table with the reg_seal_proof and indexing_created_at values +UPDATE market_mk12_deal_pipeline +SET + reg_seal_proof = pms.reg_seal_proof, + indexing_created_at = NOW() AT TIME ZONE 'UTC' +WHERE + uuid = pms.uuid; +END LOOP; + + -- If everything is successful, simply exit + RETURN; + +EXCEPTION + WHEN OTHERS THEN + -- Rollback the transaction and raise the exception for Go to catch + ROLLBACK; + RAISE EXCEPTION 'Failed to create indexing task: %', SQLERRM; +END; +$$ LANGUAGE plpgsql; + +-- This table can be used to track remote piece for offline deals +-- The entries must be created by users. 
Entry is removed when deal is +-- removed from market_mk12_deal_pipeline table using a key constraint +CREATE TABLE market_offline_urls ( + uuid TEXT NOT NULL, + + url TEXT NOT NULL, + headers jsonb NOT NULL DEFAULT '{}', + + raw_size BIGINT NOT NULL, + + CONSTRAINT market_offline_urls_uuid_fk FOREIGN KEY (uuid) + REFERENCES market_mk12_deal_pipeline (uuid) + ON DELETE CASCADE, + CONSTRAINT market_offline_urls_uuid_unique UNIQUE (uuid) +); + +-- This table is used for coordinating libp2p nodes +CREATE TABLE libp2p ( + priv_key BYTEA NOT NULL, + running_on TEXT DEFAULT NULL, + updated_at TIMESTAMPTZ DEFAULT NULL +); + +-- -- Function used to update the libp2p table +CREATE OR REPLACE FUNCTION update_libp2p_node(_running_on TEXT) +RETURNS VOID AS $$ +DECLARE +current_running_on TEXT; + last_updated TIMESTAMPTZ; +BEGIN + -- Fetch the current values of running_on and updated_at + SELECT running_on, updated_at INTO current_running_on, last_updated + FROM libp2p + WHERE running_on IS NOT NULL + LIMIT 1; + + -- If running_on is already set + IF current_running_on IS NOT NULL THEN + -- Check if updated_at is more than 5 minutes old + IF last_updated < NOW() - INTERVAL '5 minutes' THEN + -- Update running_on and updated_at + UPDATE libp2p + SET running_on = _running_on, + updated_at = NOW() AT TIME ZONE 'UTC' + WHERE running_on = current_running_on; + ELSE + -- Raise an exception if the node was updated within the last 5 minutes + RAISE EXCEPTION 'Libp2p node already running on "%"', current_running_on; + END IF; + ELSE + -- If running_on is NULL, set it and update the timestamp + UPDATE libp2p + SET running_on = _running_on, + updated_at = NOW() AT TIME ZONE 'UTC' + WHERE running_on IS NULL; + END IF; +END; +$$ LANGUAGE plpgsql; + + +-- Table for old lotus market deals. This is just for deal +-- which are still alive. 
It should not be used for any processing +CREATE TABLE market_legacy_deals ( + signed_proposal_cid TEXT NOT NULL, + sp_id BIGINT NOT NULL, + client_peer_id TEXT NOT NULL, + + proposal_signature BYTEA NOT NULL, + proposal jsonb NOT NULL, + + piece_cid TEXT NOT NULL, + piece_size BIGINT NOT NULL, + + offline BOOLEAN NOT NULL, + verified BOOLEAN NOT NULL, + + start_epoch BIGINT NOT NULL, + end_epoch BIGINT NOT NULL, + + publish_cid TEXT NOT NULL, + chain_deal_id BIGINT NOT NULL, + + fast_retrieval BOOLEAN NOT NULL, + + created_at TIMESTAMPTZ NOT NULL, + sector_num BIGINT NOT NULL, + + primary key (sp_id, piece_cid, signed_proposal_cid) +); + + + + diff --git a/harmony/harmonytask/harmonytask.go b/harmony/harmonytask/harmonytask.go index 9fd3cfbb9..acb6b732c 100644 --- a/harmony/harmonytask/harmonytask.go +++ b/harmony/harmonytask/harmonytask.go @@ -410,6 +410,10 @@ func (e *TaskEngine) Resources() resources.Resources { return e.reg.Resources } +func (e *TaskEngine) Host() string { + return e.hostAndPort +} + // About the Registry // This registry exists for the benefit of "static methods" of TaskInterface extensions. 
// For example, GetSPID(db, taskID) (int, err) is a static method that can be called diff --git a/itests/alertnow_test.go b/itests/alertnow_test.go index d826f34fc..53c947a25 100644 --- a/itests/alertnow_test.go +++ b/itests/alertnow_test.go @@ -20,7 +20,9 @@ func TestAlertNow(t *testing.T) { tp, } // Create dependencies - db, err := harmonydb.NewFromConfigWithITestID(t, "alertnow") + sharedITestID := harmonydb.ITestNewID() + db, err := harmonydb.NewFromConfigWithITestID(t, sharedITestID) + require.NoError(t, err) an := alertmanager.NewAlertNow(db, "alertNowMachine") @@ -33,7 +35,7 @@ func TestAlertNow(t *testing.T) { done, err := at.Do(123, func() bool { return true }) require.NoError(t, err) require.True(t, done) - require.Equal(t, "alertNowMachine: testMessage", tp.output) + require.Equal(t, "Machine alertNowMachine: testMessage", tp.output) } // testPlugin is a test plugin @@ -42,6 +44,6 @@ type testPlugin struct { } func (tp *testPlugin) SendAlert(data *plugin.AlertPayload) error { - tp.output = data.Summary + tp.output = data.Details["NowCheck"].(string) return nil } diff --git a/itests/curio_test.go b/itests/curio_test.go index 003853a17..b9217fec1 100644 --- a/itests/curio_test.go +++ b/itests/curio_test.go @@ -31,7 +31,8 @@ import ( "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/lib/ffiselect" - "github.com/filecoin-project/curio/market/lmrpc" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/market/indexstore" "github.com/filecoin-project/curio/tasks/seal" lapi "github.com/filecoin-project/lotus/api" @@ -41,7 +42,6 @@ import ( "github.com/filecoin-project/lotus/cli/spcli/createminer" "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) func TestCurioNewActor(t *testing.T) { @@ -127,6 +127,11 @@ func TestCurioHappyPath(t 
*testing.T) { db, err := harmonydb.NewFromConfigWithITestID(t, sharedITestID) require.NoError(t, err) + defer db.ITestDeleteAll() + + idxStore, err := indexstore.NewIndexStore([]string{envElse("CURIO_HARMONYDB_HOSTS", "127.0.0.1")}, config.DefaultCurioConfig()) + require.NoError(t, err) + var titles []string err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) require.NoError(t, err) @@ -166,7 +171,7 @@ func TestCurioHappyPath(t *testing.T) { _ = os.Remove(dir) }() - capi, enginerTerm, closure, finishCh := ConstructCurioTest(ctx, t, dir, db, full, maddr, baseCfg) + capi, enginerTerm, closure, finishCh := ConstructCurioTest(ctx, t, dir, db, idxStore, full, maddr, baseCfg) defer enginerTerm() defer closure() @@ -183,32 +188,54 @@ func TestCurioHappyPath(t *testing.T) { spt, err := miner2.PreferredSealProofTypeFromWindowPoStType(nv, wpt, false) require.NoError(t, err) - num, err := seal.AllocateSectorNumbers(ctx, full, db, maddr, 1, func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) { - for _, n := range numbers { + comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + num, err := seal.AllocateSectorNumbers(ctx, full, tx, maddr, 1) + if err != nil { + return false, err + } + require.Len(t, num, 1) + + for _, n := range num { _, err := tx.Exec("insert into sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) values ($1, $2, $3)", mid, n, spt) if err != nil { return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err) } } + + if err != nil { + return false, xerrors.Errorf("allocating sector numbers: %w", err) + } return true, nil }) + require.NoError(t, err) - require.Len(t, num, 1) + require.True(t, comm) spt, err = miner2.PreferredSealProofTypeFromWindowPoStType(nv, wpt, true) require.NoError(t, err) - num, err = seal.AllocateSectorNumbers(ctx, full, db, maddr, 1, func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) { - for _, n := range numbers 
{ + comm, err = db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + num, err := seal.AllocateSectorNumbers(ctx, full, tx, maddr, 1) + if err != nil { + return false, err + } + require.Len(t, num, 1) + + for _, n := range num { _, err := tx.Exec("insert into sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) values ($1, $2, $3)", mid, n, spt) if err != nil { return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err) } } + + if err != nil { + return false, xerrors.Errorf("allocating sector numbers: %w", err) + } return true, nil }) require.NoError(t, err) - require.Len(t, num, 1) + require.True(t, comm) + // TODO: add DDO deal, f05 deal 2 MiB each in the sector var sectorParamsArr []struct { @@ -320,7 +347,7 @@ func createCliContext(dir string) (*cli.Context, error) { return ctx, nil } -func ConstructCurioTest(ctx context.Context, t *testing.T, dir string, db *harmonydb.DB, full v1api.FullNode, maddr address.Address, cfg *config.CurioConfig) (api.Curio, func(), jsonrpc.ClientCloser, <-chan struct{}) { +func ConstructCurioTest(ctx context.Context, t *testing.T, dir string, db *harmonydb.DB, idx *indexstore.IndexStore, full v1api.FullNode, maddr address.Address, cfg *config.CurioConfig) (api.Curio, func(), jsonrpc.ClientCloser, <-chan struct{}) { ffiselect.IsTest = true cctx, err := createCliContext(dir) @@ -340,6 +367,7 @@ func ConstructCurioTest(ctx context.Context, t *testing.T, dir string, db *harmo dependencies := &deps.Deps{} dependencies.DB = db dependencies.Chain = full + dependencies.IndexStore = idx seal.SetDevnet(true) err = os.Setenv("CURIO_REPO_PATH", dir) require.NoError(t, err) @@ -349,10 +377,6 @@ func ConstructCurioTest(ctx context.Context, t *testing.T, dir string, db *harmo taskEngine, err := tasks.StartTasks(ctx, dependencies) require.NoError(t, err) - dependencies.Cfg.Subsystems.BoostAdapters = []string{fmt.Sprintf("%s:127.0.0.1:32000", maddr)} - err = 
lmrpc.ServeCurioMarketRPCFromConfig(dependencies.DB, dependencies.Chain, dependencies.Cfg) - require.NoError(t, err) - go func() { err = rpc.ListenAndServe(ctx, dependencies, shutdownChan) // Monitor for shutdown. require.NoError(t, err) @@ -416,3 +440,10 @@ func ConstructCurioTest(ctx context.Context, t *testing.T, dir string, db *harmo return capi, taskEngine.GracefullyTerminate, ccloser, finishCh } + +func envElse(env, els string) string { + if v := os.Getenv(env); v != "" { + return v + } + return els +} diff --git a/lib/dealdata/dealdata.go b/lib/dealdata/dealdata.go index 8b00c37c4..c3e90b169 100644 --- a/lib/dealdata/dealdata.go +++ b/lib/dealdata/dealdata.go @@ -2,7 +2,9 @@ package dealdata import ( "context" + "encoding/json" "io" + "net/http" "net/url" "strconv" @@ -18,10 +20,10 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/filler" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var log = logging.Logger("dealdata") @@ -32,7 +34,7 @@ type dealMetadata struct { PieceSize int64 `db:"piece_size"` DataUrl *string `db:"data_url"` - DataHeaders *[]byte `db:"data_headers"` + DataHeaders []byte `db:"data_headers"` DataRawSize *int64 `db:"data_raw_size"` DataDelOnFinalize bool `db:"data_delete_on_finalize"` @@ -134,6 +136,12 @@ func getDealMetadata(ctx context.Context, db *harmonydb.DB, sc *ffi.SealCalls, s return nil, xerrors.Errorf("parsing data URL: %w", err) } + hdrs := http.Header{} + err = json.Unmarshal(p.DataHeaders, &hdrs) + if err != nil { + return nil, xerrors.Errorf("parsing data headers: %w", err) + } + if goUrl.Scheme == "pieceref" { // url is to a piece reference @@ -165,7 +173,7 @@ func getDealMetadata(ctx context.Context, db *harmonydb.DB, sc 
*ffi.SealCalls, s reader, _ := padreader.New(pr, uint64(*p.DataRawSize)) pieceReaders = append(pieceReaders, reader) } else { - reader, _ := padreader.New(NewUrlReader(dataUrl, *p.DataRawSize), uint64(*p.DataRawSize)) + reader, _ := padreader.New(NewUrlReader(dataUrl, hdrs, *p.DataRawSize), uint64(*p.DataRawSize)) pieceReaders = append(pieceReaders, reader) } diff --git a/lib/dealdata/urlpiecereader.go b/lib/dealdata/urlpiecereader.go index 5324f2cfa..a7c5d683d 100644 --- a/lib/dealdata/urlpiecereader.go +++ b/lib/dealdata/urlpiecereader.go @@ -3,12 +3,14 @@ package dealdata import ( "io" "net/http" + "net/url" "golang.org/x/xerrors" ) type UrlPieceReader struct { Url string + Headers http.Header RawSize int64 // the exact number of bytes read, if we read more or less that's an error readSoFar int64 @@ -16,10 +18,11 @@ type UrlPieceReader struct { active io.ReadCloser // auto-closed on EOF } -func NewUrlReader(p string, rs int64) *UrlPieceReader { +func NewUrlReader(p string, h http.Header, rs int64) *UrlPieceReader { return &UrlPieceReader{ Url: p, RawSize: rs, + Headers: h, } } @@ -31,9 +34,32 @@ func (u *UrlPieceReader) Read(p []byte) (n int, err error) { // If 'active' is nil, initiate the HTTP request if u.active == nil { - resp, err := http.Get(u.Url) + goUrl, err := url.Parse(u.Url) if err != nil { - return 0, err + return 0, xerrors.Errorf("failed to parse the URL: %w", err) + } + + if goUrl.Scheme != "https" && goUrl.Scheme != "http" { + return 0, xerrors.Errorf("URL scheme %s not supported", goUrl.Scheme) + } + + req, err := http.NewRequest(http.MethodGet, goUrl.String(), nil) + if err != nil { + return 0, xerrors.Errorf("error creating request: %w", err) + } + + // Add custom headers for security and authentication + req.Header = u.Headers + + // Create a client and make the request + client := &http.Client{} + + resp, err := client.Do(req) + if err != nil { + return 0, xerrors.Errorf("error making GET request: %w", err) + } + if resp.StatusCode != 200 { 
+ return 0, xerrors.Errorf("a non 200 response code: %s", resp.Status) } // Set 'active' to the response body diff --git a/lib/ffi/piece_funcs.go b/lib/ffi/piece_funcs.go index 78cc8a101..b7a582882 100644 --- a/lib/ffi/piece_funcs.go +++ b/lib/ffi/piece_funcs.go @@ -9,8 +9,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/curio/harmony/harmonytask" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) func (sb *SealCalls) WritePiece(ctx context.Context, taskID *harmonytask.TaskID, pieceID storiface.PieceNumber, size int64, data io.Reader) error { diff --git a/lib/ffi/sdr_funcs.go b/lib/ffi/sdr_funcs.go index ebe26be86..78dbfd656 100644 --- a/lib/ffi/sdr_funcs.go +++ b/lib/ffi/sdr_funcs.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/lib/ffiselect" "github.com/filecoin-project/curio/lib/proof" + storiface "github.com/filecoin-project/curio/lib/storiface" // TODO everywhere here that we call this we should call our proxy instead. 
ffi "github.com/filecoin-project/filecoin-ffi" @@ -28,8 +29,6 @@ import ( "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/proofpaths" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) const C1CheckNumber = 3 diff --git a/lib/ffi/sdr_funcs_test.go b/lib/ffi/sdr_funcs_test.go index c4db815e7..6a3edcf2e 100644 --- a/lib/ffi/sdr_funcs_test.go +++ b/lib/ffi/sdr_funcs_test.go @@ -4,7 +4,7 @@ import ( "path/filepath" "testing" - "github.com/filecoin-project/lotus/storage/sealer/storiface" + "github.com/filecoin-project/curio/lib/storiface" ) func TestChangePathType(t *testing.T) { diff --git a/lib/ffi/snap_funcs.go b/lib/ffi/snap_funcs.go index 571e04cc9..7cbfc707d 100644 --- a/lib/ffi/snap_funcs.go +++ b/lib/ffi/snap_funcs.go @@ -21,10 +21,10 @@ import ( "github.com/filecoin-project/curio/lib/ffiselect" paths2 "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/proof" + storiface "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/lib/tarutil" "github.com/filecoin-project/lotus/storage/sealer/fr32" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) func (sb *SealCalls) EncodeUpdate( diff --git a/lib/ffi/task_storage.go b/lib/ffi/task_storage.go index 669836352..8924eea87 100644 --- a/lib/ffi/task_storage.go +++ b/lib/ffi/task_storage.go @@ -12,8 +12,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" storagePaths "github.com/filecoin-project/curio/lib/paths" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) type SectorRef struct { diff --git a/lib/ffiselect/ffidirect/ffi-direct.go b/lib/ffiselect/ffidirect/ffi-direct.go index 48dde6c58..3dbb99e9a 100644 --- a/lib/ffiselect/ffidirect/ffi-direct.go +++ b/lib/ffiselect/ffidirect/ffi-direct.go @@ -11,7 +11,7 @@ import ( 
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/proof" - "github.com/filecoin-project/lotus/storage/sealer/storiface" + "github.com/filecoin-project/curio/lib/storiface" ) // This allow reflection access to the FFI functions. diff --git a/lib/ffiselect/ffiselect.go b/lib/ffiselect/ffiselect.go index fcb5ee33d..5122c8453 100644 --- a/lib/ffiselect/ffiselect.go +++ b/lib/ffiselect/ffiselect.go @@ -19,8 +19,7 @@ import ( "github.com/filecoin-project/go-state-types/proof" "github.com/filecoin-project/curio/build" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + "github.com/filecoin-project/curio/lib/storiface" ) type logCtxKt struct{} diff --git a/lib/partialfile/partialfile.go b/lib/partialfile/partialfile.go new file mode 100644 index 000000000..ac02aafbf --- /dev/null +++ b/lib/partialfile/partialfile.go @@ -0,0 +1,346 @@ +package partialfile + +import ( + "encoding/binary" + "io" + "os" + "syscall" + + "github.com/detailyang/go-fallocate" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/lib/storiface" + + "github.com/filecoin-project/lotus/lib/readerutil" + "github.com/filecoin-project/lotus/storage/sealer/fsutil" +) + +var log = logging.Logger("partialfile") + +const veryLargeRle = 1 << 20 + +// Sectors can be partially unsealed. 
We support this by appending a small +// trailer to each unsealed sector file containing an RLE+ marking which bytes +// in a sector are unsealed, and which are not (holes) + +// unsealed sector files internally have this structure +// [unpadded (raw) data][rle+][4B LE length fo the rle+ field] + +type PartialFile struct { + maxPiece abi.PaddedPieceSize + + path string + allocated rlepluslazy.RLE + + file *os.File +} + +func writeTrailer(maxPieceSize int64, w *os.File, r rlepluslazy.RunIterator) error { + trailer, err := rlepluslazy.EncodeRuns(r, nil) + if err != nil { + return xerrors.Errorf("encoding trailer: %w", err) + } + + // maxPieceSize == unpadded(sectorSize) == trailer start + if _, err := w.Seek(maxPieceSize, io.SeekStart); err != nil { + return xerrors.Errorf("seek to trailer start: %w", err) + } + + rb, err := w.Write(trailer) + if err != nil { + return xerrors.Errorf("writing trailer data: %w", err) + } + + if err := binary.Write(w, binary.LittleEndian, uint32(len(trailer))); err != nil { + return xerrors.Errorf("writing trailer length: %w", err) + } + + return w.Truncate(maxPieceSize + int64(rb) + 4) +} + +func CreatePartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialFile, error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) // nolint + if err != nil { + return nil, xerrors.Errorf("opening partial file '%s': %w", path, err) + } + + err = func() error { + err := fallocate.Fallocate(f, 0, int64(maxPieceSize)) + if errno, ok := err.(syscall.Errno); ok { + if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS { + log.Warnf("could not allocate space, ignoring: %v", errno) + err = nil // log and ignore + } + } + if err != nil { + return xerrors.Errorf("fallocate '%s': %w", path, err) + } + + if err := writeTrailer(int64(maxPieceSize), f, &rlepluslazy.RunSliceIterator{}); err != nil { + return xerrors.Errorf("writing trailer: %w", err) + } + + return nil + }() + if err != nil { + _ = f.Close() + return nil, err + } + if 
err := f.Close(); err != nil { + return nil, xerrors.Errorf("close empty partial file: %w", err) + } + + return OpenPartialFile(maxPieceSize, path) +} + +func OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialFile, error) { + f, err := os.OpenFile(path, os.O_RDWR, 0644) // nolint + if err != nil { + return nil, xerrors.Errorf("opening partial file '%s': %w", path, err) + } + + st, err := f.Stat() + if err != nil { + return nil, xerrors.Errorf("stat '%s': %w", path, err) + } + if st.Size() < int64(maxPieceSize) { + return nil, xerrors.Errorf("sector file '%s' was smaller than the sector size %d < %d", path, st.Size(), maxPieceSize) + } + if st.Size() == int64(maxPieceSize) { + log.Debugw("no partial file trailer, assuming fully allocated", "path", path) + + allAlloc := &rlepluslazy.RunSliceIterator{Runs: []rlepluslazy.Run{{Val: true, Len: uint64(maxPieceSize)}}} + enc, err := rlepluslazy.EncodeRuns(allAlloc, []byte{}) + if err != nil { + return nil, xerrors.Errorf("encoding full allocation: %w", err) + } + + rle, err := rlepluslazy.FromBuf(enc) + if err != nil { + return nil, xerrors.Errorf("decoding full allocation: %w", err) + } + + return &PartialFile{ + maxPiece: maxPieceSize, + path: path, + allocated: rle, + file: f, + }, nil + } + + var rle rlepluslazy.RLE + err = func() error { + // read trailer + var tlen [4]byte + _, err = f.ReadAt(tlen[:], st.Size()-int64(len(tlen))) + if err != nil { + return xerrors.Errorf("reading trailer length: %w", err) + } + + // sanity-check the length + trailerLen := binary.LittleEndian.Uint32(tlen[:]) + expectLen := int64(trailerLen) + int64(len(tlen)) + int64(maxPieceSize) + if expectLen != st.Size() { + return xerrors.Errorf("file '%s' has inconsistent length; has %d bytes; expected %d (%d trailer, %d sector data)", path, st.Size(), expectLen, int64(trailerLen)+int64(len(tlen)), maxPieceSize) + } + if trailerLen > veryLargeRle { + log.Warnf("Partial file '%s' has a VERY large trailer with %d bytes", path, 
trailerLen) + } + + trailerStart := st.Size() - int64(len(tlen)) - int64(trailerLen) + if trailerStart != int64(maxPieceSize) { + return xerrors.Errorf("expected sector size to equal trailer start index") + } + + trailerBytes := make([]byte, trailerLen) + _, err = f.ReadAt(trailerBytes, trailerStart) + if err != nil { + return xerrors.Errorf("reading trailer: %w", err) + } + + rle, err = rlepluslazy.FromBuf(trailerBytes) + if err != nil { + return xerrors.Errorf("decoding trailer: %w", err) + } + + it, err := rle.RunIterator() + if err != nil { + return xerrors.Errorf("getting trailer run iterator: %w", err) + } + + f, err := rlepluslazy.Fill(it) + if err != nil { + return xerrors.Errorf("filling bitfield: %w", err) + } + lastSet, err := rlepluslazy.Count(f) + if err != nil { + return xerrors.Errorf("finding last set byte index: %w", err) + } + + if lastSet > uint64(maxPieceSize) { + return xerrors.Errorf("last set byte at index higher than sector size: %d > %d", lastSet, maxPieceSize) + } + + return nil + }() + if err != nil { + _ = f.Close() + return nil, err + } + + return &PartialFile{ + maxPiece: maxPieceSize, + path: path, + allocated: rle, + file: f, + }, nil +} + +func (pf *PartialFile) Close() error { + return pf.file.Close() +} + +func (pf *PartialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (io.Writer, error) { + if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { + return nil, xerrors.Errorf("seek piece start: %w", err) + } + + { + have, err := pf.allocated.RunIterator() + if err != nil { + return nil, err + } + + and, err := rlepluslazy.And(have, PieceRun(offset, size)) + if err != nil { + return nil, err + } + + c, err := rlepluslazy.Count(and) + if err != nil { + return nil, err + } + + if c > 0 { + log.Warnf("getting partial file writer overwriting %d allocated bytes", c) + } + } + + return pf.file, nil +} + +func (pf *PartialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) 
error { + have, err := pf.allocated.RunIterator() + if err != nil { + return err + } + + ored, err := rlepluslazy.Or(have, PieceRun(offset, size)) + if err != nil { + return err + } + + if err := writeTrailer(int64(pf.maxPiece), pf.file, ored); err != nil { + return xerrors.Errorf("writing trailer: %w", err) + } + + return nil +} + +func (pf *PartialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { + have, err := pf.allocated.RunIterator() + if err != nil { + return err + } + + if err := fsutil.Deallocate(pf.file, int64(offset), int64(size)); err != nil { + return xerrors.Errorf("deallocating: %w", err) + } + + s, err := rlepluslazy.Subtract(have, PieceRun(offset, size)) + if err != nil { + return err + } + + if err := writeTrailer(int64(pf.maxPiece), pf.file, s); err != nil { + return xerrors.Errorf("writing trailer: %w", err) + } + + return nil +} + +// Reader forks off a new reader from the underlying file, and returns a reader +// starting at the given offset and reading the given size. Safe for concurrent +// use. 
+func (pf *PartialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (io.Reader, error) { + if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { + return nil, xerrors.Errorf("seek piece start: %w", err) + } + + { + have, err := pf.allocated.RunIterator() + if err != nil { + return nil, err + } + + and, err := rlepluslazy.And(have, PieceRun(offset, size)) + if err != nil { + return nil, err + } + + c, err := rlepluslazy.Count(and) + if err != nil { + return nil, err + } + + if c != uint64(size) { + log.Warnf("getting partial file reader reading %d unallocated bytes", uint64(size)-c) + } + } + + return io.LimitReader(readerutil.NewReadSeekerFromReaderAt(pf.file, int64(offset)), int64(size)), nil +} + +func (pf *PartialFile) Allocated() (rlepluslazy.RunIterator, error) { + return pf.allocated.RunIterator() +} + +func (pf *PartialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + have, err := pf.Allocated() + if err != nil { + return false, err + } + + u, err := rlepluslazy.And(have, PieceRun(offset.Padded(), size.Padded())) + if err != nil { + return false, err + } + + uc, err := rlepluslazy.Count(u) + if err != nil { + return false, err + } + + return abi.PaddedPieceSize(uc) == size.Padded(), nil +} + +func PieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { + var runs []rlepluslazy.Run + if offset > 0 { + runs = append(runs, rlepluslazy.Run{ + Val: false, + Len: uint64(offset), + }) + } + + runs = append(runs, rlepluslazy.Run{ + Val: true, + Len: uint64(size), + }) + + return &rlepluslazy.RunSliceIterator{Runs: runs} +} diff --git a/lib/paths/db_index.go b/lib/paths/db_index.go index 54d42f1ec..e30ffbb76 100644 --- a/lib/paths/db_index.go +++ b/lib/paths/db_index.go @@ -21,10 +21,10 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/lib/paths/alertinginterface" + storiface 
"github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) const NoMinerFilter = abi.ActorID(0) diff --git a/lib/paths/http_handler.go b/lib/paths/http_handler.go index 936fffdda..c308dedf5 100644 --- a/lib/paths/http_handler.go +++ b/lib/paths/http_handler.go @@ -15,10 +15,9 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/curio/lib/partialfile" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/lib/tarutil" - - "github.com/filecoin-project/lotus/storage/sealer/partialfile" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var log = logging.Logger("stores") diff --git a/lib/paths/http_handler_test.go b/lib/paths/http_handler_test.go index 55ee45929..e5d067a35 100644 --- a/lib/paths/http_handler_test.go +++ b/lib/paths/http_handler_test.go @@ -15,11 +15,10 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/curio/lib/partialfile" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/paths/mocks" - - "github.com/filecoin-project/lotus/storage/sealer/partialfile" - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) func TestRemoteGetAllocated(t *testing.T) { diff --git a/lib/paths/index.go b/lib/paths/index.go index c217518dc..35e667fe1 100644 --- a/lib/paths/index.go +++ b/lib/paths/index.go @@ -9,8 +9,9 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + storiface "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var HeartbeatInterval = 10 * time.Second diff --git a/lib/paths/index_locks.go 
b/lib/paths/index_locks.go index ab95cb4a7..c5c2e8f87 100644 --- a/lib/paths/index_locks.go +++ b/lib/paths/index_locks.go @@ -9,7 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) type sectorLock struct { diff --git a/lib/paths/index_locks_test.go b/lib/paths/index_locks_test.go index d3134055b..d53e4da80 100644 --- a/lib/paths/index_locks_test.go +++ b/lib/paths/index_locks_test.go @@ -9,7 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/storage/sealer/storiface" + "github.com/filecoin-project/curio/lib/storiface" ) var aSector = abi.SectorID{ diff --git a/lib/paths/interface.go b/lib/paths/interface.go index f376ef284..3b04225ce 100644 --- a/lib/paths/interface.go +++ b/lib/paths/interface.go @@ -8,9 +8,10 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/curio/lib/partialfile" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/partialfile" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) //go:generate go run github.com/golang/mock/mockgen -destination=mocks/pf.go -package=mocks . 
PartialFileHandler diff --git a/lib/paths/local.go b/lib/paths/local.go index f2e94f96a..9510d7704 100644 --- a/lib/paths/local.go +++ b/lib/paths/local.go @@ -22,11 +22,11 @@ import ( "github.com/filecoin-project/go-state-types/proof" cuproof "github.com/filecoin-project/curio/lib/proof" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/lib/supraffi" "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) type LocalStorage interface { diff --git a/lib/paths/local_test.go b/lib/paths/local_test.go index f9b5e24a3..92c9b5074 100644 --- a/lib/paths/local_test.go +++ b/lib/paths/local_test.go @@ -11,9 +11,9 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/curio/harmony/harmonydb" + storiface "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) const pathSize = 16 << 20 diff --git a/lib/paths/localstorage.go b/lib/paths/localstorage.go index 6f67a18f8..d3050a776 100644 --- a/lib/paths/localstorage.go +++ b/lib/paths/localstorage.go @@ -1,9 +1,19 @@ package paths import ( - "github.com/filecoin-project/lotus/node/config" + "encoding/json" + "errors" + "io" + "io/fs" + "os" + gopath "path" + + "github.com/mitchellh/go-homedir" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) type BasicLocalStorage struct { @@ -14,7 +24,7 @@ var _ LocalStorage = &BasicLocalStorage{} func (ls *BasicLocalStorage) GetStorage() (storiface.StorageConfig, error) { var def storiface.StorageConfig - c, err := config.StorageFromFile(ls.PathToJSON, &def) + c, err := StorageFromFile(ls.PathToJSON, &def) if err != nil { return storiface.StorageConfig{}, 
err } @@ -23,12 +33,12 @@ func (ls *BasicLocalStorage) GetStorage() (storiface.StorageConfig, error) { func (ls *BasicLocalStorage) SetStorage(f func(*storiface.StorageConfig)) error { var def storiface.StorageConfig - c, err := config.StorageFromFile(ls.PathToJSON, &def) + c, err := StorageFromFile(ls.PathToJSON, &def) if err != nil { return err } f(c) - return config.WriteStorageFile(ls.PathToJSON, *c) + return WriteStorageFile(ls.PathToJSON, *c) } func (ls *BasicLocalStorage) Stat(path string) (fsutil.FsStat, error) { @@ -42,3 +52,69 @@ func (ls *BasicLocalStorage) DiskUsage(path string) (int64, error) { } return si.OnDisk, nil } + +func StorageFromFile(path string, def *storiface.StorageConfig) (*storiface.StorageConfig, error) { + path, err := homedir.Expand(path) + if err != nil { + return nil, xerrors.Errorf("expanding storage config path: %w", err) + } + + file, err := os.Open(path) + switch { + case os.IsNotExist(err): + if def == nil { + return nil, xerrors.Errorf("couldn't load storage config: %w", err) + } + return def, nil + case err != nil: + return nil, err + } + + defer file.Close() //nolint:errcheck // The file is RO + return StorageFromReader(file) +} + +func StorageFromReader(reader io.Reader) (*storiface.StorageConfig, error) { + var cfg storiface.StorageConfig + err := json.NewDecoder(reader).Decode(&cfg) + if err != nil { + return nil, err + } + + return &cfg, nil +} + +func WriteStorageFile(filePath string, config storiface.StorageConfig) error { + filePath, err := homedir.Expand(filePath) + if err != nil { + return xerrors.Errorf("expanding storage config path: %w", err) + } + + b, err := json.MarshalIndent(config, "", " ") + if err != nil { + return xerrors.Errorf("marshaling storage config: %w", err) + } + + info, err := os.Stat(filePath) + if err != nil { + if !errors.Is(err, fs.ErrNotExist) { + return xerrors.Errorf("statting storage config (%s): %w", filePath, err) + } + if gopath.Base(filePath) == "." 
{ + filePath = gopath.Join(filePath, "storage.json") + } + } else { + if info.IsDir() || gopath.Base(filePath) == "." { + filePath = gopath.Join(filePath, "storage.json") + } + } + + if err := os.MkdirAll(gopath.Dir(filePath), 0755); err != nil { + return xerrors.Errorf("making storage config parent directory: %w", err) + } + if err := os.WriteFile(filePath, b, 0644); err != nil { + return xerrors.Errorf("persisting storage config (%s): %w", filePath, err) + } + + return nil +} diff --git a/lib/paths/localstorage_cached.go b/lib/paths/localstorage_cached.go index 9289852b4..fd7560cf2 100644 --- a/lib/paths/localstorage_cached.go +++ b/lib/paths/localstorage_cached.go @@ -7,8 +7,9 @@ import ( lru "github.com/hashicorp/golang-lru/v2" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var StatTimeout = 5 * time.Second diff --git a/lib/paths/mocks/index.go b/lib/paths/mocks/index.go index f64b80dea..082d22908 100644 --- a/lib/paths/mocks/index.go +++ b/lib/paths/mocks/index.go @@ -13,9 +13,9 @@ import ( abi "github.com/filecoin-project/go-state-types/abi" paths "github.com/filecoin-project/curio/lib/paths" + storiface "github.com/filecoin-project/curio/lib/storiface" fsutil "github.com/filecoin-project/lotus/storage/sealer/fsutil" - storiface "github.com/filecoin-project/lotus/storage/sealer/storiface" ) // MockSectorIndex is a mock of SectorIndex interface. 
diff --git a/lib/paths/mocks/pf.go b/lib/paths/mocks/pf.go index e1604be79..072e47b2e 100644 --- a/lib/paths/mocks/pf.go +++ b/lib/paths/mocks/pf.go @@ -12,8 +12,8 @@ import ( abi "github.com/filecoin-project/go-state-types/abi" - partialfile "github.com/filecoin-project/lotus/storage/sealer/partialfile" - storiface "github.com/filecoin-project/lotus/storage/sealer/storiface" + partialfile "github.com/filecoin-project/curio/lib/partialfile" + storiface "github.com/filecoin-project/curio/lib/storiface" ) // MockPartialFileHandler is a mock of PartialFileHandler interface. diff --git a/lib/paths/mocks/store.go b/lib/paths/mocks/store.go index 2ee11327a..e7c3c184c 100644 --- a/lib/paths/mocks/store.go +++ b/lib/paths/mocks/store.go @@ -13,8 +13,9 @@ import ( abi "github.com/filecoin-project/go-state-types/abi" + storiface "github.com/filecoin-project/curio/lib/storiface" + fsutil "github.com/filecoin-project/lotus/storage/sealer/fsutil" - storiface "github.com/filecoin-project/lotus/storage/sealer/storiface" ) // MockStore is a mock of Store interface. 
diff --git a/lib/paths/remote.go b/lib/paths/remote.go index ed696bf76..c0ff6322c 100644 --- a/lib/paths/remote.go +++ b/lib/paths/remote.go @@ -20,9 +20,10 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/curio/lib/partialfile" + storiface "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/partialfile" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var FetchTempSubdir = "fetching" diff --git a/lib/paths/remote_prove.go b/lib/paths/remote_prove.go index 5185f0c6d..edb83b9aa 100644 --- a/lib/paths/remote_prove.go +++ b/lib/paths/remote_prove.go @@ -16,9 +16,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/lib/tarutil" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) // ReadMinCacheInto reads finalized-like (few MiB) cache files into the target dir diff --git a/lib/paths/remote_test.go b/lib/paths/remote_test.go index 9c070f2f2..7602eb116 100644 --- a/lib/paths/remote_test.go +++ b/lib/paths/remote_test.go @@ -22,12 +22,10 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/partialfile" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/paths/mocks" - - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/storage/sealer/partialfile" - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) const metaFile = "sectorstore.json" @@ -70,23 +68,9 @@ func TestMoveShared(t *testing.T) { dir := t.TempDir() - openRepo := func(dir string) repo.LockedRepo { - r, err := repo.NewFS(dir) - require.NoError(t, err) - require.NoError(t, r.Init(repo.Worker)) - lr, err 
:= r.Lock(repo.Worker) - require.NoError(t, err) - - t.Cleanup(func() { - _ = lr.Close() - }) - - err = lr.SetStorage(func(config *storiface.StorageConfig) { - *config = storiface.StorageConfig{} - }) - require.NoError(t, err) - - return lr + openRepo := func(dir string) paths.LocalStorage { + bls := &paths.BasicLocalStorage{PathToJSON: filepath.Join(t.TempDir(), "storage.json")} + return bls } // setup two repos with two storage paths: diff --git a/lib/pieceprovider/piece_provider.go b/lib/pieceprovider/piece_provider.go new file mode 100644 index 000000000..bacf70fce --- /dev/null +++ b/lib/pieceprovider/piece_provider.go @@ -0,0 +1,463 @@ +package pieceprovider + +import ( + "bufio" + "context" + "io" + "sync" + + lru "github.com/hashicorp/golang-lru/v2" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + pool "github.com/libp2p/go-buffer-pool" + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/lib/paths" + "github.com/filecoin-project/curio/lib/storiface" + + "github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/storage/sealer/fr32" +) + +var log = logging.Logger("piece-provider") + +type PieceProvider struct { + storage *paths.Remote + index paths.SectorIndex +} + +func NewPieceProvider(storage *paths.Remote, index paths.SectorIndex) *PieceProvider { + return &PieceProvider{ + storage: storage, + index: index, + } +} + +// IsUnsealed checks if we have the unsealed piece at the given offset in an already +// existing unsealed file either locally or on any of the workers. 
func (p *PieceProvider) IsUnsealed(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
	if err := offset.Valid(); err != nil {
		return false, xerrors.Errorf("offset is not valid: %w", err)
	}
	if err := size.Validate(); err != nil {
		return false, xerrors.Errorf("size is not a valid piece size: %w", err)
	}

	// Lock only for the duration of this check; cancel releases the lock context.
	ctxLock, cancel := context.WithCancel(ctx)
	defer cancel()

	if err := p.index.StorageLock(ctxLock, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil {
		return false, xerrors.Errorf("acquiring read sector lock: %w", err)
	}

	return p.storage.CheckIsUnsealed(ctxLock, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded())
}

// tryReadUnsealedPiece will try to read the unsealed piece from an existing unsealed sector file for the given sector from any worker that has it.
// It will NOT try to schedule an Unseal of a sealed sector file for the read.
//
// Returns a nil reader if the piece does NOT exist in any unsealed file or there is no unsealed file for the given sector on any of the workers.
func (p *PieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize) (storiface.Reader, error) {
	// acquire a lock purely for reading unsealed sectors; the lock lives as long
	// as the derived ctx — cancel is handed to the pieceReader as onClose so the
	// lock is released when the reader is closed.
	ctx, cancel := context.WithCancel(ctx)
	if err := p.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil {
		cancel()
		return nil, xerrors.Errorf("acquiring read sector lock: %w", err)
	}

	// Reader returns a reader getter for an unsealed piece at the given offset in the given sector.
	// The returned reader will be nil if none of the workers has an unsealed sector file containing
	// the unsealed piece.
	readerGetter, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(pieceOffset.Padded()), pieceSize.Padded())
	if err != nil {
		cancel()
		log.Debugf("did not get storage reader;sector=%+v, err:%s", sector.ID, err)
		return nil, err
	}
	if readerGetter == nil {
		cancel()
		return nil, nil
	}

	pr, err := (&pieceReader{
		getReader: func(startOffset, readSize uint64) (io.ReadCloser, error) {
			// The request is for unpadded bytes, at any offset.
			// storage.Reader readers give us fr32-padded bytes, so we need to
			// do the unpadding here.

			startOffsetAligned := storiface.UnpaddedFloor(startOffset)
			startOffsetDiff := int(startOffset - uint64(startOffsetAligned))

			endOffset := startOffset + readSize
			endOffsetAligned := storiface.UnpaddedCeil(endOffset)

			r, err := readerGetter(startOffsetAligned.Padded(), endOffsetAligned.Padded())
			if err != nil {
				return nil, xerrors.Errorf("getting reader at +%d: %w", startOffsetAligned, err)
			}

			buf := pool.Get(fr32.BufSize(pieceSize.Padded()))

			upr, err := fr32.NewUnpadReaderBuf(r, pieceSize.Padded(), buf)
			if err != nil {
				r.Close() // nolint
				return nil, xerrors.Errorf("creating unpadded reader: %w", err)
			}

			// 127 bytes = one unpadded fr32 chunk (see UnpaddedByteIndex.Valid),
			// so Discard below never has to skip across a chunk boundary.
			bir := bufio.NewReaderSize(upr, 127)
			if startOffset > uint64(startOffsetAligned) {
				if _, err := bir.Discard(startOffsetDiff); err != nil {
					r.Close() // nolint
					return nil, xerrors.Errorf("discarding bytes for startOffset: %w", err)
				}
			}

			// closeOnce guards pool.Put only: buf is returned to the pool at most
			// once even if Close is called repeatedly; r.Close is forwarded each time.
			var closeOnce sync.Once

			return struct {
				io.Reader
				io.Closer
			}{
				Reader: bir,
				Closer: funcCloser(func() error {
					closeOnce.Do(func() {
						pool.Put(buf)
					})
					return r.Close()
				}),
			}, nil
		},
		len:      pieceSize,
		onClose:  cancel,
		pieceCid: pc,
	}).init(ctx)
	if err != nil || pr == nil { // pr == nil to make sure we don't return typed nil
		cancel()
		return nil, err
	}

	return pr, err
}

// funcCloser adapts a plain func() error to io.Closer.
type funcCloser func() error

func (f funcCloser) Close() error {
	return f()
}

var _ io.Closer = funcCloser(nil)
+// ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector +// If an Unsealed sector file exists with the Piece Unsealed in it, we'll use that for the read. +// Otherwise, an error is returned +func (p *PieceProvider) ReadPiece(ctx context.Context, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, pieceCid cid.Cid) (storiface.Reader, error) { + if err := pieceOffset.Valid(); err != nil { + return nil, xerrors.Errorf("pieceOffset is not valid: %w", err) + } + if err := size.Validate(); err != nil { + return nil, xerrors.Errorf("size is not a valid piece size: %w", err) + } + + r, err := p.tryReadUnsealedPiece(ctx, pieceCid, sector, pieceOffset, size) + + if err != nil { + log.Errorf("error getting reading piece:%s", err) + return nil, err + } + + log.Debugf("returning reader to read unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) + + return r, nil +} + +var _ storiface.Reader = &pieceReader{} + +// MaxPieceReaderBurnBytes - For small read skips, it's faster to "burn" some bytes than to set up new sector reader. +// Assuming 1ms stream seek latency, and 1G/s stream rate, we're willing to discard up to 1 MiB. 
var MaxPieceReaderBurnBytes int64 = 1 << 20 // 1M
var ReadBuf = 128 * (127 * 8)               // unpadded(128k)

// MinRandomReadSize - ReadAt requests smaller than this are rounded up to this
// size; the surplus bytes are kept in the remReads cache.
var MinRandomReadSize = int64(4 << 10)

type pieceGetter func(offset, size uint64) (io.ReadCloser, error)

// pieceReader serves both sequential (Read/Seek) and random (ReadAt) access to
// one unsealed piece, backed by readers obtained from getReader.
type pieceReader struct {
	getReader pieceGetter
	pieceCid  cid.Cid
	len       abi.UnpaddedPieceSize
	onClose   context.CancelFunc // releases the storage lock held for this reader

	seqMCtx context.Context // metrics context tagged "seq"
	atMCtx  context.Context // metrics context tagged "rand"

	closed bool
	seqAt  int64 // next byte to be read by io.Reader

	// sequential reader
	seqMu sync.Mutex
	r     io.ReadCloser
	br    *bufio.Reader
	rAt   int64 // offset the backing reader r is currently positioned at

	// random read cache
	remReads *lru.Cache[int64, []byte] // data start offset -> data
	// todo try carrying a "bytes read sequentially so far" counter with those
	// cached byte buffers, increase buffer sizes when we see that we're doing
	// a long sequential read
}

// init sets up metrics contexts and the random-read cache, then opens the
// initial full-length backing reader. Returns (nil, nil) when the getter has
// no data for this piece.
func (p *pieceReader) init(ctx context.Context) (_ *pieceReader, err error) {
	stats.Record(ctx, metrics.DagStorePRInitCount.M(1))

	p.seqMCtx, _ = tag.New(ctx, tag.Upsert(metrics.PRReadType, "seq"))
	p.atMCtx, _ = tag.New(ctx, tag.Upsert(metrics.PRReadType, "rand"))

	p.remReads, err = lru.New[int64, []byte](100)
	if err != nil {
		return nil, err
	}

	p.rAt = 0
	p.r, err = p.getReader(uint64(p.rAt), uint64(p.len))
	if err != nil {
		return nil, err
	}
	if p.r == nil {
		return nil, nil
	}

	p.br = bufio.NewReaderSize(p.r, ReadBuf)

	return p, nil
}

// check reports an error once the reader has been closed.
func (p *pieceReader) check() error {
	if p.closed {
		return xerrors.Errorf("reader closed")
	}

	return nil
}

// Close closes the backing reader (if any), releases the storage lock via
// onClose, and marks the reader unusable.
func (p *pieceReader) Close() error {
	p.seqMu.Lock()
	defer p.seqMu.Unlock()

	if err := p.check(); err != nil {
		return err
	}

	if p.r != nil {
		if err := p.r.Close(); err != nil {
			return err
		}
		p.r = nil
	}

	p.onClose()

	p.closed = true

	return nil
}

// Read implements io.Reader over the sequential cursor seqAt.
func (p *pieceReader) Read(b []byte) (int, error) {
	p.seqMu.Lock()
	defer p.seqMu.Unlock()

	if err := p.check(); err != nil {
		return 0, err
	}

	n, err := p.readSeqReader(b)
	p.seqAt += int64(n)
	return n, err
}

// Seek implements io.Seeker; it only moves the logical cursor — the backing
// reader is repositioned lazily by readSeqReader.
func (p *pieceReader) Seek(offset int64, whence int) (int64, error) {
	p.seqMu.Lock()
	defer p.seqMu.Unlock()

	if err := p.check(); err != nil {
		return 0, err
	}

	switch whence {
	case io.SeekStart:
		p.seqAt = offset
	case io.SeekCurrent:
		p.seqAt += offset
	case io.SeekEnd:
		p.seqAt = int64(p.len) + offset
	default:
		return 0, xerrors.Errorf("bad whence")
	}

	return p.seqAt, nil
}

// readSeqReader fills b from the sequential cursor, reusing the current
// backing stream when it is at or slightly behind the wanted offset and
// re-opening it otherwise. Caller must hold seqMu.
func (p *pieceReader) readSeqReader(b []byte) (n int, err error) {
	off := p.seqAt

	if err := p.check(); err != nil {
		return 0, err
	}

	stats.Record(p.seqMCtx, metrics.DagStorePRBytesRequested.M(int64(len(b))))

	// 1. Get the backing reader into the correct position

	// if the backing reader is ahead of the offset we want, or more than
	// MaxPieceReaderBurnBytes behind, reset the reader
	if p.r == nil || p.rAt > off || p.rAt+MaxPieceReaderBurnBytes < off {
		if p.r != nil {
			if err := p.r.Close(); err != nil {
				return 0, xerrors.Errorf("closing backing reader: %w", err)
			}
			p.r = nil
			p.br = nil
		}

		log.Debugw("pieceReader new stream", "piece", p.pieceCid, "at", p.rAt, "off", off-p.rAt, "n", len(b))

		if off > p.rAt {
			stats.Record(p.seqMCtx, metrics.DagStorePRSeekForwardBytes.M(off-p.rAt), metrics.DagStorePRSeekForwardCount.M(1))
		} else {
			stats.Record(p.seqMCtx, metrics.DagStorePRSeekBackBytes.M(p.rAt-off), metrics.DagStorePRSeekBackCount.M(1))
		}

		p.rAt = off
		p.r, err = p.getReader(uint64(p.rAt), uint64(p.len))
		// NOTE(review): br is constructed before err is checked, so on error it
		// wraps a nil reader — harmless since we return immediately, but the two
		// lines below look swapped; confirm intent.
		p.br = bufio.NewReaderSize(p.r, ReadBuf)
		if err != nil {
			return 0, xerrors.Errorf("getting backing reader: %w", err)
		}
	}

	// 2. Check if we need to burn some bytes
	if off > p.rAt {
		stats.Record(p.seqMCtx, metrics.DagStorePRBytesDiscarded.M(off-p.rAt), metrics.DagStorePRDiscardCount.M(1))

		n, err := io.CopyN(io.Discard, p.br, off-p.rAt)
		p.rAt += n
		if err != nil {
			return 0, xerrors.Errorf("discarding read gap: %w", err)
		}
	}

	// 3. Sanity check
	if off != p.rAt {
		return 0, xerrors.Errorf("bad reader offset; requested %d; at %d", off, p.rAt)
	}

	// 4. Read!
	n, err = io.ReadFull(p.br, b)
	if n < len(b) {
		log.Debugw("pieceReader short read", "piece", p.pieceCid, "at", p.rAt, "toEnd", int64(p.len)-p.rAt, "n", len(b), "read", n, "err", err)
	}
	if err == io.ErrUnexpectedEOF {
		err = io.EOF
	}

	p.rAt += int64(n)
	return n, err
}

// ReadAt implements io.ReaderAt. It first serves what it can from the
// remReads cache, then fetches the remainder — padding small fetches up to
// MinRandomReadSize and caching the surplus.
func (p *pieceReader) ReadAt(b []byte, off int64) (n int, err error) {
	stats.Record(p.atMCtx, metrics.DagStorePRBytesRequested.M(int64(len(b))))

	var filled int64

	// try to get a buf from lru
	data, ok := p.remReads.Get(off)
	if ok {
		n = copy(b, data)
		filled += int64(n)

		if n < len(data) {
			// re-key the unconsumed tail at its new start offset
			p.remReads.Add(off+int64(n), data[n:])

			// keep the header buffered
			if off != 0 {
				p.remReads.Remove(off)
			}
		}

		stats.Record(p.atMCtx, metrics.DagStorePRAtHitBytes.M(int64(n)), metrics.DagStorePRAtHitCount.M(1))
		// dagstore/pr_at_hit_bytes, dagstore/pr_at_hit_count
	}
	if filled == int64(len(b)) {
		// dagstore/pr_at_cache_fill_count
		stats.Record(p.atMCtx, metrics.DagStorePRAtCacheFillCount.M(1))
		return n, nil
	}

	readOff := off + filled
	readSize := int64(len(b)) - filled

	smallRead := readSize < MinRandomReadSize

	if smallRead {
		// read into small read buf
		readBuf := make([]byte, MinRandomReadSize)
		bn, err := p.readInto(readBuf, readOff)
		if err != nil && err != io.EOF {
			return int(filled), err
		}

		_ = stats.RecordWithTags(p.atMCtx, []tag.Mutator{tag.Insert(metrics.PRReadSize, "small")}, metrics.DagStorePRAtReadBytes.M(int64(bn)), metrics.DagStorePRAtReadCount.M(1))

		// reslice so that the slice is the data
		readBuf = readBuf[:bn]

		// fill user data
		used := copy(b[filled:], readBuf[:])
		filled += int64(used)
		readBuf = readBuf[used:]

		// cache the rest
		if len(readBuf) > 0 {
			p.remReads.Add(readOff+int64(used), readBuf)
		}
	} else {
		// read into user buf
		bn, err := p.readInto(b[filled:], readOff)
		if err != nil {
			return int(filled), err
		}
		filled += int64(bn)

		_ = stats.RecordWithTags(p.atMCtx, []tag.Mutator{tag.Insert(metrics.PRReadSize, "big")}, metrics.DagStorePRAtReadBytes.M(int64(bn)), metrics.DagStorePRAtReadCount.M(1))
	}

	if filled < int64(len(b)) {
		return int(filled), io.EOF
	}

	return int(filled), nil
}

// readInto does one full one-shot read at off via a fresh reader from
// getReader, closing it afterwards; ErrUnexpectedEOF is normalized to EOF.
func (p *pieceReader) readInto(b []byte, off int64) (n int, err error) {
	rd, err := p.getReader(uint64(off), uint64(len(b)))
	if err != nil {
		return 0, xerrors.Errorf("getting reader: %w", err)
	}

	n, err = io.ReadFull(rd, b)

	cerr := rd.Close()

	if err == io.ErrUnexpectedEOF {
		err = io.EOF
	}

	if err != nil {
		return n, err
	}

	return n, cerr
}
+ +package storiface + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Key (string) (string) + if len("Key") > 8192 { + return xerrors.Errorf("Value in field \"Key\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Key"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Key")); err != nil { + return err + } + + if len(t.Key) > 8192 { + return xerrors.Errorf("Value in field t.Key was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Key))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Key)); err != nil { + return err + } + + // t.Value (string) (string) + if len("Value") > 8192 { + return xerrors.Errorf("Value in field \"Value\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Value"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Value")); err != nil { + return err + } + + if len(t.Value) > 8192 { + return xerrors.Errorf("Value in field t.Value was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Value))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Value)); err != nil { + return err + } + return nil +} + +func (t *SecDataHttpHeader) UnmarshalCBOR(r io.Reader) (err error) { + *t = SecDataHttpHeader{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + 
return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("SecDataHttpHeader: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Key (string) (string) + case "Key": + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.Key = string(sval) + } + // t.Value (string) (string) + case "Value": + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.Value = string(sval) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *SectorLocation) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.URL (string) (string) + if len("URL") > 8192 { + return xerrors.Errorf("Value in field \"URL\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("URL"))); err != nil { + return err + } + if _, err := cw.WriteString(string("URL")); err != nil { + return err + } + + if len(t.URL) > 8192 { + return xerrors.Errorf("Value in field t.URL was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.URL))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.URL)); err != nil { + return err + } + + // t.Local (bool) (bool) + if len("Local") > 8192 { + return xerrors.Errorf("Value in field \"Local\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Local"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Local")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.Local); err != 
nil { + return err + } + + // t.Headers ([]storiface.SecDataHttpHeader) (slice) + if len("Headers") > 8192 { + return xerrors.Errorf("Value in field \"Headers\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Headers"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Headers")); err != nil { + return err + } + + if len(t.Headers) > 8192 { + return xerrors.Errorf("Slice value in field t.Headers was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Headers))); err != nil { + return err + } + for _, v := range t.Headers { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + + } + return nil +} + +func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { + *t = SectorLocation{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("SectorLocation: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.URL (string) (string) + case "URL": + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.URL = string(sval) + } + // t.Local (bool) (bool) + case "Local": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Local = false + case 21: + t.Local = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Headers ([]storiface.SecDataHttpHeader) (slice) + case "Headers": + + maj, extra, 
err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 8192 { + return fmt.Errorf("t.Headers: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Headers = make([]SecDataHttpHeader, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err + + { + + if err := t.Headers[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Headers[i]: %w", err) + } + + } + + } + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/lib/storiface/ffi.go b/lib/storiface/ffi.go new file mode 100644 index 000000000..4a9f832b8 --- /dev/null +++ b/lib/storiface/ffi.go @@ -0,0 +1,41 @@ +package storiface + +import ( + "context" + "errors" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/storage/sealer/fr32" +) + +var ErrSectorNotFound = errors.New("sector not found") + +type UnpaddedByteIndex uint64 + +func (i UnpaddedByteIndex) Padded() PaddedByteIndex { + return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded()) +} + +func (i UnpaddedByteIndex) Valid() error { + if i%127 != 0 { + return xerrors.Errorf("unpadded byte index must be a multiple of 127") + } + + return nil +} + +func UnpaddedFloor(n uint64) UnpaddedByteIndex { + return UnpaddedByteIndex(n / uint64(fr32.UnpaddedFr32Chunk) * uint64(fr32.UnpaddedFr32Chunk)) +} + +func UnpaddedCeil(n uint64) UnpaddedByteIndex { + return UnpaddedByteIndex((n + uint64(fr32.UnpaddedFr32Chunk-1)) / uint64(fr32.UnpaddedFr32Chunk) * uint64(fr32.UnpaddedFr32Chunk)) +} + +type PaddedByteIndex uint64 + +type RGetter func(ctx context.Context, id abi.SectorID) (sealed cid.Cid, update bool, err error) diff --git a/lib/storiface/filetype.go b/lib/storiface/filetype.go new file mode 
100644 index 000000000..56983a25d --- /dev/null +++ b/lib/storiface/filetype.go @@ -0,0 +1,460 @@ +package storiface + +import ( + "fmt" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" +) + +// FTUnsealed represents an unsealed sector file. +// FTSealed represents a sealed sector file. +// FTCache represents a cache sector file. +// FTUpdate represents an update sector file. +// FTUpdateCache represents an update cache sector file. +// FTPiece represents a Piece Park sector file. +// FileTypes represents the total number of file. +// +// The SectorFileType type is an integer type that represents different sector file. +// It has several methods to manipulate and query the file. +// The String method returns a string representation of the file. +// The Strings method returns a slice of string representations of all the file that are set in the receiver object. +// The AllSet method returns a slice of all the file that are set in the receiver object. +// The Has method checks whether a specific file type is set in the receiver object. +// The SealSpaceUse method calculates the space used by the receiver object in sealing a sector of a given size. +// The SubAllowed method removes selected file from the receiver object based on a list of allowed and denied file. +// The Unset method removes selected file from the receiver object. +// The AnyAllowed method checks whether any file in the receiver object are allowed based on a list of allowed and denied file. +// The Allowed method checks whether all file in the receiver object are allowed based on a list of allowed and denied file. +// The StoreSpaceUse method calculates the space used by the receiver object in storing a sector of a given size. +// The All method returns an array that represents which file are set in the receiver object. +// The IsNone method checks whether the receiver object represents no file. 
const (
	// "regular" sectors
	FTUnsealed SectorFileType = 1 << iota
	FTSealed
	FTCache

	// snap
	FTUpdate
	FTUpdateCache

	// Piece Park
	FTPiece

	// FileTypes equals the number of file-type bits declared above (iota here
	// is the count of the preceding constants).
	FileTypes = iota
)

// PathTypes is a slice of SectorFileType that represents different types of sector file paths.
// It contains the following types of sector file paths:
// - FTUnsealed: represents regular unsealed sectors
// - FTSealed: represents sealed sectors
// - FTCache: represents cache sectors
// - FTUpdate: represents snap sectors
// - FTUpdateCache: represents snap cache sectors
// - FTPiece: represents Piece Park sectors
var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTUpdate, FTUpdateCache, FTPiece}

// FTNone represents a sector file type of none. This constant is used in the StorageLock method to specify that a sector should not have any file locked.
// Example usage:
// err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache)
const (
	FTNone SectorFileType = 0
)

// FTAll represents the combination of all available sector file.
// It is a variable of type SectorFileType.
// The value of FTAll is calculated by iterating over the PathTypes slice and using the |= operator to perform a bitwise OR operation on each path type.
// The result is assigned to the variable out and returned.
// FTAll is immediately invoked as a function using the anonymous function syntax, so the result is returned as soon as it is calculated.
var FTAll = func() (out SectorFileType) {
	for _, pathType := range PathTypes {
		out |= pathType
	}
	return out
}()

// FSOverheadDen represents the constant value 10, which is used to calculate the overhead in various storage space utilization calculations.
const FSOverheadDen = 10

// FSOverheadSeal is a map that represents the overheads for different SectorFileType in sectors which are being sealed.
var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads
	FTUnsealed:    FSOverheadDen,
	FTSealed:      FSOverheadDen,
	FTUpdate:      FSOverheadDen,
	FTUpdateCache: FSOverheadDen*2 + 1,
	FTCache:       141, // 11 layers + D(2x ssize) + C + R'
	FTPiece:       FSOverheadDen,
}

// sector size * disk / fs overhead. FSOverheadDen is like the unit of sector size

// FsOverheadFinalized is a map that represents the finalized overhead for different types of SectorFileType.
// The keys in the map are the SectorFileType values, and the values are integers representing the overhead.
// It is used to calculate the storage space usage for different types of sectors, as shown in the example below:
// The overhead value is retrieved from FsOverheadFinalized by using the SectorFileType value as the key.
// If the overhead value is not found in the map, an error is returned indicating that there is no finalized
// overhead information for the given sector type.
var FsOverheadFinalized = map[SectorFileType]int{
	FTUnsealed:    FSOverheadDen,
	FTSealed:      FSOverheadDen,
	FTUpdate:      FSOverheadDen,
	FTUpdateCache: 1,
	FTCache:       1,
	FTPiece:       FSOverheadDen,
}

// SectorFileType represents the type of a sector file as a bitmask.
// TypeFromString converts a string to a SectorFileType.
type SectorFileType int

// TypeFromString converts a string representation of a SectorFileType to its corresponding value.
// It returns the SectorFileType and nil error if the string matches one of the existing types.
// If the string does not match any type, it returns 0 and an error.
+func TypeFromString(s string) (SectorFileType, error) { + switch s { + case "unsealed": + return FTUnsealed, nil + case "sealed": + return FTSealed, nil + case "cache": + return FTCache, nil + case "update": + return FTUpdate, nil + case "update-cache": + return FTUpdateCache, nil + case "piece": + return FTPiece, nil + default: + return 0, xerrors.Errorf("unknown sector file type '%s'", s) + } +} + +// String returns a string representation of the SectorFileType. +func (t SectorFileType) String() string { + switch t { + case FTUnsealed: + return "unsealed" + case FTSealed: + return "sealed" + case FTCache: + return "cache" + case FTUpdate: + return "update" + case FTUpdateCache: + return "update-cache" + case FTPiece: + return "piece" + default: + return fmt.Sprintf("", t, (t & ((1 << FileTypes) - 1)).Strings()) + } +} + +// Strings returns a slice of strings representing the names of the SectorFileType values that are set in the receiver value. +// Example usage: +// +// fileType := SectorFileType(FTSealed | FTCache) +// names := fileType.Strings() // names = ["sealed", "cache"] +// fmt.Println(names) +func (t SectorFileType) Strings() []string { + var out []string + for _, fileType := range PathTypes { + if fileType&t == 0 { + continue + } + + out = append(out, fileType.String()) + } + return out +} + +// AllSet returns a slice of SectorFileType values that are set in the SectorFileType receiver value +func (t SectorFileType) AllSet() []SectorFileType { + var out []SectorFileType + for _, fileType := range PathTypes { + if fileType&t == 0 { + continue + } + + out = append(out, fileType) + } + return out +} + +// Has checks if the SectorFileType has a specific singleType. +func (t SectorFileType) Has(singleType SectorFileType) bool { + return t&singleType == singleType +} + +// SealSpaceUse calculates the amount of space needed for sealing the sector +// based on the given sector size. 
It iterates over the different path types +// and calculates the space needed for each path type using the FSOverheadSeal +// map. The overhead value is multiplied by the sector size and divided by the +// FSOverheadDen constant. The total space needed is accumulated and returned. +// If there is no seal overhead information for a particular path type, an error +// is returned. +// +// Example usage: +// +// fileType := FTSealed | FTCache +// sectorSize := abi.SectorSize(32 << 20) // 32 MiB +// spaceNeeded, err := fileType.SealSpaceUse(sectorSize) +// +// Parameters: +// +// ssize: The size of the sector +// +// Returns: +// +// uint64: The amount of space needed for sealing the sector +// error: If there is no seal overhead information for a path type +func (t SectorFileType) SealSpaceUse(ssize abi.SectorSize) (uint64, error) { + var need uint64 + for _, pathType := range PathTypes { + if !t.Has(pathType) { + continue + } + + oh, ok := FSOverheadSeal[pathType] + if !ok { + return 0, xerrors.Errorf("no seal overhead info for %s", pathType) + } + + need += uint64(oh) * uint64(ssize) / FSOverheadDen + } + + return need, nil +} + +// SubAllowed takes in two parameters: allowTypes and denyTypes, both of which are slices of strings. +// If allowTypes is not empty, the method sets a denyMask with all bits set to 1, and then iterates over each allowType, +// converting it to a SectorFileType using the TypeFromString function and unsetting the corresponding bit in the denyMask. +// If a string in allowTypes cannot be converted to a valid SectorFileType, it is ignored. +// After processing allowTypes, the method iterates over each denyType, converting it to a SectorFileType using the TypeFromString function +// and setting the corresponding bit in the denyMask. +// If a string in denyTypes cannot be converted to a valid SectorFileType, it is ignored. +// Finally, the method returns the bitwise AND of the original SectorFileType and the denyMask. 
+// The returned SectorFileType will only allow the types specified in allowTypes and exclude the types specified in denyTypes. +func (t SectorFileType) SubAllowed(allowTypes []string, denyTypes []string) SectorFileType { + var denyMask SectorFileType // 1s deny + + if len(allowTypes) > 0 { + denyMask = ^denyMask + + for _, allowType := range allowTypes { + pt, err := TypeFromString(allowType) + if err != nil { + // we've told the user about this already, don't spam logs and ignore + continue + } + + denyMask = denyMask & (^pt) // unset allowed types + } + } + + for _, denyType := range denyTypes { + pt, err := TypeFromString(denyType) + if err != nil { + // we've told the user about this already, don't spam logs and ignore + continue + } + denyMask |= pt + } + + return t & denyMask +} + +// Unset removes the specified sector file type(s) from the current SectorFileType value. +// It performs a bitwise AND operation between the current value and the bitwise complement of the toUnset value. +// The result is returned as a new SectorFileType value. +// Any bits that are set in toUnset will be cleared in the result. +// Usage: result = value.Unset(typesToUnset) +func (t SectorFileType) Unset(toUnset SectorFileType) SectorFileType { + return t &^ toUnset +} + +// AnyAllowed checks if the SectorFileType has any allowed types and no denied types. +func (t SectorFileType) AnyAllowed(allowTypes []string, denyTypes []string) bool { + return t.SubAllowed(allowTypes, denyTypes) != t +} + +// Allowed checks if the SectorFileType is allowed based on the given allowTypes and denyTypes. +// Returns true if the SectorFileType is allowed, otherwise false. +func (t SectorFileType) Allowed(allowTypes []string, denyTypes []string) bool { + return t.SubAllowed(allowTypes, denyTypes) == 0 +} + +// StoreSpaceUse calculates the space used for storing sectors of a specific file type. +// It takes the sector size as input and returns the total space needed in bytes and an error, if any.
+// The calculation is based on the finalized overhead information for the file type. +// If the overhead information is not available for a particular file type, an error will be returned. +func (t SectorFileType) StoreSpaceUse(ssize abi.SectorSize) (uint64, error) { + var need uint64 + for _, pathType := range PathTypes { + if !t.Has(pathType) { + continue + } + + oh, ok := FsOverheadFinalized[pathType] + if !ok { + return 0, xerrors.Errorf("no finalized overhead info for %s", pathType) + } + + need += uint64(oh) * uint64(ssize) / FSOverheadDen + } + + return need, nil +} + +// All returns an array indicating whether each FileTypes flag is set in the SectorFileType. +func (t SectorFileType) All() [FileTypes]bool { + var out [FileTypes]bool + + for i := range out { + out[i] = t&(1<<i) > 0 + } + + return out +} + +// IsNone checks if the SectorFileType value is equal to zero. +// It returns true if the value is zero, indicating that the type is none. +func (t SectorFileType) IsNone() bool { + return t == 0 +} + +// SectorPaths represents the paths for different sector files. +type SectorPaths struct { + ID abi.SectorID + + Unsealed string + Sealed string + Cache string + Update string + UpdateCache string + Piece string +} + +// HasAllSet checks if all paths of a SectorPaths struct are set for a given SectorFileType. +func (sp SectorPaths) HasAllSet(ft SectorFileType) bool { + for _, fileType := range ft.AllSet() { + if PathByType(sp, fileType) == "" { + return false + } + } + + return true +} + +// Subset returns a new instance of SectorPaths that contains only the paths specified by the filter SectorFileType. +// It iterates over each fileType in the filter, retrieves the corresponding path from the original SectorPaths instance, and sets it in the new instance. +// Finally, it sets the ID field of the new instance to be the same as the original instance.
+func (sp SectorPaths) Subset(filter SectorFileType) SectorPaths { + var out SectorPaths + + for _, fileType := range filter.AllSet() { + SetPathByType(&out, fileType, PathByType(sp, fileType)) + } + + out.ID = sp.ID + + return out +} + +// ParseSectorID parses a sector ID from a given base name. +// It expects the format "s-t0%d-%d", where the first %d represents the miner ID +// and the second %d represents the sector number. +// +// Parameters: +// - baseName: The base name from which to parse the sector ID. +// +// Returns: +// - abi.SectorID: The parsed sector ID. +// - error: An error if parsing fails. +// +// Example usage: +// +// id, err := ParseSectorID(baseName) +// if err != nil { +// // handle error +// } +// // use id +func ParseSectorID(baseName string) (abi.SectorID, error) { + var n abi.SectorNumber + var mid abi.ActorID + read, err := fmt.Sscanf(baseName, "s-t0%d-%d", &mid, &n) + if err != nil { + return abi.SectorID{}, xerrors.Errorf("sscanf sector name ('%s'): %w", baseName, err) + } + + if read != 2 { + return abi.SectorID{}, xerrors.Errorf("parseSectorID expected to scan 2 values, got %d", read) + } + + return abi.SectorID{ + Miner: mid, + Number: n, + }, nil +} + +// SectorName returns the name of a sector in the format "s-t0<miner>-<number>" +// +// Parameters: +// - sid: The sector ID +// +// Returns: +// - The name of the sector as a string +func SectorName(sid abi.SectorID) string { + return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number) +} + +// PathByType returns the path associated with the specified fileType in the given SectorPaths. +// It panics if the requested path type is unknown.
+func PathByType(sps SectorPaths, fileType SectorFileType) string { + switch fileType { + case FTUnsealed: + return sps.Unsealed + case FTSealed: + return sps.Sealed + case FTCache: + return sps.Cache + case FTUpdate: + return sps.Update + case FTUpdateCache: + return sps.UpdateCache + case FTPiece: + return sps.Piece + } + + panic("requested unknown path type") +} + +func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) { + switch fileType { + case FTUnsealed: + sps.Unsealed = p + case FTSealed: + sps.Sealed = p + case FTCache: + sps.Cache = p + case FTUpdate: + sps.Update = p + case FTUpdateCache: + sps.UpdateCache = p + case FTPiece: + sps.Piece = p + } +} + +// PathsWithIDs represents paths and IDs for sector files. +type PathsWithIDs struct { + Paths SectorPaths + IDs SectorPaths +} + +// HasAllSet checks if all paths and IDs in PathsWithIDs have a corresponding path set for the specified SectorFileType. +// It returns true if all paths and IDs are set, and false otherwise. +func (p PathsWithIDs) HasAllSet(ft SectorFileType) bool { + return p.Paths.HasAllSet(ft) && p.IDs.HasAllSet(ft) +} diff --git a/lib/storiface/index.go b/lib/storiface/index.go new file mode 100644 index 000000000..3bf695687 --- /dev/null +++ b/lib/storiface/index.go @@ -0,0 +1,134 @@ +package storiface + +import ( + "strings" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/storage/sealer/fsutil" +) + +// ID identifies sector storage by UUID. One sector storage should map to one +// +// filesystem, local or networked / shared by multiple machines +type ID string + +const IDSep = "." 
+ +type IDList []ID + +func (il IDList) String() string { + l := make([]string, len(il)) + for i, id := range il { + l[i] = string(id) + } + return strings.Join(l, IDSep) +} + +func ParseIDList(s string) IDList { + strs := strings.Split(s, IDSep) + out := make([]ID, len(strs)) + for i, str := range strs { + out[i] = ID(str) + } + return out +} + +type Group = string + +type StorageInfo struct { + // ID is the UUID of the storage path + ID ID + + // URLs for remote access + URLs []string // TODO: Support non-http transports + + // Storage path weight; higher number means that the path will be preferred more often + Weight uint64 + + // MaxStorage is the number of bytes allowed to be used by files in the + // storage path + MaxStorage uint64 + + // CanSeal is true when the path is allowed to be used for io-intensive + // sealing operations + CanSeal bool + + // CanStore is true when the path is allowed to be used for long-term storage + CanStore bool + + // Groups is the list of path groups this path belongs to + Groups []Group + + // AllowTo is the list of paths to which data from this path can be moved to + AllowTo []Group + + // AllowTypes lists sector file types which are allowed to be put into this + // path. If empty, all file types are allowed. + // + // Valid values: + // - "unsealed" + // - "sealed" + // - "cache" + // - "update" + // - "update-cache" + // Any other value will generate a warning and be ignored. + AllowTypes []string + + // DenyTypes lists sector file types which aren't allowed to be put into this + // path. + // + // Valid values: + // - "unsealed" + // - "sealed" + // - "cache" + // - "update" + // - "update-cache" + // Any other value will generate a warning and be ignored. + DenyTypes []string + + // AllowMiners lists miner IDs which are allowed to store their sector data into + // this path.
If empty, all miner IDs are allowed + AllowMiners []string + + // DenyMiners lists miner IDs which are denied to store their sector data into + // this path + DenyMiners []string +} + +type HealthReport struct { + Stat fsutil.FsStat + Err string +} + +type SectorStorageInfo struct { + ID ID + URLs []string // TODO: Support non-http transports + BaseURLs []string + Weight uint64 + + CanSeal bool + CanStore bool + + Primary bool + + AllowTypes []string + DenyTypes []string + AllowMiners []string + DenyMiners []string +} + +type Decl struct { + abi.SectorID + SectorFileType +} + +type StoragePath struct { + ID ID + Weight uint64 + + LocalPath string + + CanSeal bool + CanStore bool +} diff --git a/lib/storiface/paths.go b/lib/storiface/paths.go new file mode 100644 index 000000000..0f0eaeadf --- /dev/null +++ b/lib/storiface/paths.go @@ -0,0 +1,39 @@ +package storiface + +import "github.com/filecoin-project/go-state-types/abi" + +type PathType string + +const ( + PathStorage PathType = "storage" + PathSealing PathType = "sealing" +) + +type AcquireMode string + +const ( + AcquireMove AcquireMode = "move" + AcquireCopy AcquireMode = "copy" +) + +type SectorLock struct { + Sector abi.SectorID + Write [FileTypes]uint + Read [FileTypes]uint +} + +type SectorLocks struct { + Locks []SectorLock +} + +type AcquireSettings struct { + Into *PathsWithIDs +} + +type AcquireOption func(*AcquireSettings) + +func AcquireInto(pathIDs PathsWithIDs) AcquireOption { + return func(settings *AcquireSettings) { + settings.Into = &pathIDs + } +} diff --git a/lib/storiface/storage.go b/lib/storiface/storage.go new file mode 100644 index 000000000..62884d08e --- /dev/null +++ b/lib/storiface/storage.go @@ -0,0 +1,164 @@ +package storiface + +import ( + "context" + "io" + "net/http" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/proof" +) + +type Data = io.Reader + +// Reader is a fully-featured Reader. 
It is the +// union of the standard IO sequential access method (Read), with seeking +// ability (Seek), as well random access (ReadAt). +type Reader interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker +} + +type SectorRef struct { + ID abi.SectorID + ProofType abi.RegisteredSealProof +} + +// PieceNumber is a reference to a piece in the storage system; mapping between +// pieces in the storage system and piece CIDs is maintained by the storage index +type PieceNumber uint64 + +func (pn PieceNumber) Ref() SectorRef { + return SectorRef{ + ID: abi.SectorID{Miner: 0, Number: abi.SectorNumber(pn)}, + ProofType: abi.RegisteredSealProof_StackedDrg64GiBV1, // This only cares about TreeD which is the same for all sizes + } +} + +type PreCommit1Out []byte + +type SectorCids struct { + Unsealed cid.Cid + Sealed cid.Cid +} + +type ReplicaUpdateProof []byte + +type Verifier interface { + VerifySeal(proof.SealVerifyInfo) (bool, error) + VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) + VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) + VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) + VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) + + GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) +} + +// Prover contains cheap proving-related methods +type Prover interface { + // TODO: move GenerateWinningPoStSectorChallenge from the Verifier interface to here + + AggregateSealProofs(aggregateInfo proof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) +} + +type SectorLocation struct { + // Local when set to true indicates to lotus that sector data is already + // available locally; When set lotus will skip fetching sector data, and + // only check that sector data exists in sector storage + Local bool + + // URL to the sector data + // For 
sealed/unsealed sector, lotus expects octet-stream + // For cache, lotus expects a tar archive with cache files + // Valid schemas: + // - http:// / https:// + URL string + + // optional http headers to use when requesting sector data + Headers []SecDataHttpHeader +} + +func (sd *SectorLocation) HttpHeaders() http.Header { + out := http.Header{} + for _, header := range sd.Headers { + out[header.Key] = append(out[header.Key], header.Value) + } + return out +} + +// note: we can't use http.Header as that's backed by a go map, which is all kinds of messy + +type SecDataHttpHeader struct { + Key string + Value string +} + +// StorageConfig .lotusstorage/storage.json +type StorageConfig struct { + StoragePaths []LocalPath +} + +type LocalPath struct { + Path string +} + +// LocalStorageMeta [path]/sectorstore.json +type LocalStorageMeta struct { + ID ID + + // A high weight means data is more likely to be stored in this path + Weight uint64 // 0 = readonly + + // Intermediate data for the sealing process will be stored here + CanSeal bool + + // Finalized sectors that will be proved over time will be stored here + CanStore bool + + // MaxStorage specifies the maximum number of bytes to use for sector storage + // (0 = unlimited) + MaxStorage uint64 + + // List of storage groups this path belongs to + Groups []string + + // List of storage groups to which data from this path can be moved. If none + // are specified, allow to all + AllowTo []string + + // AllowTypes lists sector file types which are allowed to be put into this + // path. If empty, all file types are allowed. + // + // Valid values: + // - "unsealed" + // - "sealed" + // - "cache" + // - "update" + // - "update-cache" + // Any other value will generate a warning and be ignored. + AllowTypes []string + + // DenyTypes lists sector file types which aren't allowed to be put into this + // path. 
+ // + // Valid values: + // - "unsealed" + // - "sealed" + // - "cache" + // - "update" + // - "update-cache" + // Any other value will generate a warning and be ignored. + DenyTypes []string + + // AllowMiners lists miner IDs which are allowed to store their sector data into + // this path. If empty, all miner IDs are allowed + AllowMiners []string + + // DenyMiners lists miner IDs which are denied to store their sector data into + // this path + DenyMiners []string +} diff --git a/lib/storiface/worker.go b/lib/storiface/worker.go new file mode 100644 index 000000000..a6922f5cc --- /dev/null +++ b/lib/storiface/worker.go @@ -0,0 +1,81 @@ +package storiface + +import ( + "errors" + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/proof" +) + +type WindowPoStResult struct { + PoStProofs proof.PoStProof + Skipped []abi.SectorID +} + +type PostSectorChallenge struct { + SealProof abi.RegisteredSealProof + SectorNumber abi.SectorNumber + SealedCID cid.Cid + Challenge []uint64 + Update bool +} + +type FallbackChallenges struct { + Sectors []abi.SectorNumber + Challenges map[abi.SectorNumber][]uint64 +} + +type ErrorCode int + +const ( + ErrUnknown ErrorCode = iota +) + +const ( + // Temp Errors + ErrTempUnknown ErrorCode = iota + 100 + ErrTempWorkerRestart + ErrTempAllocateSpace +) + +type WorkError interface { + ErrCode() ErrorCode +} + +type CallError struct { + Code ErrorCode + Message string + sub error +} + +func (c *CallError) ErrCode() ErrorCode { + return c.Code +} + +func (c *CallError) Error() string { + return fmt.Sprintf("storage call error %d: %s", c.Code, c.Message) +} + +func (c *CallError) Unwrap() error { + if c.sub != nil { + return c.sub + } + + return errors.New(c.Message) +} + +var _ WorkError = &CallError{} + +func Err(code ErrorCode, sub error) *CallError { + return &CallError{ + Code: code, + Message: sub.Error(), + + sub: sub, + } +} + +type WorkerJob struct{} 
// dummy diff --git a/lib/testutils/testutils.go b/lib/testutils/testutils.go new file mode 100644 index 000000000..8009ca867 --- /dev/null +++ b/lib/testutils/testutils.go @@ -0,0 +1,158 @@ +package testutils + +import ( + "context" + "fmt" + "io" + "math/rand" + "os" + + "github.com/ipfs/boxo/blockservice" + bstore "github.com/ipfs/boxo/blockstore" + chunk "github.com/ipfs/boxo/chunker" + "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/boxo/files" + "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/boxo/ipld/unixfs/importer/balanced" + ihelper "github.com/ipfs/boxo/ipld/unixfs/importer/helpers" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-cidutil" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + ipldformat "github.com/ipfs/go-ipld-format" + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/blockstore" + "github.com/multiformats/go-multihash" +) + +const defaultHashFunction = uint64(multihash.BLAKE2B_MIN + 31) + +func CreateRandomFile(dir string, rseed, size int) (string, error) { + source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size)) + + file, err := os.CreateTemp(dir, "sourcefile.dat") + if err != nil { + return "", err + } + + _, err = io.Copy(file, source) + if err != nil { + return "", err + } + + // + _, err = file.Seek(0, io.SeekStart) + if err != nil { + return "", err + } + + return file.Name(), nil +} + +func CreateDenseCARWith(dir, src string, chunksize int64, maxlinks int, caropts []carv2.Option) (cid.Cid, string, error) { + bs := bstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + dagSvc := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + + root, err := WriteUnixfsDAGTo(src, dagSvc, chunksize, maxlinks) + if err != nil { + return cid.Undef, "", err + } + + // Create a UnixFS DAG again AND generate a CARv2 file using a CARv2 + // read-write blockstore now that we have the root. 
+ out, err := os.CreateTemp(dir, "rand") + if err != nil { + return cid.Undef, "", err + } + err = out.Close() + if err != nil { + return cid.Undef, "", err + } + + rw, err := blockstore.OpenReadWrite(out.Name(), []cid.Cid{root}, caropts...) + if err != nil { + return cid.Undef, "", err + } + + dagSvc = merkledag.NewDAGService(blockservice.New(rw, offline.Exchange(rw))) + + root2, err := WriteUnixfsDAGTo(src, dagSvc, chunksize, maxlinks) + if err != nil { + return cid.Undef, "", err + } + + err = rw.Finalize() + if err != nil { + return cid.Undef, "", err + } + + if root != root2 { + return cid.Undef, "", fmt.Errorf("DAG root cid mismatch") + } + + return root, out.Name(), nil +} + +func WriteUnixfsDAGTo(path string, into ipldformat.DAGService, chunksize int64, maxlinks int) (cid.Cid, error) { + file, err := os.Open(path) + if err != nil { + return cid.Undef, err + } + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return cid.Undef, err + } + + // get a IPLD reader path file + // required to write the Unixfs DAG blocks to a filestore + rpf, err := files.NewReaderPathFile(file.Name(), file, stat) + if err != nil { + return cid.Undef, err + } + + // generate the dag and get the root + // import to UnixFS + prefix, err := merkledag.PrefixForCidVersion(1) + if err != nil { + return cid.Undef, err + } + + prefix.MhType = defaultHashFunction + + bufferedDS := ipldformat.NewBufferedDAG(context.Background(), into) + params := ihelper.DagBuilderParams{ + Maxlinks: maxlinks, + RawLeaves: true, + // NOTE: InlineBuilder not recommended, we are using this to test identity CIDs + CidBuilder: cidutil.InlineBuilder{ + Builder: prefix, + Limit: 126, + }, + Dagserv: bufferedDS, + NoCopy: true, + } + + db, err := params.New(chunk.NewSizeSplitter(rpf, chunksize)) + if err != nil { + return cid.Undef, err + } + + nd, err := balanced.Layout(db) + if err != nil { + return cid.Undef, err + } + + err = bufferedDS.Commit() + if err != nil { + return cid.Undef, err + } + 
+ err = rpf.Close() + if err != nil { + return cid.Undef, err + } + + return nd.Cid(), nil +} diff --git a/market/deal_ingest_common.go b/market/deal_ingest_common.go deleted file mode 100644 index 0274b033b..000000000 --- a/market/deal_ingest_common.go +++ /dev/null @@ -1 +0,0 @@ -package market diff --git a/market/fakelm/iface.go b/market/fakelm/iface.go deleted file mode 100644 index 1bc91b35e..000000000 --- a/market/fakelm/iface.go +++ /dev/null @@ -1,33 +0,0 @@ -package fakelm - -import ( - "context" - - "github.com/google/uuid" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -// MinimalLMApi is a subset of the LotusMiner API that is exposed by Curio -// for consumption by boost -type MinimalLMApi interface { - ActorAddress(context.Context) (address.Address, error) - - WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) - - SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) - - SectorsList(context.Context) ([]abi.SectorNumber, error) - SectorsSummary(ctx context.Context) (map[api.SectorState]int, error) - - SectorsListInStates(context.Context, []api.SectorState) ([]abi.SectorNumber, error) - - StorageRedeclareLocal(context.Context, *storiface.ID, bool) error - - ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) - SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) -} diff --git a/market/fakelm/lmimpl.go b/market/fakelm/lmimpl.go deleted file mode 100644 index 65a90d959..000000000 --- a/market/fakelm/lmimpl.go +++ /dev/null @@ -1,528 +0,0 @@ -package fakelm - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/json" - "net/http" - "net/url" - - "github.com/gbrlsnchs/jwt/v3" 
- "github.com/google/uuid" - logging "github.com/ipfs/go-log/v2" - typegen "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/curio/api" - "github.com/filecoin-project/curio/deps/config" - "github.com/filecoin-project/curio/harmony/harmonydb" - "github.com/filecoin-project/curio/lib/paths" - "github.com/filecoin-project/curio/market" - - lapi "github.com/filecoin-project/lotus/api" - market2 "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - sealing "github.com/filecoin-project/lotus/storage/pipeline" - lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("lmrpc") - -type LMRPCProvider struct { - si paths.SectorIndex - full api.Chain - - maddr address.Address // lotus-miner RPC is single-actor - minerID abi.ActorID - - ssize abi.SectorSize - - pi market.Ingester - db *harmonydb.DB - conf *config.CurioConfig -} - -func NewLMRPCProvider(si paths.SectorIndex, full api.Chain, maddr address.Address, minerID abi.ActorID, ssize abi.SectorSize, pi market.Ingester, db *harmonydb.DB, conf *config.CurioConfig) *LMRPCProvider { - return &LMRPCProvider{ - si: si, - full: full, - maddr: maddr, - minerID: minerID, - ssize: ssize, - pi: pi, - db: db, - conf: conf, - } -} - -func (l *LMRPCProvider) ActorAddress(ctx context.Context) (address.Address, error) { - return l.maddr, nil -} - -func (l *LMRPCProvider) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) { - // correct enough - return map[uuid.UUID][]storiface.WorkerJob{}, nil -} - -func (l *LMRPCProvider) SectorsStatus(ctx 
context.Context, sid abi.SectorNumber, showOnChainInfo bool) (lapi.SectorInfo, error) { - // TODO: Add snap, Add open_sector_pieces - - var ssip []struct { - PieceCID *string `db:"piece_cid"` - DealID *int64 `db:"f05_deal_id"` - DDOPAM *string `db:"ddo_pam"` - Complete bool `db:"after_commit_msg_success"` - Failed bool `db:"failed"` - SDR bool `db:"after_sdr"` - PoRep bool `db:"after_porep"` - Tree bool `db:"after_tree_r"` - IsSnap bool `db:"is_snap"` - Encode bool `db:"after_encode"` - SnapProve bool `db:"after_prove"` - SnapCommit bool `db:"after_prove_msg_success"` - SnapMoveStorage bool `db:"after_move_storage"` - } - - err := l.db.Select(ctx, &ssip, ` - WITH SectorMeta AS ( - SELECT - sm.sp_id, - sm.sector_num, - sm.orig_sealed_cid, - sm.cur_sealed_cid - FROM - sectors_meta sm - WHERE - sm.sp_id = $1 AND sm.sector_num = $2 - ), - SDRMeta AS ( - SELECT - sp.sp_id, - sp.sector_number, - sp.after_commit_msg, - sp.failed, - sp.after_sdr, - sp.after_porep, - sp.after_tree_r, - sp.after_commit_msg_success - FROM - sectors_sdr_pipeline sp - WHERE - sp.sp_id = $1 AND sp.sector_number = $2 - ), - CheckCommit AS ( - SELECT - COALESCE(sp.sp_id, sm.sp_id) AS sp_id, - COALESCE(sp.sector_number, sm.sector_num) AS sector_number, - COALESCE(sp.after_commit_msg, TRUE) AS after_commit_msg, - COALESCE(sp.failed, FALSE) AS failed, - COALESCE(sp.after_sdr, TRUE) AS after_sdr, - COALESCE(sp.after_porep, TRUE) AS after_porep, - COALESCE(sp.after_tree_r, TRUE) AS after_tree_r, - COALESCE(sp.after_commit_msg_success, TRUE) AS after_commit_msg_success, - COALESCE(snap.after_prove_msg_success, snap.after_prove_msg_success is null) AS after_snap_msg_success, - COALESCE(sm.orig_sealed_cid != sm.cur_sealed_cid, FALSE) AS is_snap, - COALESCE(snap.after_encode, COALESCE(sm.orig_sealed_cid != sm.cur_sealed_cid, FALSE)) AS after_encode, - COALESCE(snap.after_prove, COALESCE(sm.orig_sealed_cid != sm.cur_sealed_cid, FALSE)) AS after_prove, - COALESCE(snap.after_prove_msg_success, 
COALESCE(sm.orig_sealed_cid != sm.cur_sealed_cid, FALSE)) AS after_prove_msg_success, - COALESCE(snap.after_move_storage, COALESCE(sm.orig_sealed_cid != sm.cur_sealed_cid, FALSE)) AS after_move_storage - FROM - SDRMeta sp - FULL OUTER JOIN SectorMeta sm ON sp.sp_id = sm.sp_id AND sp.sector_number = sm.sector_num - LEFT JOIN sectors_snap_pipeline snap ON sm.sp_id = snap.sp_id AND sm.sector_num = snap.sector_number - WHERE - (sp.sp_id = $1 AND sp.sector_number = $2) OR (sm.sp_id = $1 AND sm.sector_num = $2) - ), - MetaPieces AS ( - SELECT - mp.piece_cid, - mp.f05_deal_id, - mp.ddo_pam as ddo_pam, - cc.after_commit_msg_success AND after_snap_msg_success as after_commit_msg_success, - cc.failed, - cc.after_sdr, - cc.after_tree_r, - cc.after_porep, - cc.is_snap, - cc.after_encode, - cc.after_prove, - cc.after_prove_msg_success, - cc.after_move_storage - FROM - sectors_meta_pieces mp - INNER JOIN - CheckCommit cc ON mp.sp_id = cc.sp_id AND mp.sector_num = cc.sector_number - WHERE - cc.after_commit_msg IS TRUE - ), - InitialPieces AS ( - SELECT - ip.piece_cid, - ip.f05_deal_id, - ip.direct_piece_activation_manifest as ddo_pam, - cc.after_commit_msg_success, - cc.failed, - cc.after_sdr, - cc.after_tree_r, - cc.after_porep, - FALSE as is_snap, - FALSE as after_encode, - FALSE as after_prove, - FALSE as after_prove_msg_success, - FALSE as after_move_storage - FROM - sectors_sdr_initial_pieces ip - INNER JOIN - CheckCommit cc ON ip.sp_id = cc.sp_id AND ip.sector_number = cc.sector_number - WHERE - cc.after_commit_msg IS FALSE - ), - InitialPiecesSnap AS ( - SELECT - ip.piece_cid, - NULL::bigint as f05_deal_id, - ip.direct_piece_activation_manifest as ddo_pam, - FALSE as after_commit_msg_success, - FALSE as failed, - FALSE AS after_sdr, - FALSE AS after_tree_r, - FALSE AS after_porep, - TRUE AS is_snap, - cc.after_encode, - cc.after_prove, - cc.after_prove_msg_success, - cc.after_move_storage - FROM - sectors_snap_initial_pieces ip - INNER JOIN - CheckCommit cc ON ip.sp_id = 
cc.sp_id AND ip.sector_number = cc.sector_number - WHERE - cc.after_commit_msg IS TRUE - ), - FallbackPieces AS ( - SELECT - op.piece_cid, - op.f05_deal_id, - op.direct_piece_activation_manifest as ddo_pam, - FALSE as after_commit_msg_success, - FALSE as failed, - FALSE as after_sdr, - FALSE as after_tree_r, - FALSE as after_porep, - op.is_snap as is_snap, - FALSE as after_encode, - FALSE as after_prove, - FALSE as after_prove_msg_success, - FALSE as after_move_storage - FROM - open_sector_pieces op - WHERE - op.sp_id = $1 AND op.sector_number = $2 - AND NOT EXISTS (SELECT 1 FROM sectors_sdr_pipeline sp WHERE sp.sp_id = op.sp_id AND sp.sector_number = op.sector_number) - ) - SELECT * FROM MetaPieces - UNION ALL - SELECT * FROM InitialPiecesSnap - UNION ALL - SELECT * FROM InitialPieces - UNION ALL - SELECT * FROM FallbackPieces;`, l.minerID, sid) - if err != nil { - return lapi.SectorInfo{}, err - } - - var deals []abi.DealID - var seenDealIDs = make(map[abi.DealID]struct{}) - var isSnap bool - - if len(ssip) > 0 { - for _, d := range ssip { - var dealID abi.DealID - - if d.DealID != nil { - dealID = abi.DealID(*d.DealID) - } else if d.DDOPAM != nil { - var pam miner.PieceActivationManifest - err := json.Unmarshal([]byte(*d.DDOPAM), &pam) - if err != nil { - return lapi.SectorInfo{}, err - } - if len(pam.Notify) != 1 { - continue - } - if pam.Notify[0].Address != market2.Address { - continue - } - maj, val, err := typegen.CborReadHeaderBuf(bytes.NewReader(pam.Notify[0].Payload), make([]byte, 9)) - if err != nil { - return lapi.SectorInfo{}, err - } - if maj != typegen.MajUnsignedInt { - log.Errorw("deal id not an unsigned int", "maj", maj) - continue - } - dealID = abi.DealID(val) - } - - if !isSnap && d.IsSnap { - isSnap = true - } - - if _, ok := seenDealIDs[dealID]; !ok { - deals = append(deals, dealID) - seenDealIDs[dealID] = struct{}{} - } - } - } - - spt, err := miner.SealProofTypeFromSectorSize(l.ssize, network.Version20, miner.SealProofVariant_Standard) // 
good enough, just need this for ssize anyways - if err != nil { - return lapi.SectorInfo{}, err - } - - ret := lapi.SectorInfo{ - SectorID: sid, - CommD: nil, - CommR: nil, - Proof: nil, - Deals: deals, - Pieces: nil, - Ticket: lapi.SealTicket{}, - Seed: lapi.SealSeed{}, - PreCommitMsg: nil, - CommitMsg: nil, - Retries: 0, - ToUpgrade: false, - ReplicaUpdateMessage: nil, - LastErr: "", - Log: nil, - SealProof: spt, - Activation: 0, - Expiration: 0, - DealWeight: big.Zero(), - VerifiedDealWeight: big.Zero(), - InitialPledge: big.Zero(), - OnTime: 0, - Early: 0, - } - - // If no rows found i.e. sector doesn't exist in DB - - if len(ssip) == 0 { - ret.State = lapi.SectorState(sealing.UndefinedSectorState) - return ret, nil - } - currentSSIP := ssip[0] - - switch { - case isSnap && !currentSSIP.Encode: - ret.State = lapi.SectorState(sealing.UpdateReplica) - case currentSSIP.Encode && !currentSSIP.SnapProve: - ret.State = lapi.SectorState(sealing.ProveReplicaUpdate) - case currentSSIP.SnapProve && !currentSSIP.SnapCommit: - ret.State = lapi.SectorState(sealing.SubmitReplicaUpdate) - case currentSSIP.SnapCommit && !currentSSIP.SnapMoveStorage: - ret.State = lapi.SectorState(sealing.FinalizeReplicaUpdate) - case currentSSIP.SnapMoveStorage: - ret.State = lapi.SectorState(sealing.Proving) - case currentSSIP.Failed: - ret.State = lapi.SectorState(sealing.FailedUnrecoverable) - case !isSnap && !currentSSIP.SDR: - ret.State = lapi.SectorState(sealing.PreCommit1) - case currentSSIP.SDR && !currentSSIP.Tree: - ret.State = lapi.SectorState(sealing.PreCommit2) - case currentSSIP.SDR && currentSSIP.Tree && !currentSSIP.PoRep: - ret.State = lapi.SectorState(sealing.Committing) - case currentSSIP.SDR && currentSSIP.Tree && currentSSIP.PoRep && !currentSSIP.Complete: - ret.State = lapi.SectorState(sealing.FinalizeSector) - case currentSSIP.Complete: - ret.State = lapi.SectorState(sealing.Proving) - default: - return lapi.SectorInfo{}, nil - } - return ret, nil -} - -func (l 
*LMRPCProvider) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) { - decls, err := l.si.StorageList(ctx) - if err != nil { - return nil, err - } - - var out []abi.SectorNumber - for _, decl := range decls { - for _, s := range decl { - if s.Miner != l.minerID { - continue - } - - out = append(out, s.SectorID.Number) - } - } - - return out, nil -} - -type sectorParts struct { - sealed, unsealed, cache bool - inStorage bool -} - -func (l *LMRPCProvider) SectorsSummary(ctx context.Context) (map[lapi.SectorState]int, error) { - decls, err := l.si.StorageList(ctx) - if err != nil { - return nil, err - } - - states := map[abi.SectorID]sectorParts{} - for si, decll := range decls { - sinfo, err := l.si.StorageInfo(ctx, si) - if err != nil { - return nil, err - } - - for _, decl := range decll { - if decl.Miner != l.minerID { - continue - } - - state := states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] - state.sealed = state.sealed || decl.Has(storiface.FTSealed) - state.unsealed = state.unsealed || decl.Has(storiface.FTUnsealed) - state.cache = state.cache || decl.Has(storiface.FTCache) - state.inStorage = state.inStorage || sinfo.CanStore - states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] = state - } - } - - out := map[lapi.SectorState]int{} - for _, state := range states { - switch { - case state.sealed && state.inStorage: - out[lapi.SectorState(sealing.Proving)]++ - default: - // not even close to correct, but good enough for now - out[lapi.SectorState(sealing.PreCommit1)]++ - } - } - - return out, nil -} - -func (l *LMRPCProvider) SectorsListInStates(ctx context.Context, want []lapi.SectorState) ([]abi.SectorNumber, error) { - decls, err := l.si.StorageList(ctx) - if err != nil { - return nil, err - } - - wantProving, wantPrecommit1 := false, false - for _, s := range want { - switch s { - case lapi.SectorState(sealing.Proving): - wantProving = true - case lapi.SectorState(sealing.PreCommit1): - wantPrecommit1 = 
true - } - } - - states := map[abi.SectorID]sectorParts{} - - for si, decll := range decls { - sinfo, err := l.si.StorageInfo(ctx, si) - if err != nil { - return nil, err - } - - for _, decl := range decll { - if decl.Miner != l.minerID { - continue - } - - state := states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] - state.sealed = state.sealed || decl.Has(storiface.FTSealed) - state.unsealed = state.unsealed || decl.Has(storiface.FTUnsealed) - state.cache = state.cache || decl.Has(storiface.FTCache) - state.inStorage = state.inStorage || sinfo.CanStore - states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] = state - } - } - var out []abi.SectorNumber - - for id, state := range states { - switch { - case state.sealed && state.inStorage: - if wantProving { - out = append(out, id.Number) - } - default: - // not even close to correct, but good enough for now - if wantPrecommit1 { - out = append(out, id.Number) - } - } - } - - return out, nil -} - -func (l *LMRPCProvider) StorageRedeclareLocal(ctx context.Context, id *storiface.ID, b bool) error { - // so this rescans and redeclares sectors on lotus-miner; whyyy is boost even calling this? 
- - return nil -} - -func (l *LMRPCProvider) IsUnsealed(ctx context.Context, sectorNum abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { - sectorID := abi.SectorID{Miner: l.minerID, Number: sectorNum} - - si, err := l.si.StorageFindSector(ctx, sectorID, storiface.FTUnsealed, 0, false) - if err != nil { - return false, err - } - - // yes, yes, technically sectors can be partially unsealed, but that is never done in practice - // and can't even be easily done with the current implementation - return len(si) > 0, nil -} - -func (l *LMRPCProvider) ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) { - return abi.PieceInfo{}, xerrors.Errorf("not supported") -} - -func (l *LMRPCProvider) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d lapi.PieceDealInfo) (lapi.SectorOffset, error) { - if d.DealProposal.PieceSize != abi.PaddedPieceSize(l.ssize) { - return lapi.SectorOffset{}, xerrors.Errorf("only full-sector pieces are supported") - } - - return lapi.SectorOffset{}, xerrors.Errorf("not supported, use AllocatePieceToSector") -} - -func (l *LMRPCProvider) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (lapi.SectorOffset, error) { - return l.pi.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header) -} - -func (l *LMRPCProvider) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) { - type jwtPayload struct { - Allow []auth.Permission - } - - p := jwtPayload{ - Allow: perms, - } - - sk, err := base64.StdEncoding.DecodeString(l.conf.Apis.StorageRPCSecret) - if err != nil { - return nil, xerrors.Errorf("decode secret: %w", err) - } - - return jwt.Sign(&p, jwt.NewHS256(sk)) -} - -var _ MinimalLMApi = &LMRPCProvider{} diff --git a/market/indexstore/create.cql b/market/indexstore/create.cql new file mode 
100644 index 000000000..50cd1865a --- /dev/null +++ b/market/indexstore/create.cql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS PayloadToPiece ( + PieceCid BLOB, + PayloadMultihash BLOB, -- 20 bytes trimmed + BlockOffset BIGINT, + BlockSize BIGINT, + PRIMARY KEY (PayloadMultihash, PieceCid) +) WITH transactions = { 'enabled' : true }; + +CREATE INDEX IF NOT EXISTS idx_piece_multihash + ON PayloadToPiece (PieceCid, PayloadMultihash); \ No newline at end of file diff --git a/market/indexstore/indexstore.go b/market/indexstore/indexstore.go new file mode 100644 index 000000000..4f70ba065 --- /dev/null +++ b/market/indexstore/indexstore.go @@ -0,0 +1,297 @@ +package indexstore + +import ( + "context" + _ "embed" + "errors" + "fmt" + "strings" + "time" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/multiformats/go-multihash" + "github.com/yugabyte/gocql" + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/deps/config" +) + +const keyspace = "curio" + +//go:embed create.cql +var createCQL string + +var log = logging.Logger("indexstore") + +type settings struct { + // Number of records per insert batch + InsertBatchSize int + // Number of concurrent inserts to split AddIndex/DeleteIndex calls to + InsertConcurrency int +} + +type IndexStore struct { + settings settings + cluster *gocql.ClusterConfig + session *gocql.Session + ctx context.Context +} + +type OffsetSize struct { + // Offset is the offset into the CAR file of the section, where a section + // is
+ Offset uint64 `json:"offset"` + // Size is the size of the block data (not the whole section) + Size uint64 `json:"size"` +} + +type Record struct { + Cid cid.Cid `json:"cid"` + OffsetSize `json:"offsetsize"` +} + +// Probability of a collision in two 20 byte hashes (birthday problem): +// 2^(20*8/2) = 1.4 x 10^24 +const multihashLimitBytes = 20 + +// trimMultihash trims the multihash to the last multihashLimitBytes bytes +func trimMultihash(mh multihash.Multihash) []byte { + var idx int + if len(mh) > multihashLimitBytes { + idx = len(mh) - multihashLimitBytes + } + return mh[idx:] +} + +var ErrNotFound = errors.New("not found") + +func normalizeMultihashError(m multihash.Multihash, err error) error { + if err == nil { + return nil + } + if isNotFoundErr(err) { + return fmt.Errorf("multihash %s: %w", m, ErrNotFound) + } + return err +} + +func isNotFoundErr(err error) bool { + if err == nil { + return false + } + + if errors.Is(err, gocql.ErrNotFound) { + return true + } + + // Unfortunately it seems like the Cassandra driver doesn't always return + // a specific not found error type, so we need to rely on string parsing + return strings.Contains(strings.ToLower(err.Error()), "not found") +} + +func NewIndexStore(hosts []string, cfg *config.CurioConfig) (*IndexStore, error) { + if len(hosts) == 0 { + return nil, xerrors.Errorf("no hosts provided for cassandra") + } + + cluster := gocql.NewCluster(hosts...) 
+ cluster.Timeout = time.Minute + + store := &IndexStore{ + cluster: cluster, + settings: settings{ + InsertBatchSize: cfg.Market.StorageMarketConfig.Indexing.InsertBatchSize, + InsertConcurrency: cfg.Market.StorageMarketConfig.Indexing.InsertConcurrency, + }, + } + + return store, store.Start(context.Background()) +} + +func (i *IndexStore) Start(ctx context.Context) error { + // Create cassandra keyspace + session, err := i.cluster.CreateSession() + if err != nil { + return xerrors.Errorf("creating cassandra session: %w", err) + + } + query := `CREATE KEYSPACE IF NOT EXISTS ` + keyspace + + ` WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 }` + err = session.Query(query).WithContext(ctx).Exec() + if err != nil { + return xerrors.Errorf("creating cassandra keyspace: %w", err) + } + + session.Close() + + // Recreate session with the keyspace + i.cluster.Keyspace = keyspace + session, err = i.cluster.CreateSession() + if err != nil { + return xerrors.Errorf("creating cassandra session: %w", err) + + } + + lines := strings.Split(createCQL, ";") + for _, line := range lines { + line = strings.Trim(line, "\n \t") + if line == "" { + continue + } + log.Debug(line) + err := session.Query(line).WithContext(ctx).Exec() + if err != nil { + return xerrors.Errorf("creating tables: executing\n%s\n%w", line, err) + } + } + + i.session = session + i.ctx = ctx + + return nil +} + +// AddIndex adds multihash -> piece cid mappings, along with offset / size information for the piece. +// It takes a context, the piece cid, and a slice of Record structs as arguments. +// It returns an error if any error occurs during the execution. 
+func (i *IndexStore) AddIndex(ctx context.Context, pieceCid cid.Cid, recordsChan chan Record) error { + Qry := `INSERT INTO PayloadToPiece (PieceCid, PayloadMultihash, BlockOffset, BlockSize) VALUES (?, ?, ?, ?)` + pieceCidBytes := pieceCid.Bytes() + + var eg errgroup.Group + + // Start worker threads based on InsertConcurrency value + // These workers will be further batch based on InsertBatchSize in the + // goroutines for each BatchInsert operation + for worker := 0; worker < i.settings.InsertConcurrency; worker++ { + eg.Go(func() error { + var batch *gocql.Batch + // Running a loop on all instead of creating batches require less memory + for { + if batch == nil { + batch = i.session.NewBatch(gocql.UnloggedBatch).WithContext(ctx) + batch.Entries = make([]gocql.BatchEntry, 0, i.settings.InsertBatchSize) + } + + rec, ok := <-recordsChan + + if !ok { + if len(batch.Entries) > 0 { + err := i.session.ExecuteBatch(batch) + if err != nil { + return fmt.Errorf("executing batch insert for piece %s: %w", pieceCid, err) + } + } + return nil + } + + batch.Entries = append(batch.Entries, gocql.BatchEntry{ + Stmt: Qry, + Args: []any{pieceCidBytes, trimMultihash(rec.Cid.Hash()), rec.Offset, rec.Size}, + Idempotent: true, + }) + + if len(batch.Entries) == i.settings.InsertBatchSize { + err := func() error { + defer func(start time.Time) { + log.Debugw("addIndex Batch Insert", "took", time.Since(start), "entries", len(batch.Entries)) + }(time.Now()) + + err := i.session.ExecuteBatch(batch) + if err != nil { + return fmt.Errorf("executing batch insert for piece %s: %w", pieceCid, err) + } + return nil + }() + if err != nil { + return err + } + batch = nil + } + } + }) + } + + err := eg.Wait() + if err != nil { + return err + } + + return nil +} + +// RemoveIndexes removes all multihash -> piece cid mappings, and all +// offset / size information for the piece. 
func (i *IndexStore) RemoveIndexes(ctx context.Context, pieceCid cid.Cid) error {
	delQry := `DELETE FROM PayloadToPiece WHERE PayloadMultihash = ? AND PieceCid = ?`
	pieceCidBytes := pieceCid.Bytes()

	// Get multihashes for piece. Deletes are batched while the select iterator
	// is still open; both run on the same session.
	getQry := `SELECT PayloadMultihash FROM PayloadToPiece WHERE PieceCid = ?`
	iter := i.session.Query(getQry, pieceCidBytes).WithContext(ctx).Iter()

	// Create batch for deletion, reusing InsertBatchSize as the batch bound.
	batch := i.session.NewBatch(gocql.UnloggedBatch).WithContext(ctx)
	batch.Entries = make([]gocql.BatchEntry, 0, i.settings.InsertBatchSize)

	// NOTE(review): payloadMHBz is re-scanned into each iteration; this relies
	// on gocql allocating a fresh slice per row rather than reusing the buffer
	// — confirm, otherwise entries queued in the batch could alias each other.
	var payloadMHBz []byte
	for iter.Scan(&payloadMHBz) {
		// Add each delete operation to batch
		batch.Entries = append(batch.Entries, gocql.BatchEntry{
			Stmt:       delQry,
			Args:       []any{payloadMHBz, pieceCidBytes},
			Idempotent: true,
		})

		// Execute batch once it reaches the configured size.
		if len(batch.Entries) >= i.settings.InsertBatchSize {
			err := i.session.ExecuteBatch(batch)
			if err != nil {
				return xerrors.Errorf("executing batch delete for piece %s: %w", pieceCid, err)
			}
			// Create a new batch after executing
			batch = i.session.NewBatch(gocql.UnloggedBatch).WithContext(ctx)
			batch.Entries = make([]gocql.BatchEntry, 0, i.settings.InsertBatchSize)
		}
	}

	// Execute remaining operations in the batch
	if len(batch.Entries) > 0 {
		err := i.session.ExecuteBatch(batch)
		if err != nil {
			return xerrors.Errorf("executing batch delete for piece %s: %w", pieceCid, err)
		}
	}

	// Close the iterator last; any query error surfaces here.
	if err := iter.Close(); err != nil {
		return xerrors.Errorf("Getting piece index for piece %s: %w", pieceCid, err)
	}

	return nil
}

// PiecesContainingMultihash gets all pieces that contain a multihash (used when retrieving by payload CID)
func (i *IndexStore) PiecesContainingMultihash(ctx context.Context, m multihash.Multihash) ([]cid.Cid, error) {
	var pcids []cid.Cid
	var bz []byte
	qry := `SELECT PieceCid FROM PayloadToPiece WHERE PayloadMultihash = ?`
	iter := i.session.Query(qry, trimMultihash(m)).WithContext(ctx).Iter()
	for
iter.Scan(&bz) { + pcid, err := cid.Parse(bz) + if err != nil { + return nil, fmt.Errorf("parsing piece cid: %w", err) + } + pcids = append(pcids, pcid) + } + if err := iter.Close(); err != nil { + return nil, fmt.Errorf("getting pieces containing multihash %s: %w", m, err) + } + + // No pieces found for multihash, return a "not found" error + if len(pcids) == 0 { + return nil, normalizeMultihashError(m, ErrNotFound) + } + return pcids, nil +} diff --git a/market/indexstore/indexstore_test.go b/market/indexstore/indexstore_test.go new file mode 100644 index 000000000..79e174fb8 --- /dev/null +++ b/market/indexstore/indexstore_test.go @@ -0,0 +1,125 @@ +package indexstore + +import ( + "context" + "io" + "os" + "testing" + "time" + + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/blockstore" + "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + + "github.com/filecoin-project/go-commp-utils/writer" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/lib/testutils" +) + +func envElse(env, els string) string { + if v := os.Getenv(env); v != "" { + return v + } + return els +} + +func TestNewIndexStore(t *testing.T) { + // Set up the indexStore for testing + + ctx := context.Background() + cfg := config.DefaultCurioConfig() + + idxStore, err := NewIndexStore([]string{envElse("CURIO_HARMONYDB_HOSTS", "127.0.0.1")}, cfg) + require.NoError(t, err) + + // Create a car file and calculate commP + dir, err := os.MkdirTemp(os.TempDir(), "curio-indexstore") + require.NoError(t, err) + defer func() { + _ = os.RemoveAll(dir) + }() + + rf, err := testutils.CreateRandomFile(dir, int(time.Now().Unix()), 8000000) + require.NoError(t, err) + + caropts := []carv2.Option{ + blockstore.WriteAsCarV1(true), + } + + _, cn, err := testutils.CreateDenseCARWith(dir, rf, 1024, 1024, caropts) + require.NoError(t, err) + + f, err := os.Open(cn) + require.NoError(t, err) + + defer func() 
{ + _ = f.Close() + }() + + w := &writer.Writer{} + _, err = io.CopyBuffer(w, f, make([]byte, writer.CommPBuf)) + require.NoError(t, err) + + commp, err := w.Sum() + require.NoError(t, err) + + _, err = f.Seek(0, io.SeekStart) + require.NoError(t, err) + + // Create recods + dealCfg := cfg.Market.StorageMarketConfig + chanSize := dealCfg.Indexing.InsertConcurrency * dealCfg.Indexing.InsertBatchSize + + recs := make(chan Record, chanSize) + opts := []carv2.Option{carv2.ZeroLengthSectionAsEOF(true)} + blockReader, err := carv2.NewBlockReader(f, opts...) + require.NoError(t, err) + + // Add index to the store + var eg errgroup.Group + eg.Go(func() error { + serr := idxStore.AddIndex(ctx, commp.PieceCID, recs) + return serr + }) + + var m multihash.Multihash + i := 0 + + blockMetadata, err := blockReader.SkipNext() + for err == nil { + if i == 0 { + m = blockMetadata.Hash() + } + recs <- Record{ + Cid: blockMetadata.Cid, + OffsetSize: OffsetSize{ + Offset: blockMetadata.SourceOffset, + Size: blockMetadata.Size, + }, + } + i++ + + blockMetadata, err = blockReader.SkipNext() + } + require.Error(t, io.EOF) + close(recs) + err = eg.Wait() + require.NoError(t, err) + + // Try to find a multihash + pcids, err := idxStore.PiecesContainingMultihash(ctx, m) + require.NoError(t, err) + require.Len(t, pcids, 1) + require.Equal(t, pcids[0], commp.PieceCID) + + // Remove all indexes from the store + err = idxStore.RemoveIndexes(ctx, pcids[0]) + require.NoError(t, err) + + // Drop the table + err = idxStore.session.Query("DROP TABLE PayloadToPiece").Exec() + require.NoError(t, err) +} diff --git a/market/libp2p/libp2p.go b/market/libp2p/libp2p.go new file mode 100644 index 000000000..3a57b635f --- /dev/null +++ b/market/libp2p/libp2p.go @@ -0,0 +1,669 @@ +package libp2p + +import ( + "context" + "encoding/json" + "fmt" + "runtime/debug" + "time" + + "github.com/google/uuid" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p" + 
"github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/metrics" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + basichost "github.com/libp2p/go-libp2p/p2p/host/basic" + "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" + "github.com/multiformats/go-multiaddr" + "github.com/samber/lo" + mamask "github.com/whyrusleeping/multiaddr-filter" + "go.uber.org/zap" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + + "github.com/filecoin-project/curio/build" + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/market/mk12" + "github.com/filecoin-project/curio/market/mk12/legacytypes" + + "github.com/filecoin-project/lotus/chain/types" +) + +var log = logging.Logger("curio-libp2p") + +func NewLibp2pHost(ctx context.Context, db *harmonydb.DB, cfg *config.CurioConfig, machine string) (host.Host, error) { + lcfg, err := getCfg(ctx, db, cfg.Market.StorageMarketConfig.MK12.Libp2p, machine) + if err != nil { + return nil, err + } + + pstore, err := pstoremem.NewPeerstore() + if err != nil { + return nil, fmt.Errorf("creating peer store: %w", err) + } + + pubK := lcfg.priv.GetPublic() + id, err := peer.IDFromPublicKey(pubK) + if err != nil { + return nil, fmt.Errorf("getting peer ID: %w", err) + } + + err = pstore.AddPrivKey(id, lcfg.priv) + if err != nil { + return nil, fmt.Errorf("adding private key to peerstore: %w", err) + } + err = pstore.AddPubKey(id, pubK) + if err != nil { + return nil, fmt.Errorf("adding public key to peerstore: %w", err) + } + + addrFactory, err := MakeAddrsFactory(lcfg.AnnounceAddr, lcfg.NoAnnounceAddr) + if err != nil { + return nil, fmt.Errorf("creating address 
factory: %w", err) + } + + opts := []libp2p.Option{ + libp2p.DefaultTransports, + libp2p.ListenAddrs(lcfg.ListenAddr...), + libp2p.AddrsFactory(addrFactory), + libp2p.Peerstore(pstore), + libp2p.UserAgent("curio-" + build.UserVersion()), + libp2p.Ping(true), + libp2p.EnableNATService(), + libp2p.BandwidthReporter(metrics.NewBandwidthCounter()), + } + + h, err := libp2p.New(opts...) + if err != nil { + return nil, xerrors.Errorf("creating libp2p host: %w", err) + } + + // Start listening + err = h.Network().Listen(lcfg.ListenAddr...) + if err != nil { + return nil, xerrors.Errorf("failed to listen on addresses: %w", err) + } + + log.Infof("Libp2p started listening") + + // Start a goroutine to update updated_at colum of libp2p table and release lock at node shutdown + go func() { + ticker := time.NewTicker(time.Second * 30) + defer func(h host.Host) { + err := h.Close() + if err != nil { + log.Error("could not stop libp2p node: %w", err) + } + }(h) + for { + select { + case <-ctx.Done(): + log.Info("Releasing libp2p claims") + _, err := db.Exec(ctx, `UPDATE libp2p SET running_on = NULL`) + if err != nil { + log.Error("Cleaning up libp2p claims ", err) + } + return + case <-ticker.C: + n, err := db.Exec(ctx, `UPDATE libp2p SET updated_at=CURRENT_TIMESTAMP WHERE running_on = $1`, machine) + if err != nil { + log.Error("Cannot keepalive ", err) + } + if n != 1 { + log.Error("could not update the DB, possibly lost the libp2p lock to some other node") + return + } + } + } + }() + + return h, err + +} + +type libp2pCfg struct { + priv crypto.PrivKey + ListenAddr []multiaddr.Multiaddr + AnnounceAddr []multiaddr.Multiaddr + NoAnnounceAddr []multiaddr.Multiaddr +} + +func getCfg(ctx context.Context, db *harmonydb.DB, cfg config.Libp2pConfig, machine string) (*libp2pCfg, error) { + // Try to acquire the lock in DB + _, err := db.Exec(ctx, `SELECT update_libp2p_node ($1)`, machine) + if err != nil { + return nil, xerrors.Errorf("acquiring libp2p locks from DB: %w", err) + } + 
+ var ret libp2pCfg + + for _, l := range cfg.ListenAddresses { + listenAddr, err := multiaddr.NewMultiaddr(l) + if err != nil { + return nil, xerrors.Errorf("parsing listen address: %w", err) + } + ret.ListenAddr = append(ret.ListenAddr, listenAddr) + } + + for _, a := range cfg.AnnounceAddresses { + announceAddr, err := multiaddr.NewMultiaddr(a) + if err != nil { + return nil, xerrors.Errorf("parsing announce address: %w", err) + } + ret.AnnounceAddr = append(ret.AnnounceAddr, announceAddr) + } + + for _, na := range cfg.NoAnnounceAddresses { + noAnnounceAddr, err := multiaddr.NewMultiaddr(na) + if err != nil { + return nil, xerrors.Errorf("parsing no announce address: %w", err) + } + ret.NoAnnounceAddr = append(ret.NoAnnounceAddr, noAnnounceAddr) + } + + var privKey []byte + + err = db.QueryRow(ctx, `SELECT priv_key FROM libp2p`).Scan(&privKey) + if err != nil { + return nil, xerrors.Errorf("getting private key from DB: %w", err) + } + + p, err := crypto.UnmarshalPrivateKey(privKey) + if err != nil { + return nil, xerrors.Errorf("unmarshaling private key: %w", err) + } + + ret.priv = p + + return &ret, nil +} + +func MakeAddrsFactory(announceAddrs, noAnnounce []multiaddr.Multiaddr) (basichost.AddrsFactory, error) { + filters := multiaddr.NewFilters() + noAnnAddrs := map[string]bool{} + for _, addr := range noAnnounce { + f, err := mamask.NewMask(addr.String()) + if err == nil { + filters.AddFilter(*f, multiaddr.ActionDeny) + continue + } + noAnnAddrs[string(addr.Bytes())] = true + } + + return func(allAddrs []multiaddr.Multiaddr) []multiaddr.Multiaddr { + var addrs []multiaddr.Multiaddr + if len(announceAddrs) > 0 { + addrs = announceAddrs + } else { + addrs = allAddrs + } + + var out []multiaddr.Multiaddr + for _, maddr := range addrs { + // check for exact matches + ok := noAnnAddrs[string(maddr.Bytes())] + // check for /ipcidr matches + if !ok && !filters.AddrBlocked(maddr) { + out = append(out, maddr) + } + } + return out + }, nil +} + +var netlog = 
logging.Logger("mk12-net") +var propLog = logging.Logger("mk12-prop") + +const DealProtocolv120ID = "/fil/storage/mk/1.2.0" +const DealProtocolv121ID = "/fil/storage/mk/1.2.1" +const DealStatusV12ProtocolID = "/fil/storage/status/1.2.0" + +// The time limit to read a message from the client when the client opens a stream +const providerReadDeadline = 10 * time.Second + +// The time limit to write a response to the client +const providerWriteDeadline = 10 * time.Second + +func SafeHandle(h network.StreamHandler) network.StreamHandler { + defer func() { + if r := recover(); r != nil { + netlog.Error("panic occurred", "stack", debug.Stack()) + } + }() + + return h +} + +// DealProvider listens for incoming deal proposals over libp2p +type DealProvider struct { + ctx context.Context + host host.Host + prov *mk12.MK12 + api mk12libp2pAPI + db *harmonydb.DB + disabledMiners []address.Address +} + +type mk12libp2pAPI interface { + StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) +} + +func NewDealProvider(ctx context.Context, db *harmonydb.DB, cfg *config.CurioConfig, prov *mk12.MK12, api mk12libp2pAPI, machine string) error { + h, err := NewLibp2pHost(ctx, db, cfg, machine) + if err != nil { + return xerrors.Errorf("failed to start libp2p nodes: %w", err) + } + + var disabledMiners []address.Address + + for _, m := range cfg.Market.StorageMarketConfig.MK12.Libp2p.DisabledMiners { + maddr, err := address.NewFromString(m) + if err != nil { + return err + } + disabledMiners = append(disabledMiners, maddr) + } + + p := &DealProvider{ + ctx: ctx, + host: h, + prov: prov, + api: api, + db: db, + disabledMiners: disabledMiners, + } + + p.Start(ctx, h) + + return nil +} + +func (p *DealProvider) Start(ctx context.Context, host host.Host) { + // Note that the handling for deal protocol v1.2.0 and v1.2.1 is the same. + // Deal protocol v1.2.1 has a couple of new fields: SkipIPNIAnnounce and + // RemoveUnsealedCopy. 
+ // If a client that supports deal protocol v1.2.0 sends a request to a + // boostd server that supports deal protocol v1.2.1, the DealParams struct + // will be missing these new fields. + // When the DealParams struct is unmarshalled the missing fields will be + // set to false, which maintains the previous behaviour: + // - SkipIPNIAnnounce=false: announce deal to IPNI + // - RemoveUnsealedCopy=false: keep unsealed copy of deal data + host.SetStreamHandler(DealProtocolv121ID, SafeHandle(p.handleNewDealStream)) + host.SetStreamHandler(DealProtocolv120ID, SafeHandle(p.handleNewDealStream)) + host.SetStreamHandler(DealStatusV12ProtocolID, SafeHandle(p.handleNewDealStatusStream)) + + // Handle Query Ask + host.SetStreamHandler(legacytypes.AskProtocolID, SafeHandle(p.handleNewAskStream)) + + // Wait for context cancellation + + <-ctx.Done() + host.RemoveStreamHandler(DealProtocolv121ID) + host.RemoveStreamHandler(DealProtocolv120ID) + host.RemoveStreamHandler(DealStatusV12ProtocolID) + host.RemoveStreamHandler(legacytypes.AskProtocolID) +} + +// Called when the client opens a libp2p stream with a new deal proposal +func (p *DealProvider) handleNewDealStream(s network.Stream) { + start := time.Now() + reqLogUuid := uuid.New() + reqLog := netlog.With("reqlog-uuid", reqLogUuid.String(), "client-peer", s.Conn().RemotePeer()) + reqLog.Debugw("new deal proposal request") + + defer func() { + err := s.Close() + if err != nil { + reqLog.Infow("closing stream", "err", err) + } + reqLog.Debugw("handled deal proposal request", "duration", time.Since(start).String()) + }() + + // Set a deadline on reading from the stream so it doesn't hang + _ = s.SetReadDeadline(time.Now().Add(providerReadDeadline)) + + // Read the deal proposal from the stream + var proposal mk12.DealParams + err := proposal.UnmarshalCBOR(s) + _ = s.SetReadDeadline(time.Time{}) // Clear read deadline so conn doesn't get closed + if err != nil { + reqLog.Warnw("reading storage deal proposal from stream", 
"err", err) + return + } + + reqLog = reqLog.With("id", proposal.DealUUID) + reqLog.Infow("received deal proposal") + startExec := time.Now() + + var res *mk12.ProviderDealRejectionInfo + + if lo.Contains(p.disabledMiners, proposal.ClientDealProposal.Proposal.Provider) { + reqLog.Infow("Deal rejected as libp2p is disabled for provider", "deal", proposal.DealUUID, "provider", proposal.ClientDealProposal.Proposal.Provider) + res.Accepted = false + res.Reason = "Libp2p is disabled for the provider" + } else { + // Start executing the deal. + // Note: This method just waits for the deal to be accepted, it doesn't + // wait for deal execution to complete. + res, err := p.prov.ExecuteDeal(context.Background(), &proposal, s.Conn().RemotePeer()) + reqLog.Debugw("processed deal proposal accept") + if err != nil { + reqLog.Warnw("deal proposal failed", "err", err, "reason", res.Reason) + } + } + + // Log the response + propLog.Infow("send deal proposal response", + "id", proposal.DealUUID, + "accepted", res.Accepted, + "msg", res.Reason, + "peer id", s.Conn().RemotePeer(), + "client address", proposal.ClientDealProposal.Proposal.Client, + "provider address", proposal.ClientDealProposal.Proposal.Provider, + "piece cid", proposal.ClientDealProposal.Proposal.PieceCID.String(), + "piece size", proposal.ClientDealProposal.Proposal.PieceSize, + "verified", proposal.ClientDealProposal.Proposal.VerifiedDeal, + "label", proposal.ClientDealProposal.Proposal.Label, + "start epoch", proposal.ClientDealProposal.Proposal.StartEpoch, + "end epoch", proposal.ClientDealProposal.Proposal.EndEpoch, + "price per epoch", proposal.ClientDealProposal.Proposal.StoragePricePerEpoch, + "duration", time.Since(startExec).String(), + ) + + // Set a deadline on writing to the stream so it doesn't hang + _ = s.SetWriteDeadline(time.Now().Add(providerWriteDeadline)) + defer s.SetWriteDeadline(time.Time{}) // nolint + + // Write the response to the client + err = cborutil.WriteCborRPC(s, 
&mk12.DealResponse{Accepted: res.Accepted, Message: res.Reason}) + if err != nil { + reqLog.Warnw("writing deal response", "err", err) + } +} + +func (p *DealProvider) handleNewDealStatusStream(s network.Stream) { + start := time.Now() + reqLogUuid := uuid.New() + reqLog := netlog.With("reqlog-uuid", reqLogUuid.String(), "client-peer", s.Conn().RemotePeer()) + reqLog.Debugw("new deal status request") + + defer func() { + err := s.Close() + if err != nil { + reqLog.Infow("closing stream", "err", err) + } + reqLog.Debugw("handled deal status request", "duration", time.Since(start).String()) + }() + + // Read the deal status request from the stream + _ = s.SetReadDeadline(time.Now().Add(providerReadDeadline)) + var req mk12.DealStatusRequest + err := req.UnmarshalCBOR(s) + _ = s.SetReadDeadline(time.Time{}) // Clear read deadline so conn doesn't get closed + if err != nil { + reqLog.Warnw("reading deal status request from stream", "err", err) + return + } + reqLog = reqLog.With("id", req.DealUUID) + reqLog.Debugw("received deal status request") + + resp := p.getDealStatus(req, reqLog) + reqLog.Debugw("processed deal status request") + + // Set a deadline on writing to the stream so it doesn't hang + _ = s.SetWriteDeadline(time.Now().Add(providerWriteDeadline)) + defer s.SetWriteDeadline(time.Time{}) // nolint + + if err := cborutil.WriteCborRPC(s, &resp); err != nil { + reqLog.Errorw("failed to write deal status response", "err", err) + } +} + +func (p *DealProvider) getDealStatus(req mk12.DealStatusRequest, reqLog *zap.SugaredLogger) mk12.DealStatusResponse { + errResp := func(err string) mk12.DealStatusResponse { + return mk12.DealStatusResponse{DealUUID: req.DealUUID, Error: err} + } + + var pdeals []struct { + AfterPSD bool `db:"after_psd"` + Sealed bool `db:"sealed"` + Indexed bool `db:"indexed"` + } + + err := p.db.Select(p.ctx, &pdeals, `SELECT + after_psd, + sealed, + indexed + FROM + market_mk12_deal_pipeline + WHERE + uuid = $1;`, req.DealUUID) + + if err != 
nil { + return errResp(fmt.Sprintf("failed to query the db for deal status: %s", err)) + } + + if len(pdeals) > 1 { + return errResp("found multiple entries for the same UUID, inform the storage provider") + } + + // If deal is still in pipeline + if len(pdeals) == 1 { + pdeal := pdeals[0] + // If PSD is done + if pdeal.AfterPSD { + st, err := p.getSealedDealStatus(p.ctx, req.DealUUID.String(), true) + if err != nil { + reqLog.Errorw("failed to get sealed deal status", "err", err) + return errResp("failed to get sealed deal status") + } + ret := mk12.DealStatusResponse{ + DealUUID: req.DealUUID, + DealStatus: &mk12.DealStatus{ + Error: st.Error, + Status: "Sealing", + SealingStatus: "Sealed", + Proposal: st.Proposal, + SignedProposalCid: st.SignedProposalCID, + PublishCid: &st.PublishCID, + ChainDealID: st.ChainDealID, + }, + IsOffline: st.Offline, + TransferSize: 1, + NBytesReceived: 1, + } + if pdeal.Sealed { + ret.DealStatus.Status = "Sealed" + } + if pdeal.Indexed { + ret.DealStatus.Status = "Sealed and Indexed" + } + return ret + } + // Anything before PSD is processing + st, err := p.getSealedDealStatus(p.ctx, req.DealUUID.String(), false) + if err != nil { + reqLog.Errorw("failed to get sealed deal status", "err", err) + return errResp("failed to get sealed deal status") + } + return mk12.DealStatusResponse{ + DealUUID: req.DealUUID, + DealStatus: &mk12.DealStatus{ + Error: st.Error, + Status: "Processing", + SealingStatus: "Not assigned to sector", + Proposal: st.Proposal, + SignedProposalCid: st.SignedProposalCID, + PublishCid: &st.PublishCID, + ChainDealID: st.ChainDealID, + }, + IsOffline: st.Offline, + TransferSize: 1, + NBytesReceived: 1, + } + } + + // If deal is not in deal pipeline + st, err := p.getSealedDealStatus(p.ctx, req.DealUUID.String(), true) + if err != nil { + reqLog.Errorw("failed to get sealed deal status", "err", err) + return errResp("failed to get sealed deal status") + } + + return mk12.DealStatusResponse{ + DealUUID: req.DealUUID, + DealStatus: 
&mk12.DealStatus{ + Error: st.Error, + Status: "Sealed", + SealingStatus: "Sealed and Indexed", + Proposal: st.Proposal, + SignedProposalCid: st.SignedProposalCID, + PublishCid: &st.PublishCID, + ChainDealID: st.ChainDealID, + }, + IsOffline: st.Offline, + TransferSize: 1, + NBytesReceived: 1, + } +} + +type dealInfo struct { + Offline bool + Error string + Proposal market.DealProposal + SignedProposalCID cid.Cid + ChainDealID abi.DealID + PublishCID cid.Cid +} + +func (p *DealProvider) getSealedDealStatus(ctx context.Context, id string, onChain bool) (dealInfo, error) { + var dealInfos []struct { + Offline bool `db:"offline"` + Error string `db:"error"` + Proposal json.RawMessage `db:"proposal"` + SignedProposalCID string `db:"signed_proposal_cid"` + } + err := p.db.Select(ctx, &dealInfos, `SELECT + offline, + error, + proposal, + signed_proposal_cid + FROM + market_mk12_deals + WHERE + uuid = $1;`, id) + + if err != nil { + return dealInfo{}, xerrors.Errorf("failed to get deal details from DB: %w", err) + } + + if len(dealInfos) != 1 { + return dealInfo{}, xerrors.Errorf("expected 1 row but got %d", len(dealInfos)) + } + + di := dealInfos[0] + + var prop market.DealProposal + err = json.Unmarshal(di.Proposal, &prop) + if err != nil { + return dealInfo{}, xerrors.Errorf("failed to unmarshal deal proposal: %w", err) + } + + spc, err := cid.Parse(di.SignedProposalCID) + if err != nil { + return dealInfo{}, xerrors.Errorf("failed to parse signed proposal CID: %w", err) + } + + ret := dealInfo{ + Offline: di.Offline, + Error: di.Error, + Proposal: prop, + SignedProposalCID: spc, + ChainDealID: abi.DealID(0), + PublishCID: cid.Undef, + } + + if !onChain { + return ret, nil + } + + var cInfos []struct { + ChainDealID int64 `db:"chain_deal_id"` + PublishCID string `db:"publish_cid"` + } + err = p.db.Select(ctx, &cInfos, `SELECT + chain_deal_id, + publish_cid + FROM + market_mk12_deals + WHERE + uuid = $1;`, id) + + if err != nil { + return dealInfo{}, 
xerrors.Errorf("failed to get deal details from DB: %w", err) + } + + if len(cInfos) != 1 { + return dealInfo{}, xerrors.Errorf("expected 1 row but got %d", len(cInfos)) + } + + ci := cInfos[0] + + pc, err := cid.Parse(ci.PublishCID) + if err != nil { + return dealInfo{}, xerrors.Errorf("failed to parse publish CID: %w", err) + } + + ret.PublishCID = pc + ret.ChainDealID = abi.DealID(ci.ChainDealID) + + return ret, nil +} + +func (p *DealProvider) handleNewAskStream(s network.Stream) { + start := time.Now() + reqLog := netlog.With("client-peer", s.Conn().RemotePeer()) + reqLog.Debugw("new queryAsk request") + + defer func() { + err := s.Close() + if err != nil { + reqLog.Infow("closing stream", "err", err) + } + reqLog.Debugw("handled queryAsk request", "duration", time.Since(start).String()) + }() + + // Read the deal status request from the stream + _ = s.SetReadDeadline(time.Now().Add(providerReadDeadline)) + var req legacytypes.AskRequest + err := req.UnmarshalCBOR(s) + _ = s.SetReadDeadline(time.Time{}) // Clear read deadline so conn doesn't get closed + if err != nil { + reqLog.Warnw("reading queryAsk request from stream", "err", err) + return + } + + var resp legacytypes.AskResponse + + resp.Ask, err = p.prov.GetAsk(p.ctx, req.Miner) + if err != nil { + reqLog.Warnw("failed to get ask from storage provider", "err", err) + } + + // Set a deadline on writing to the stream so it doesn't hang + _ = s.SetWriteDeadline(time.Now().Add(providerWriteDeadline)) + defer s.SetWriteDeadline(time.Time{}) // nolint + + if err := cborutil.WriteCborRPC(s, &resp); err != nil { + reqLog.Errorw("failed to write queryAsk response", "err", err) + } +} diff --git a/market/lmrpc/lmrpc.go b/market/lmrpc/lmrpc.go deleted file mode 100644 index c148baa3a..000000000 --- a/market/lmrpc/lmrpc.go +++ /dev/null @@ -1,678 +0,0 @@ -package lmrpc - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "sync" - "time" - - 
"github.com/google/uuid" - logging "github.com/ipfs/go-log/v2" - manet "github.com/multiformats/go-multiaddr/net" - "github.com/yugabyte/pgx/v5" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/curio/api" - "github.com/filecoin-project/curio/deps/config" - "github.com/filecoin-project/curio/harmony/harmonydb" - "github.com/filecoin-project/curio/lib/paths" - cumarket "github.com/filecoin-project/curio/market" - "github.com/filecoin-project/curio/market/fakelm" - - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build/buildconstants" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/nullreader" - "github.com/filecoin-project/lotus/metrics/proxy" - lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("lmrpc") - -const backpressureWaitTime = 30 * time.Second - -func ServeCurioMarketRPCFromConfig(db *harmonydb.DB, full api.Chain, cfg *config.CurioConfig) error { - return forEachMarketRPC(cfg, func(maddr string, listen string) error { - addr, err := address.NewFromString(maddr) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } - - go func() { - err := ServeCurioMarketRPC(db, full, addr, cfg, listen) - if err != nil { - log.Errorf("failed to serve market rpc: %s", err) - } - }() - - return nil - }) -} - -func MakeTokens(cfg *config.CurioConfig) (map[address.Address]string, error) { - out := map[address.Address]string{} - - err := forEachMarketRPC(cfg, func(smaddr string, listen string) error { - ctx := context.Background() - - laddr, err := net.ResolveTCPAddr("tcp", listen) - if err != nil { - return xerrors.Errorf("net resolve: %w", err) - } - - if len(laddr.IP) == 0 || laddr.IP.IsUnspecified() { - return xerrors.Errorf("market rpc server listen address must 
be a specific address, not %s (probably missing bind IP)", listen) - } - - // need minimal provider with just the config - lp := fakelm.NewLMRPCProvider(nil, nil, address.Undef, 0, 0, nil, nil, cfg) - - tok, err := lp.AuthNew(ctx, lapi.AllPermissions) - if err != nil { - return err - } - - // parse listen into multiaddr - ma, err := manet.FromNetAddr(laddr) - if err != nil { - return xerrors.Errorf("net from addr (%v): %w", laddr, err) - } - - maddr, err := address.NewFromString(smaddr) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } - - token := fmt.Sprintf("%s:%s", tok, ma) - out[maddr] = token - - return nil - }) - - return out, err -} - -func forEachMarketRPC(cfg *config.CurioConfig, cb func(string, string) error) error { - for n, server := range cfg.Subsystems.BoostAdapters { - n := n - - // server: [f0.. actor address]:[bind address] - // bind address is either a numeric port or a full address - - // first split at first : to get the actor address and the bind address - split := strings.SplitN(server, ":", 2) - - // if the split length is not 2, return an error - if len(split) != 2 { - return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. actor address]:[bind address]", n, server) - } - - // get the actor address and the bind address - strMaddr, strListen := split[0], split[1] - - maddr, err := address.NewFromString(strMaddr) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } - - // check the listen address - if strListen == "" { - return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. actor address]:[bind address]", n, server) - } - // if listen address is numeric, prepend the default host - if _, err := strconv.Atoi(strListen); err == nil { - strListen = "0.0.0.0:" + strListen - } - // check if the listen address is a valid address - if _, _, err := net.SplitHostPort(strListen); err != nil { - return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. 
actor address]:[bind address]", n, server) - } - - log.Infow("Starting market RPC server", "actor", maddr, "listen", strListen) - - if err := cb(strMaddr, strListen); err != nil { - return err - } - } - - return nil -} - -func ServeCurioMarketRPC(db *harmonydb.DB, full api.Chain, maddr address.Address, conf *config.CurioConfig, listen string) error { - ctx := context.Background() - - var pin cumarket.Ingester - var err error - if conf.Ingest.DoSnap { - pin, err = cumarket.NewPieceIngesterSnap(ctx, db, full, maddr, false, time.Duration(conf.Ingest.MaxDealWaitTime)) - } else { - pin, err = cumarket.NewPieceIngester(ctx, db, full, maddr, false, time.Duration(conf.Ingest.MaxDealWaitTime), conf.Subsystems.UseSyntheticPoRep) - } - - if err != nil { - return xerrors.Errorf("starting piece ingestor") - } - - si := paths.NewDBIndex(nil, db) - - mid, err := address.IDFromAddress(maddr) - if err != nil { - return xerrors.Errorf("getting miner id: %w", err) - } - - mi, err := full.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - lp := fakelm.NewLMRPCProvider(si, full, maddr, abi.ActorID(mid), mi.SectorSize, pin, db, conf) - - laddr, err := net.ResolveTCPAddr("tcp", listen) - if err != nil { - return xerrors.Errorf("net resolve: %w", err) - } - - if len(laddr.IP) == 0 || laddr.IP.IsUnspecified() { - return xerrors.Errorf("market rpc server listen address must be a specific address, not %s (probably missing bind IP)", listen) - } - rootUrl := url.URL{ - Scheme: "http", - Host: laddr.String(), - } - - ast := lapi.StorageMinerStruct{} - - ast.CommonStruct.Internal.Version = func(ctx context.Context) (lapi.APIVersion, error) { - return lapi.APIVersion{ - Version: "curio-proxy-v0", - APIVersion: lapi.MinerAPIVersion0, - BlockDelay: buildconstants.BlockDelaySecs, - }, nil - } - - pieceInfoLk := new(sync.Mutex) - pieceInfos := map[uuid.UUID][]pieceInfo{} - - ast.CommonStruct.Internal.AuthNew = lp.AuthNew - 
ast.Internal.ActorAddress = lp.ActorAddress - ast.Internal.WorkerJobs = lp.WorkerJobs - ast.Internal.SectorsStatus = lp.SectorsStatus - ast.Internal.SectorsList = lp.SectorsList - ast.Internal.SectorsSummary = lp.SectorsSummary - ast.Internal.SectorsListInStates = lp.SectorsListInStates - ast.Internal.StorageRedeclareLocal = lp.StorageRedeclareLocal - ast.Internal.ComputeDataCid = lp.ComputeDataCid - ast.Internal.SectorAddPieceToAny = sectorAddPieceToAnyOperation(maddr, rootUrl, conf, pieceInfoLk, pieceInfos, pin, db, mi.SectorSize) - ast.Internal.StorageList = si.StorageList - ast.Internal.StorageDetach = si.StorageDetach - ast.Internal.StorageReportHealth = si.StorageReportHealth - ast.Internal.StorageDeclareSector = si.StorageDeclareSector - ast.Internal.StorageDropSector = si.StorageDropSector - ast.Internal.StorageFindSector = si.StorageFindSector - ast.Internal.StorageInfo = si.StorageInfo - ast.Internal.StorageBestAlloc = si.StorageBestAlloc - ast.Internal.StorageLock = si.StorageLock - ast.Internal.StorageTryLock = si.StorageTryLock - ast.Internal.StorageGetLocks = si.StorageGetLocks - ast.Internal.SectorStartSealing = pin.SectorStartSealing - - var pieceHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { - // /piece?piece_id=xxxx - pieceUUID := r.URL.Query().Get("piece_id") - - pu, err := uuid.Parse(pieceUUID) - if err != nil { - http.Error(w, "bad piece id", http.StatusBadRequest) - return - } - - if r.Method != http.MethodGet { - http.Error(w, "bad method", http.StatusMethodNotAllowed) - return - } - - fmt.Printf("%s request for piece from %s\n", pieceUUID, r.RemoteAddr) - - pieceInfoLk.Lock() - pis, ok := pieceInfos[pu] - if !ok { - http.Error(w, "piece not found", http.StatusNotFound) - log.Warnw("piece not found", "piece_uuid", pu) - pieceInfoLk.Unlock() - return - } - - // pop - pi := pis[0] - pis = pis[1:] - - pieceInfos[pu] = pis - if len(pis) == 0 { - delete(pieceInfos, pu) - } - - pieceInfoLk.Unlock() - - start := time.Now() 
- - pieceData := io.LimitReader(io.MultiReader( - pi.data, - nullreader.Reader{}, - ), int64(pi.size)) - - n, err := io.Copy(w, pieceData) - close(pi.done) - - took := time.Since(start) - mbps := float64(n) / (1024 * 1024) / took.Seconds() - - if err != nil { - log.Errorf("copying piece data: %s", err) - return - } - - log.Infow("piece served", "piece_uuid", pu, "size", float64(n)/(1024*1024), "duration", took, "speed", mbps) - } - - finalApi := proxy.LoggingAPI[lapi.StorageMiner, lapi.StorageMinerStruct](&ast) - - mh, err := MinerHandler(finalApi, false) // todo permissioned - if err != nil { - return err - } - - mux := http.NewServeMux() - mux.Handle("/piece", pieceHandler) - mux.Handle("/", mh) // todo: create a method for sealNow for sectors - - server := &http.Server{ - Addr: listen, - Handler: mux, - ReadTimeout: 48 * time.Hour, - WriteTimeout: 48 * time.Hour, // really high because we block until pieces are saved in PiecePark - } - - return server.ListenAndServe() -} - -type pieceInfo struct { - data storiface.Data - size abi.UnpaddedPieceSize - - done chan struct{} -} - -type PieceIngester interface { - AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (lapi.SectorOffset, error) -} - -func sectorAddPieceToAnyOperation(maddr address.Address, rootUrl url.URL, conf *config.CurioConfig, pieceInfoLk *sync.Mutex, pieceInfos map[uuid.UUID][]pieceInfo, pin PieceIngester, db *harmonydb.DB, ssize abi.SectorSize) func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal lpiece.PieceDealInfo) (lapi.SectorOffset, error) { - return func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal lpiece.PieceDealInfo) (lapi.SectorOffset, error) { - if (deal.PieceActivationManifest == nil && deal.DealProposal == nil) || (deal.PieceActivationManifest != nil && deal.DealProposal != nil) { - return lapi.SectorOffset{}, 
xerrors.Errorf("deal info must have either deal proposal or piece manifest") - } - - origPieceData := pieceData - defer func() { - closer, ok := origPieceData.(io.Closer) - if !ok { - log.Warnf("DataCid: cannot close pieceData reader %T because it is not an io.Closer", origPieceData) - return - } - if err := closer.Close(); err != nil { - log.Warnw("closing pieceData in DataCid", "error", err) - } - }() - - pi := pieceInfo{ - data: pieceData, - size: pieceSize, - - done: make(chan struct{}), - } - - pieceUUID := uuid.New() - - if deal.DealProposal != nil { - log.Infow("piece assign request", "piece_cid", deal.PieceCID().String(), "provider", deal.DealProposal.Provider, "piece_uuid", pieceUUID) - } - - pieceInfoLk.Lock() - pieceInfos[pieceUUID] = append(pieceInfos[pieceUUID], pi) - pieceInfoLk.Unlock() - - // /piece?piece_cid=xxxx - dataUrl := rootUrl - dataUrl.Path = "/piece" - dataUrl.RawQuery = "piece_id=" + pieceUUID.String() - - // add piece entry - refID, pieceWasCreated, err := addPieceEntry(ctx, db, conf, deal, pieceSize, dataUrl, ssize) - if err != nil { - return lapi.SectorOffset{}, err - } - - // wait for piece to be parked - if pieceWasCreated { - <-pi.done - } else { - // If the piece was not created, we need to close the done channel - close(pi.done) - - closeDataReader(pieceData) - } - - { - // piece park is either done or currently happening from another AP call - // now we need to make sure that the piece is definitely parked successfully - // - in case of errors we return, and boost should be able to retry the call - - // * If piece is completed, return - // * If piece is not completed but has null taskID, wait - // * If piece has a non-null taskID - // * If the task is in harmony_tasks, wait - // * Otherwise look for an error in harmony_task_history and return that - - for { - var taskID *int64 - var complete bool - err := db.QueryRow(ctx, `SELECT pp.task_id, pp.complete - FROM parked_pieces pp - JOIN parked_piece_refs ppr ON pp.id = ppr.piece_id 
- WHERE ppr.ref_id = $1;`, refID).Scan(&taskID, &complete) - if err != nil { - return lapi.SectorOffset{}, xerrors.Errorf("getting piece park status: %w", err) - } - - if complete { - break - } - - if taskID == nil { - // piece is not parked yet - time.Sleep(5 * time.Second) - continue - } - - // check if task is in harmony_tasks - var taskName string - err = db.QueryRow(ctx, `SELECT name FROM harmony_task WHERE id = $1`, *taskID).Scan(&taskName) - if err == nil { - // task is in harmony_tasks, wait - time.Sleep(5 * time.Second) - continue - } - if err != pgx.ErrNoRows { - return lapi.SectorOffset{}, xerrors.Errorf("checking park-piece task in harmony_tasks: %w", err) - } - - // task is not in harmony_tasks, check harmony_task_history (latest work_end) - var taskError string - var taskResult bool - err = db.QueryRow(ctx, `SELECT result, err FROM harmony_task_history WHERE task_id = $1 ORDER BY work_end DESC LIMIT 1`, *taskID).Scan(&taskResult, &taskError) - if err != nil { - return lapi.SectorOffset{}, xerrors.Errorf("checking park-piece task history: %w", err) - } - if !taskResult { - return lapi.SectorOffset{}, xerrors.Errorf("park-piece task failed: %s", taskError) - } - return lapi.SectorOffset{}, xerrors.Errorf("park task succeeded but piece is not marked as complete") - } - } - - pieceIDUrl := url.URL{ - Scheme: "pieceref", - Opaque: fmt.Sprintf("%d", refID), - } - - // make a sector - so, err := pin.AllocatePieceToSector(ctx, maddr, deal, int64(pieceSize), pieceIDUrl, nil) - if err != nil { - return lapi.SectorOffset{}, err - } - - log.Infow("piece assigned to sector", "piece_cid", deal.PieceCID().String(), "sector", so.Sector, "offset", so.Offset) - - return so, nil - } -} - -func addPieceEntry(ctx context.Context, db *harmonydb.DB, conf *config.CurioConfig, deal lpiece.PieceDealInfo, pieceSize abi.UnpaddedPieceSize, dataUrl url.URL, ssize abi.SectorSize) (int64, bool, error) { - var refID int64 - var pieceWasCreated bool - - for { - var backpressureWait 
bool - - comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - // BACKPRESSURE - wait, err := maybeApplyBackpressure(tx, conf.Ingest, ssize) - if err != nil { - return false, xerrors.Errorf("backpressure checks: %w", err) - } - if wait { - backpressureWait = true - return false, nil - } - - var pieceID int64 - // Attempt to select the piece ID first - err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1`, deal.PieceCID().String()).Scan(&pieceID) - - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - // Piece does not exist, attempt to insert - err = tx.QueryRow(` - INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size) - VALUES ($1, $2, $3) - ON CONFLICT (piece_cid) DO NOTHING - RETURNING id`, deal.PieceCID().String(), int64(pieceSize.Padded()), int64(pieceSize)).Scan(&pieceID) - if err != nil { - return false, xerrors.Errorf("inserting new parked piece and getting id: %w", err) - } - pieceWasCreated = true // New piece was created - } else { - // Some other error occurred during select - return false, xerrors.Errorf("checking existing parked piece: %w", err) - } - } else { - pieceWasCreated = false // Piece already exists, no new piece was created - } - - // Add parked_piece_ref - err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url) - VALUES ($1, $2) RETURNING ref_id`, pieceID, dataUrl.String()).Scan(&refID) - if err != nil { - return false, xerrors.Errorf("inserting parked piece ref: %w", err) - } - - // If everything went well, commit the transaction - return true, nil // This will commit the transaction - }, harmonydb.OptionRetry()) - if err != nil { - return refID, pieceWasCreated, xerrors.Errorf("inserting parked piece: %w", err) - } - if !comm { - if backpressureWait { - // Backpressure was applied, wait and try again - select { - case <-time.After(backpressureWaitTime): - case <-ctx.Done(): - return refID, pieceWasCreated, xerrors.Errorf("context done while waiting 
for backpressure: %w", ctx.Err()) - } - continue - } - - return refID, pieceWasCreated, xerrors.Errorf("piece tx didn't commit") - } - - break - } - return refID, pieceWasCreated, nil -} - -func closeDataReader(pieceData storiface.Data) { - go func() { - // close the data reader (drain to eof if it's not a closer) - if closer, ok := pieceData.(io.Closer); ok { - if err := closer.Close(); err != nil { - log.Warnw("closing pieceData in DataCid", "error", err) - } - } else { - log.Warnw("pieceData is not an io.Closer", "type", fmt.Sprintf("%T", pieceData)) - - _, err := io.Copy(io.Discard, pieceData) - if err != nil { - log.Warnw("draining pieceData in DataCid", "error", err) - } - } - }() -} - -func maybeApplyBackpressure(tx *harmonydb.Tx, cfg config.CurioIngestConfig, ssize abi.SectorSize) (wait bool, err error) { - var pieceSizes []abi.PaddedPieceSize - - err = tx.Select(&pieceSizes, `SELECT piece_padded_size FROM parked_pieces WHERE complete = false;`) - if err != nil { - return false, xerrors.Errorf("getting in-process pieces: %w", err) - } - sectors := sectorCount(pieceSizes, abi.PaddedPieceSize(ssize)) - - if cfg.DoSnap { - var bufferedEncode, bufferedProve, waitDealSectors int - err = tx.QueryRow(` - WITH BufferedEncode AS ( - SELECT COUNT(p.task_id_encode) - COUNT(t.owner_id) AS buffered_encode - FROM sectors_snap_pipeline p - LEFT JOIN harmony_task t ON p.task_id_encode = t.id - WHERE p.after_encode = false - ), - BufferedProve AS ( - SELECT COUNT(p.task_id_prove) - COUNT(t.owner_id) AS buffered_prove - FROM sectors_snap_pipeline p - LEFT JOIN harmony_task t ON p.task_id_prove = t.id - WHERE p.after_prove = true AND p.after_move_storage = false - ), - WaitDealSectors AS ( - SELECT COUNT(DISTINCT sip.sector_number) AS wait_deal_sectors_count - FROM sectors_snap_initial_pieces sip - LEFT JOIN curio.sectors_snap_pipeline sp ON sip.sp_id = sp.sp_id AND sip.sector_number = sp.sector_number - WHERE sp.sector_number IS NULL - ) - SELECT - (SELECT buffered_encode 
FROM BufferedEncode) AS total_encode, - (SELECT buffered_prove FROM BufferedProve) AS buffered_prove, - (SELECT wait_deal_sectors_count FROM WaitDealSectors) AS wait_deal_sectors_count - `).Scan(&bufferedEncode, &bufferedProve, &waitDealSectors) - if err != nil { - return false, xerrors.Errorf("counting buffered sectors: %w", err) - } - - if cfg.MaxQueueDealSector != 0 && waitDealSectors+sectors > cfg.MaxQueueDealSector { - log.Infow("backpressure", "reason", "too many wait deal sectors", "wait_deal_sectors", waitDealSectors, "parking-sectors", sectors, "parking-pieces", len(pieceSizes), "max", cfg.MaxQueueDealSector) - return true, nil - } - - if cfg.MaxQueueSnapEncode != 0 && bufferedEncode > cfg.MaxQueueSnapEncode { - log.Infow("backpressure", "reason", "too many encode tasks", "buffered", bufferedEncode, "max", cfg.MaxQueueSnapEncode) - return true, nil - } - - if cfg.MaxQueueSnapProve != 0 && bufferedProve > cfg.MaxQueueSnapProve { - log.Infow("backpressure", "reason", "too many prove tasks", "buffered", bufferedProve, "max", cfg.MaxQueueSnapProve) - return - } - } else { - var bufferedSDR, bufferedTrees, bufferedPoRep, waitDealSectors int - err = tx.QueryRow(` - WITH BufferedSDR AS ( - SELECT COUNT(p.task_id_sdr) - COUNT(t.owner_id) AS buffered_sdr_count - FROM sectors_sdr_pipeline p - LEFT JOIN harmony_task t ON p.task_id_sdr = t.id - WHERE p.after_sdr = false - ), - BufferedTrees AS ( - SELECT COUNT(p.task_id_tree_r) - COUNT(t.owner_id) AS buffered_trees_count - FROM sectors_sdr_pipeline p - LEFT JOIN harmony_task t ON p.task_id_tree_r = t.id - WHERE p.after_sdr = true AND p.after_tree_r = false - ), - BufferedPoRep AS ( - SELECT COUNT(p.task_id_porep) - COUNT(t.owner_id) AS buffered_porep_count - FROM sectors_sdr_pipeline p - LEFT JOIN harmony_task t ON p.task_id_porep = t.id - WHERE p.after_tree_r = true AND p.after_porep = false - ), - WaitDealSectors AS ( - SELECT COUNT(DISTINCT sip.sector_number) AS wait_deal_sectors_count - FROM 
sectors_sdr_initial_pieces sip - LEFT JOIN sectors_sdr_pipeline sp ON sip.sp_id = sp.sp_id AND sip.sector_number = sp.sector_number - WHERE sp.sector_number IS NULL - ) - SELECT - (SELECT buffered_sdr_count FROM BufferedSDR) AS total_buffered_sdr, - (SELECT buffered_trees_count FROM BufferedTrees) AS buffered_trees_count, - (SELECT buffered_porep_count FROM BufferedPoRep) AS buffered_porep_count, - (SELECT wait_deal_sectors_count FROM WaitDealSectors) AS wait_deal_sectors_count - `).Scan(&bufferedSDR, &bufferedTrees, &bufferedPoRep, &waitDealSectors) - if err != nil { - return false, xerrors.Errorf("counting buffered sectors: %w", err) - } - - if cfg.MaxQueueDealSector != 0 && waitDealSectors+sectors > cfg.MaxQueueDealSector { - log.Infow("backpressure", "reason", "too many wait deal sectors", "wait_deal_sectors", waitDealSectors, "max", cfg.MaxQueueDealSector) - return true, nil - } - - if bufferedSDR > cfg.MaxQueueSDR { - log.Infow("backpressure", "reason", "too many SDR tasks", "buffered", bufferedSDR, "max", cfg.MaxQueueSDR) - return true, nil - } - if cfg.MaxQueueTrees != 0 && bufferedTrees > cfg.MaxQueueTrees { - log.Infow("backpressure", "reason", "too many tree tasks", "buffered", bufferedTrees, "max", cfg.MaxQueueTrees) - return true, nil - } - if cfg.MaxQueuePoRep != 0 && bufferedPoRep > cfg.MaxQueuePoRep { - log.Infow("backpressure", "reason", "too many PoRep tasks", "buffered", bufferedPoRep, "max", cfg.MaxQueuePoRep) - return true, nil - } - } - - return false, nil -} - -func sectorCount(sizes []abi.PaddedPieceSize, targetSize abi.PaddedPieceSize) int { - sort.Slice(sizes, func(i, j int) bool { - return sizes[i] > sizes[j] - }) - - sectors := make([]abi.PaddedPieceSize, 0) - - for _, size := range sizes { - placed := false - for i := range sectors { - if sectors[i]+size <= targetSize { - sectors[i] += size - placed = true - break - } - } - if !placed { - sectors = append(sectors, size) - } - } - - return len(sectors) -} diff --git 
a/market/lmrpc/minerhandler.go b/market/lmrpc/minerhandler.go deleted file mode 100644 index 8bb2ab907..000000000 --- a/market/lmrpc/minerhandler.go +++ /dev/null @@ -1,66 +0,0 @@ -package lmrpc - -import ( - "net/http" - _ "net/http/pprof" - - "github.com/gorilla/mux" - - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-jsonrpc/auth" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/lib/rpcenc" - "github.com/filecoin-project/lotus/metrics/proxy" - "github.com/filecoin-project/lotus/node/impl" -) - -// MinerHandler returns a miner handler, to be mounted as-is on the server. -func MinerHandler(a api.StorageMiner, permissioned bool) (http.Handler, error) { - mapi := proxy.MetricedStorMinerAPI(a) - if permissioned { - mapi = api.PermissionedStorMinerAPI(mapi) - } - - readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder() - rpcServer := jsonrpc.NewServer(jsonrpc.WithServerErrors(api.RPCErrors), readerServerOpt) - rpcServer.Register("Filecoin", mapi) - rpcServer.AliasMethod("rpc.discover", "Filecoin.Discover") - - rootMux := mux.NewRouter() - - // remote storage - if _, realImpl := a.(*impl.StorageMinerAPI); realImpl { - m := mux.NewRouter() - m.PathPrefix("/remote").HandlerFunc(a.(*impl.StorageMinerAPI).ServeRemote(permissioned)) - - var hnd http.Handler = m - if permissioned { - hnd = &auth.Handler{ - Verify: a.StorageAuthVerify, - Next: m.ServeHTTP, - } - } - - rootMux.PathPrefix("/remote").Handler(hnd) - } - - // local APIs - { - m := mux.NewRouter() - m.Handle("/rpc/v0", rpcServer) - m.Handle("/rpc/streams/v0/push/{uuid}", readerHandler) - - var hnd http.Handler = m - if permissioned { - hnd = &auth.Handler{ - Verify: a.AuthVerify, - Next: m.ServeHTTP, - } - } - - rootMux.PathPrefix("/").Handler(hnd) - } - - return rootMux, nil -} diff --git a/market/mk12/legacytypes/legacytypes.go b/market/mk12/legacytypes/legacytypes.go new file mode 100644 index 000000000..5ce4e3fa4 --- /dev/null +++ 
b/market/mk12/legacytypes/legacytypes.go @@ -0,0 +1,69 @@ +package legacytypes + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" +) + +//go:generate cbor-gen-for --map-encoding SignedStorageAsk StorageAsk Balance AskRequest AskResponse + +// AskProtocolID is the ID for the libp2p protocol for querying miners for their current StorageAsk. +const AskProtocolID = "/fil/storage/ask/1.1.0" + +// StorageAsk defines the parameters by which a miner will choose to accept or +// reject a deal. Note: making a storage deal proposal which matches the miner's +// ask is a precondition, but not sufficient to ensure the deal is accepted (the +// storage provider may run its own decision logic). +type StorageAsk struct { + // Price per GiB / Epoch + Price abi.TokenAmount + VerifiedPrice abi.TokenAmount + + MinPieceSize abi.PaddedPieceSize + MaxPieceSize abi.PaddedPieceSize + Miner address.Address + Timestamp abi.ChainEpoch + Expiry abi.ChainEpoch + SeqNo uint64 +} + +// SignedStorageAsk is an ask signed by the miner's private key +type SignedStorageAsk struct { + Ask *StorageAsk + Signature *crypto.Signature +} + +// StorageAskOption allows custom configuration of a storage ask +type StorageAskOption func(*StorageAsk) + +// MinPieceSize configures a minimum piece size of a StorageAsk +func MinPieceSize(minPieceSize abi.PaddedPieceSize) StorageAskOption { + return func(sa *StorageAsk) { + sa.MinPieceSize = minPieceSize + } +} + +// MaxPieceSize configures maximum piece size of a StorageAsk +func MaxPieceSize(maxPieceSize abi.PaddedPieceSize) StorageAskOption { + return func(sa *StorageAsk) { + sa.MaxPieceSize = maxPieceSize + } +} + +// Balance represents a current balance of funds in the StorageMarketActor. 
+type Balance struct { + Locked abi.TokenAmount + Available abi.TokenAmount +} + +// AskRequest is a request for current ask parameters for a given miner +type AskRequest struct { + Miner address.Address +} + +// AskResponse is the response sent over the network in response +// to an ask request +type AskResponse struct { + Ask *SignedStorageAsk +} diff --git a/market/mk12/legacytypes/legacytypes_cbor_gen.go b/market/mk12/legacytypes/legacytypes_cbor_gen.go new file mode 100644 index 000000000..431e51c50 --- /dev/null +++ b/market/mk12/legacytypes/legacytypes_cbor_gen.go @@ -0,0 +1,780 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package legacytypes + +import ( + "fmt" + "io" + "math" + "sort" + + abi "github.com/filecoin-project/go-state-types/abi" + crypto "github.com/filecoin-project/go-state-types/crypto" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *SignedStorageAsk) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Ask (legacytypes.StorageAsk) (struct) + if len("Ask") > 8192 { + return xerrors.Errorf("Value in field \"Ask\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Ask"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Ask")); err != nil { + return err + } + + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > 8192 { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Signature")); err != nil { + 
return err + } + + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedStorageAsk) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedStorageAsk{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("SignedStorageAsk: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Ask (legacytypes.StorageAsk) (struct) + case "Ask": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(StorageAsk) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *StorageAsk) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{168}); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if len("Miner") > 8192 { + return 
xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Miner")); err != nil { + return err + } + + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + + // t.Price (big.Int) (struct) + if len("Price") > 8192 { + return xerrors.Errorf("Value in field \"Price\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Price"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Price")); err != nil { + return err + } + + if err := t.Price.MarshalCBOR(cw); err != nil { + return err + } + + // t.SeqNo (uint64) (uint64) + if len("SeqNo") > 8192 { + return xerrors.Errorf("Value in field \"SeqNo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SeqNo"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SeqNo")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SeqNo)); err != nil { + return err + } + + // t.Expiry (abi.ChainEpoch) (int64) + if len("Expiry") > 8192 { + return xerrors.Errorf("Value in field \"Expiry\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Expiry"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Expiry")); err != nil { + return err + } + + if t.Expiry >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Expiry)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Expiry-1)); err != nil { + return err + } + } + + // t.Timestamp (abi.ChainEpoch) (int64) + if len("Timestamp") > 8192 { + return xerrors.Errorf("Value in field \"Timestamp\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Timestamp"))); err != nil { + return err + } + if _, err := 
cw.WriteString(string("Timestamp")); err != nil { + return err + } + + if t.Timestamp >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Timestamp-1)); err != nil { + return err + } + } + + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + if len("MaxPieceSize") > 8192 { + return xerrors.Errorf("Value in field \"MaxPieceSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MaxPieceSize"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MaxPieceSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPieceSize)); err != nil { + return err + } + + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + if len("MinPieceSize") > 8192 { + return xerrors.Errorf("Value in field \"MinPieceSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinPieceSize"))); err != nil { + return err + } + if _, err := cw.WriteString(string("MinPieceSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MinPieceSize)); err != nil { + return err + } + + // t.VerifiedPrice (big.Int) (struct) + if len("VerifiedPrice") > 8192 { + return xerrors.Errorf("Value in field \"VerifiedPrice\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VerifiedPrice"))); err != nil { + return err + } + if _, err := cw.WriteString(string("VerifiedPrice")); err != nil { + return err + } + + if err := t.VerifiedPrice.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *StorageAsk) UnmarshalCBOR(r io.Reader) (err error) { + *t = StorageAsk{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() 
+ + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("StorageAsk: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Miner (address.Address) (struct) + case "Miner": + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + // t.Price (big.Int) (struct) + case "Price": + + { + + if err := t.Price.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Price: %w", err) + } + + } + // t.SeqNo (uint64) (uint64) + case "SeqNo": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SeqNo = uint64(extra) + + } + // t.Expiry (abi.ChainEpoch) (int64) + case "Expiry": + { + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + var extraI int64 + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Expiry = abi.ChainEpoch(extraI) + } + // t.Timestamp (abi.ChainEpoch) (int64) + case "Timestamp": + { + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + var extraI int64 + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return 
fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Timestamp = abi.ChainEpoch(extraI) + } + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + case "MaxPieceSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPieceSize = abi.PaddedPieceSize(extra) + + } + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + case "MinPieceSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MinPieceSize = abi.PaddedPieceSize(extra) + + } + // t.VerifiedPrice (big.Int) (struct) + case "VerifiedPrice": + + { + + if err := t.VerifiedPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedPrice: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Balance) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Locked (big.Int) (struct) + if len("Locked") > 8192 { + return xerrors.Errorf("Value in field \"Locked\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Locked"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Locked")); err != nil { + return err + } + + if err := t.Locked.MarshalCBOR(cw); err != nil { + return err + } + + // t.Available (big.Int) (struct) + if len("Available") > 8192 { + return xerrors.Errorf("Value in field \"Available\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Available"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Available")); err != nil { + return err + } + + if err := 
t.Available.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Balance) UnmarshalCBOR(r io.Reader) (err error) { + *t = Balance{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Balance: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Locked (big.Int) (struct) + case "Locked": + + { + + if err := t.Locked.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Locked: %w", err) + } + + } + // t.Available (big.Int) (struct) + case "Available": + + { + + if err := t.Available.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Available: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *AskRequest) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if len("Miner") > 8192 { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Miner")); err != nil { + return err + } + + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *AskRequest) UnmarshalCBOR(r io.Reader) (err error) { + *t = AskRequest{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := 
cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("AskRequest: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Miner (address.Address) (struct) + case "Miner": + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *AskResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.Ask (legacytypes.SignedStorageAsk) (struct) + if len("Ask") > 8192 { + return xerrors.Errorf("Value in field \"Ask\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Ask"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Ask")); err != nil { + return err + } + + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *AskResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = AskResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("AskResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { 
+ + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Ask (legacytypes.SignedStorageAsk) (struct) + case "Ask": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(SignedStorageAsk) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/market/mk12/mk12.go b/market/mk12/mk12.go new file mode 100644 index 000000000..65cd50b71 --- /dev/null +++ b/market/mk12/mk12.go @@ -0,0 +1,463 @@ +// Package market +/* +This File contains all the implementation details of how to handle +the mk1.2 deals. +*/ +package mk12 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/samber/lo" + "github.com/yugabyte/pgx/v5" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v13/miner" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/market/mk12/legacytypes" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + ctypes "github.com/filecoin-project/lotus/chain/types" +) + +type MK12API interface { + ChainHead(context.Context) (*types.TipSet, error) + StateAccountKey(context.Context, address.Address, types.TipSetKey) 
(address.Address, error) + StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) + StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) + StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) + WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) +} + +type MK12 struct { + miners []address.Address + db *harmonydb.DB + api MK12API + sc *ffi.SealCalls +} + +type validationError struct { + error + // The reason sent to the client for why validation failed + reason string +} + +func NewMK12Handler(miners []address.Address, db *harmonydb.DB, sc *ffi.SealCalls, mapi MK12API) (*MK12, error) { + return &MK12{ + miners: miners, + db: db, + api: mapi, + sc: sc, + }, nil +} + +// ExecuteDeal is called when the Storage Provider receives a deal proposal +// from the network +func (m *MK12) ExecuteDeal(ctx context.Context, dp *DealParams, clientPeer peer.ID) (*ProviderDealRejectionInfo, error) { + + ds := &ProviderDealState{ + DealUuid: dp.DealUUID, + ClientDealProposal: dp.ClientDealProposal, + ClientPeerID: clientPeer, + DealDataRoot: dp.DealDataRoot, + Transfer: dp.Transfer, + IsOffline: dp.IsOffline, + CleanupData: !dp.IsOffline, + FastRetrieval: !dp.RemoveUnsealedCopy, + AnnounceToIPNI: !dp.SkipIPNIAnnounce, + } + + spc, err := ds.GetSignedProposalCid() + if err != nil { + return &ProviderDealRejectionInfo{ + Reason: fmt.Sprintf("getting signed proposal cid: %s", err.Error()), + }, nil + } + + ds.SignedProposalCID = spc + + // Validate the deal proposal + if err := m.validateDealProposal(ctx, ds); err != nil { + // Send the client a reason for the rejection that doesn't reveal the + // 
internal error message + reason := err.reason + if reason == "" { + reason = err.Error() + } + + return &ProviderDealRejectionInfo{ + Reason: fmt.Sprintf("failed validation: %s", reason), + }, nil + } + + return m.processDeal(ctx, ds) +} + +// ValidateDealProposal validates a proposed deal against the provider criteria. +// It returns a validationError. If a nicer error message should be sent to the +// client, the reason string will be set to that nicer error message. +func (m *MK12) validateDealProposal(ctx context.Context, deal *ProviderDealState) *validationError { + head, err := m.api.ChainHead(ctx) + if err != nil { + return &validationError{ + reason: "server error: getting chain head", + error: fmt.Errorf("node error getting most recent state id: %w", err), + } + } + + tok := head.Key().Bytes() + curEpoch := head.Height() + + // Check that the proposal piece cid is defined before attempting signature + // validation - if it's not defined, it won't be possible to marshall the + // deal proposal to check the signature + proposal := deal.ClientDealProposal.Proposal + if !proposal.PieceCID.Defined() { + return &validationError{error: fmt.Errorf("proposal PieceCID undefined")} + } + + if ok, err := m.validateClientSignature(ctx, deal); err != nil || !ok { + if err != nil { + return &validationError{ + reason: "server error: validating signature", + error: fmt.Errorf("validateSignature failed: %w", err), + } + } + return &validationError{ + reason: "invalid signature", + error: fmt.Errorf("invalid signature"), + } + } + + // validate deal proposal + if !lo.Contains(m.miners, proposal.Provider) { + err := fmt.Errorf("incorrect provider for deal; proposal.Provider: %s; provider.Address: %s", proposal.Provider, m.miners) + return &validationError{error: err} + } + + if proposal.Label.Length() > DealMaxLabelSize { + err := fmt.Errorf("deal label can be at most %d bytes, is %d", DealMaxLabelSize, proposal.Label.Length()) + return &validationError{error: err} + } + + 
if err := proposal.PieceSize.Validate(); err != nil { + err := fmt.Errorf("proposal piece size is invalid: %w", err) + return &validationError{error: err} + } + + if proposal.PieceCID.Prefix() != market.PieceCIDPrefix { + err := fmt.Errorf("proposal PieceCID had wrong prefix") + return &validationError{error: err} + } + + if proposal.EndEpoch <= proposal.StartEpoch { + err := fmt.Errorf("proposal end %d before proposal start %d", proposal.EndEpoch, proposal.StartEpoch) + return &validationError{error: err} + } + + if curEpoch > proposal.StartEpoch { + err := fmt.Errorf("deal start epoch %d has already elapsed (current epoch: %d)", proposal.StartEpoch, curEpoch) + return &validationError{error: err} + } + + // Check that the delta between the start and end epochs (the deal + // duration) is within acceptable bounds + minDuration, maxDuration := market.DealDurationBounds(proposal.PieceSize) + if proposal.Duration() < minDuration || proposal.Duration() > maxDuration { + err := fmt.Errorf("deal duration out of bounds (min, max, provided): %d, %d, %d", minDuration, maxDuration, proposal.Duration()) + return &validationError{error: err} + } + + // Check that the proposed end epoch isn't too far beyond the current epoch + maxEndEpoch := curEpoch + miner.MaxSectorExpirationExtension + if proposal.EndEpoch > maxEndEpoch { + err := fmt.Errorf("invalid deal end epoch %d: cannot be more than %d past current epoch %d", proposal.EndEpoch, miner.MaxSectorExpirationExtension, curEpoch) + return &validationError{error: err} + } + + bounds, err := m.api.StateDealProviderCollateralBounds(ctx, proposal.PieceSize, proposal.VerifiedDeal, ctypes.EmptyTSK) + if err != nil { + return &validationError{ + reason: "server error: getting collateral bounds", + error: fmt.Errorf("node error getting collateral bounds: %w", err), + } + } + + // The maximum amount of collateral that the provider will put into escrow + // for a deal is calculated as a multiple of the minimum bounded amount + maxC := 
ctypes.BigMul(bounds.Min, ctypes.NewInt(maxDealCollateralMultiplier)) + + pcMin := bounds.Min + pcMax := maxC + + if proposal.ProviderCollateral.LessThan(pcMin) { + err := fmt.Errorf("proposed provider collateral %s below minimum %s", proposal.ProviderCollateral, pcMin) + return &validationError{error: err} + } + + if proposal.ProviderCollateral.GreaterThan(pcMax) { + err := fmt.Errorf("proposed provider collateral %s above maximum %s", proposal.ProviderCollateral, pcMax) + return &validationError{error: err} + } + + if err := m.validateAsk(ctx, deal); err != nil { + return &validationError{error: err} + } + + tsk, err := ctypes.TipSetKeyFromBytes(tok) + if err != nil { + return &validationError{ + reason: "server error: tip set key from bytes", + error: err, + } + } + + bal, err := m.api.StateMarketBalance(ctx, proposal.Client, tsk) + if err != nil { + return &validationError{ + reason: "server error: getting market balance", + error: fmt.Errorf("node error getting client market balance failed: %w", err), + } + } + + clientMarketBalance := ToSharedBalance(bal) + + // This doesn't guarantee that the client won't withdraw / lock those funds + // but it's a decent first filter + if clientMarketBalance.Available.LessThan(proposal.ClientBalanceRequirement()) { + err := fmt.Errorf("client available funds in escrow %d not enough to meet storage cost for deal %d", clientMarketBalance.Available, proposal.ClientBalanceRequirement()) + return &validationError{error: err} + } + + // Verified deal checks + if proposal.VerifiedDeal { + // Get data cap + dataCap, err := m.api.StateVerifiedClientStatus(ctx, proposal.Client, tsk) + if err != nil { + return &validationError{ + reason: "server error: getting verified datacap", + error: fmt.Errorf("node error fetching verified data cap: %w", err), + } + } + + if dataCap == nil { + return &validationError{ + reason: "client is not a verified client", + error: errors.New("node error fetching verified data cap: data cap missing -- 
client not verified"), + } + } + + pieceSize := big.NewIntUnsigned(uint64(proposal.PieceSize)) + if dataCap.LessThan(pieceSize) { + err := fmt.Errorf("verified deal DataCap %d too small for proposed piece size %d", dataCap, pieceSize) + return &validationError{error: err} + } + } + + return nil +} + +func (m *MK12) validateAsk(ctx context.Context, deal *ProviderDealState) error { + sask, err := m.GetAsk(ctx, deal.ClientDealProposal.Proposal.Provider) + if err != nil { + return xerrors.Errorf("getting ask for miner %s: %w", deal.ClientDealProposal.Proposal.Provider.String(), err) + } + + ask := sask.Ask + + askPrice := ask.Price + if deal.ClientDealProposal.Proposal.VerifiedDeal { + askPrice = ask.VerifiedPrice + } + + proposal := deal.ClientDealProposal.Proposal + minPrice := big.Div(big.Mul(askPrice, abi.NewTokenAmount(int64(proposal.PieceSize))), abi.NewTokenAmount(1<<30)) + if proposal.StoragePricePerEpoch.LessThan(minPrice) { + return fmt.Errorf("storage price per epoch less than asking price: %s < %s", proposal.StoragePricePerEpoch, minPrice) + } + + if proposal.PieceSize < ask.MinPieceSize { + return fmt.Errorf("piece size less than minimum required size: %d < %d", proposal.PieceSize, ask.MinPieceSize) + } + + if proposal.PieceSize > ask.MaxPieceSize { + return fmt.Errorf("piece size more than maximum allowed size: %d > %d", proposal.PieceSize, ask.MaxPieceSize) + } + + return nil +} + +func ToSharedBalance(bal api.MarketBalance) legacytypes.Balance { + return legacytypes.Balance{ + Locked: bal.Locked, + Available: big.Sub(bal.Escrow, bal.Locked), + } +} + +func (m *MK12) validateClientSignature(ctx context.Context, deal *ProviderDealState) (bool, error) { + b, err := cborutil.Dump(&deal.ClientDealProposal.Proposal) + if err != nil { + return false, xerrors.Errorf("failed to serialize client deal proposal: %w", err) + } + + verified, err := m.verifySignature(ctx, deal.ClientDealProposal.ClientSignature, deal.ClientDealProposal.Proposal.Client, b) + if err != 
nil { + return false, xerrors.Errorf("error verifying signature: %w", err) + } + return verified, nil +} + +func (m *MK12) processDeal(ctx context.Context, deal *ProviderDealState) (*ProviderDealRejectionInfo, error) { + // TODO: Add deal filters and Backpressure + + if deal.Transfer.Type == Libp2pScheme { + return &ProviderDealRejectionInfo{ + Reason: "libp2p URLs are not supported by this provider", + }, nil + } + + propJson, err := json.Marshal(deal.ClientDealProposal.Proposal) + if err != nil { + return &ProviderDealRejectionInfo{ + Reason: fmt.Sprintf("json.Marshal(piece.DealProposal): %s", err), + }, nil + } + + sigByte, err := deal.ClientDealProposal.ClientSignature.MarshalBinary() + if err != nil { + return &ProviderDealRejectionInfo{ + Reason: fmt.Sprintf("marshal client signature: %s", err), + }, nil + } + + prop := deal.ClientDealProposal.Proposal + + mid, err := address.IDFromAddress(prop.Provider) + if err != nil { + return &ProviderDealRejectionInfo{ + Reason: fmt.Sprintf("address.IDFromAddress: %s", err), + }, nil + } + + // de-serialize transport opaque token + tInfo := &HttpRequest{} + if err := json.Unmarshal(deal.Transfer.Params, tInfo); err != nil { + return &ProviderDealRejectionInfo{ + Reason: fmt.Sprintf("failed to de-serialize transport params bytes '%s': %s", string(deal.Transfer.Params), err), + }, nil + } + + headers, err := json.Marshal(tInfo.Headers) + if err != nil { + return &ProviderDealRejectionInfo{ + Reason: fmt.Sprintf("failed to marshal headers: %s", err), + }, nil + } + + comm, err := m.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`INSERT INTO market_mk12_deals (uuid, signed_proposal_cid, + proposal_signature, proposal, piece_cid, + piece_size, offline, verified, sp_id, start_epoch, end_epoch, + client_peer_id, fast_retrieval, announce_to_ipni, url, url_headers) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) + ON CONFLICT (uuid) DO NOTHING`, + 
deal.DealUuid.String(), deal.SignedProposalCID.String(), sigByte, propJson, prop.PieceCID.String(), + prop.PieceSize, deal.IsOffline, prop.VerifiedDeal, mid, prop.StartEpoch, prop.EndEpoch, deal.ClientPeerID.String(), + deal.FastRetrieval, deal.AnnounceToIPNI, tInfo.URL, headers) + + if err != nil { + return false, xerrors.Errorf("store deal success: %w", err) + } + + if n != 1 { + return false, xerrors.Errorf("store deal success: updated %d rows instead of 1", n) + } + + // Create piece park entry for online deals + if !deal.IsOffline { + var pieceID int64 + // Attempt to select the piece ID first + err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1`, prop.PieceCID.String()).Scan(&pieceID) + + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + // Piece does not exist, attempt to insert + err = tx.QueryRow(` + INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size) + VALUES ($1, $2, $3) + ON CONFLICT (piece_cid) DO NOTHING + RETURNING id`, prop.PieceCID.String(), int64(prop.PieceSize), int64(deal.Transfer.Size)).Scan(&pieceID) + if err != nil { + return false, xerrors.Errorf("inserting new parked piece and getting id: %w", err) + } + } else { + // Some other error occurred during select + return false, xerrors.Errorf("checking existing parked piece: %w", err) + } + } + + // Add parked_piece_ref + var refID int64 + err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url, data_headers) + VALUES ($1, $2, $3) RETURNING ref_id`, pieceID, tInfo.URL, headers).Scan(&refID) + if err != nil { + return false, xerrors.Errorf("inserting parked piece ref: %w", err) + } + + pieceIDUrl := url.URL{ + Scheme: "pieceref", + Opaque: fmt.Sprintf("%d", refID), + } + + _, err = tx.Exec(`INSERT INTO market_mk12_deal_pipeline (uuid, sp_id, piece_cid, piece_size, offline, url, raw_size, should_index) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (uuid) DO NOTHING`, + deal.DealUuid.String(), mid, prop.PieceCID.String(), 
prop.PieceSize, deal.IsOffline, pieceIDUrl, deal.Transfer.Size, + deal.FastRetrieval) + if err != nil { + return false, xerrors.Errorf("inserting deal into deal pipeline: %w", err) + } + + } else { + // Insert the offline deal into the deal pipeline + _, err = tx.Exec(`INSERT INTO market_mk12_deal_pipeline (uuid, sp_id, piece_cid, piece_size, offline, should_index) + VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (uuid) DO NOTHING`, + deal.DealUuid.String(), mid, prop.PieceCID.String(), prop.PieceSize, deal.IsOffline, deal.FastRetrieval) + if err != nil { + return false, xerrors.Errorf("inserting deal into deal pipeline: %w", err) + } + } + + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return &ProviderDealRejectionInfo{ + Reason: fmt.Sprintf("store deal: %s", err.Error()), + }, nil + } + if !comm { + return &ProviderDealRejectionInfo{ + Reason: "store deal: could not commit the transaction", + }, nil + } + + return &ProviderDealRejectionInfo{ + Accepted: true, + }, nil +} diff --git a/market/mk12/mk12_utils.go b/market/mk12/mk12_utils.go new file mode 100644 index 000000000..052c6429c --- /dev/null +++ b/market/mk12/mk12_utils.go @@ -0,0 +1,220 @@ +package mk12 + +import ( + "bytes" + "context" + "fmt" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/builtin/v9/account" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/curio/market/mk12/legacytypes" + + ctypes "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/sigs" +) + +const DealMaxLabelSize = 256 + +const maxDealCollateralMultiplier = 2 + +// DefaultPrice is the default price for 
unverified deals (in attoFil / GiB / Epoch) +var DefaultPrice = abi.NewTokenAmount(50000000) + +// DefaultVerifiedPrice is the default price for verified deals (in attoFil / GiB / Epoch) +var DefaultVerifiedPrice = abi.NewTokenAmount(5000000) + +// DefaultDuration is the default number of epochs a storage ask is in effect for +const DefaultDuration abi.ChainEpoch = 1000000 + +// DefaultMinPieceSize is the minimum accepted piece size for data +const DefaultMinPieceSize abi.PaddedPieceSize = 16 << 30 + +// DefaultMaxPieceSize is the default maximum accepted size for pieces for deals +// TODO: It would be nice to default this to the miner's sector size +const DefaultMaxPieceSize abi.PaddedPieceSize = 32 << 30 + +func (m *MK12) GetAsk(ctx context.Context, miner address.Address) (*legacytypes.SignedStorageAsk, error) { + + minerid, err := address.IDFromAddress(miner) + if err != nil { + return nil, err + } + + var asks []struct { + Price int64 `db:"price"` + VerifiedPrice int64 `db:"verified_price"` + MinPieceSize int64 `db:"min_size"` + MaxPieceSize int64 `db:"max_size"` + Miner int64 `db:"sp_id"` + Timestamp int64 `db:"created_at"` + Expiry int64 `db:"expiry"` + SeqNo int64 `db:"sequence"` + } + + err = m.db.Select(ctx, &asks, `SELECT sp_id, price, verified_price, min_size, max_size, created_at, expiry, sequence + FROM market_mk12_storage_ask WHERE sp_id = $1`, minerid) + + if err != nil { + return nil, xerrors.Errorf("getting ask from database: %w", err) + } + + if len(asks) == 0 { + return nil, xerrors.Errorf("no ask found for the given miner") + } + + ask := &legacytypes.StorageAsk{ + Price: big.NewInt(asks[0].Price), + VerifiedPrice: big.NewInt(asks[0].VerifiedPrice), + MinPieceSize: abi.PaddedPieceSize(asks[0].MinPieceSize), + MaxPieceSize: abi.PaddedPieceSize(asks[0].MaxPieceSize), + Miner: miner, + Timestamp: abi.ChainEpoch(asks[0].Timestamp), + Expiry: abi.ChainEpoch(asks[0].Expiry), + SeqNo: uint64(asks[0].SeqNo), + } + + tok, err := m.api.ChainHead(ctx) + if 
err != nil { + return nil, err + } + + msg, err := cborutil.Dump(ask) + if err != nil { + return nil, xerrors.Errorf("serializing: %w", err) + } + + mi, err := m.api.StateMinerInfo(ctx, ask.Miner, tok.Key()) + if err != nil { + return nil, err + } + + signer, err := m.api.StateAccountKey(ctx, mi.Worker, tok.Key()) + if err != nil { + return nil, err + } + + sig, err := m.api.WalletSign(ctx, signer, msg) + if err != nil { + return nil, err + } + + ret := &legacytypes.SignedStorageAsk{ + Ask: ask, + Signature: sig, + } + + return ret, nil + +} + +func (m *MK12) SetAsk(ctx context.Context, price abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, miner address.Address, options ...legacytypes.StorageAskOption) error { + + spid, err := address.IDFromAddress(miner) + if err != nil { + return xerrors.Errorf("getting miner id from address: %w", err) + } + + var seqnos []uint64 + err = m.db.Select(ctx, &seqnos, `SELECT sequence + FROM market_mk12_storage_ask WHERE sp_id = $1`, spid) + + if err != nil { + return xerrors.Errorf("getting sequence from DB: %w", err) + } + + minPieceSize := DefaultMinPieceSize + maxPieceSize := DefaultMaxPieceSize + + ts, err := m.api.ChainHead(ctx) + if err != nil { + return err + } + ask := &legacytypes.StorageAsk{ + Price: price, + VerifiedPrice: verifiedPrice, + Timestamp: ts.Height(), + Expiry: ts.Height() + duration, + Miner: miner, + SeqNo: seqnos[0] + 1, + MinPieceSize: minPieceSize, + MaxPieceSize: maxPieceSize, + } + + for _, option := range options { + option(ask) + } + + n, err := m.db.Exec(ctx, `UPDATE market_mk12_storage_ask SET created_at = $1, expiry = $2, sequence = $3, + price = $4, verified_price = $5, min_size = $6, max_size = $7 + WHERE sp_id = $8`, ask.Timestamp, ask.Expiry, int64(ask.SeqNo), ask.Price, ask.VerifiedPrice, + ask.MinPieceSize, ask.MaxPieceSize, spid) + + if err != nil { + return xerrors.Errorf("store ask: updating ask in DB: %w", err) + } + if n != 1 { + return xerrors.Errorf("store 
ask success: updated %d rows", n) + } + + return nil +} + +func (m *MK12) verifySignature(ctx context.Context, sig crypto.Signature, addr address.Address, input []byte) (bool, error) { + addr, err := m.api.StateAccountKey(ctx, addr, ctypes.EmptyTSK) + if err != nil { + return false, err + } + + // Check if the client is an f4 address, ie an FVM contract + clientAddr := addr.String() + if len(clientAddr) >= 2 && (clientAddr[:2] == "t4" || clientAddr[:2] == "f4") { + // Verify authorization by simulating an AuthenticateMessage + return m.verifyContractSignature(ctx, sig, addr, input) + } + + // Otherwise do local signature verification + err = sigs.Verify(&sig, addr, input) + return err == nil, err +} + +// verifyContractSignature simulates sending an AuthenticateMessage to authenticate the signer +func (m *MK12) verifyContractSignature(ctx context.Context, sig crypto.Signature, addr address.Address, input []byte) (bool, error) { + var params account.AuthenticateMessageParams + params.Message = input + params.Signature = sig.Data + + var msg ctypes.Message + buf := new(bytes.Buffer) + + var err error + err = params.MarshalCBOR(buf) + if err != nil { + return false, err + } + msg.Params = buf.Bytes() + + msg.From = builtin.StorageMarketActorAddr + msg.To = addr + msg.Nonce = 1 + + msg.Method, err = builtin.GenerateFRCMethodNum("AuthenticateMessage") // abi.MethodNum(2643134072) + if err != nil { + return false, err + } + + res, err := m.api.StateCall(ctx, &msg, ctypes.EmptyTSK) + if err != nil { + return false, fmt.Errorf("state call to %s returned an error: %w", addr, err) + } + + return res.MsgRct.ExitCode == exitcode.Ok, nil +} diff --git a/market/mk12/types.go b/market/mk12/types.go new file mode 100644 index 000000000..3070b75f3 --- /dev/null +++ b/market/mk12/types.go @@ -0,0 +1,290 @@ +package mk12 + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "strings" + "time" + + "github.com/google/uuid" + "github.com/ipfs/go-cid" + 
"github.com/ipni/go-libipni/maurl" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/go-state-types/crypto" +) + +const Libp2pScheme = "libp2p" + +//go:generate cbor-gen-for --map-encoding DealParamsV120 DealParams DirectDealParams Transfer DealResponse DealStatusRequest DealStatusResponse DealStatus + +// DealStatusRequest is sent to get the current state of a deal from a +// storage provider +type DealStatusRequest struct { + DealUUID uuid.UUID + Signature crypto.Signature +} + +// DealStatusResponse is the current state of a deal +type DealStatusResponse struct { + DealUUID uuid.UUID + // Error is non-empty if there is an error getting the deal status + // (eg invalid request signature) + Error string + DealStatus *DealStatus + IsOffline bool + TransferSize uint64 + NBytesReceived uint64 +} + +type DealStatus struct { + // Error is non-empty if the deal is in the error state + Error string + // Status is a string corresponding to a deal checkpoint + Status string + // SealingStatus is the sealing status reported by lotus miner + SealingStatus string + // Proposal is the deal proposal + Proposal market.DealProposal + // SignedProposalCid is the cid of the client deal proposal + signature + SignedProposalCid cid.Cid + // PublishCid is the cid of the Publish message sent on chain, if the deal + // has reached the publish stage + PublishCid *cid.Cid + // ChainDealID is the id of the deal in chain state + ChainDealID abi.DealID +} + +type DealParamsV120 struct { + DealUUID uuid.UUID + IsOffline bool + ClientDealProposal market.ClientDealProposal + DealDataRoot cid.Cid + Transfer Transfer +} + +type DealParams struct { + 
DealUUID uuid.UUID + IsOffline bool + ClientDealProposal market.ClientDealProposal + DealDataRoot cid.Cid + Transfer Transfer // Transfer params will be the zero value if this is an offline deal + RemoveUnsealedCopy bool + SkipIPNIAnnounce bool +} + +type DirectDealParams struct { + DealUUID uuid.UUID + AllocationID verifreg.AllocationId + PieceCid cid.Cid + ClientAddr address.Address + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch + FilePath string + DeleteAfterImport bool + RemoveUnsealedCopy bool + SkipIPNIAnnounce bool +} + +// Transfer has the parameters for a data transfer +type Transfer struct { + // The type of transfer eg "http" + Type string + // An optional ID that can be supplied by the client to identify the deal + ClientID string + // A byte array containing marshalled data specific to the transfer type + // eg a JSON encoded struct { URL: "", Headers: {...} } + Params []byte + // The size of the data transferred in bytes + Size uint64 +} + +type TransportUrl struct { + Scheme string + Url string + PeerID peer.ID + Multiaddr multiaddr.Multiaddr +} + +// HttpRequest has parameters for an HTTP transfer +type HttpRequest struct { + // URL can be + // - an http URL: + // "https://example.com/path" + // - a libp2p URL: + // "libp2p:///ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ" + // Must include a Peer ID + URL string + // Headers are the HTTP headers that are sent as part of the request, + // eg "Authorization" + Headers map[string]string +} + +func ParseUrl(urlStr string) (*TransportUrl, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, fmt.Errorf("parsing url '%s': %w", urlStr, err) + } + if u.Scheme == "" { + return nil, fmt.Errorf("parsing url '%s': could not parse scheme", urlStr) + } + if u.Scheme == Libp2pScheme { + return parseLibp2pUrl(urlStr) + } + return &TransportUrl{Scheme: u.Scheme, Url: urlStr}, nil +} + +func parseLibp2pUrl(urlStr string) (*TransportUrl, error) { + // Remove 
libp2p prefix + prefix := Libp2pScheme + "://" + if !strings.HasPrefix(urlStr, prefix) { + return nil, fmt.Errorf("libp2p URL '%s' must start with prefix '%s'", urlStr, prefix) + } + + // Convert to AddrInfo + addrInfo, err := peer.AddrInfoFromString(urlStr[len(prefix):]) + if err != nil { + return nil, fmt.Errorf("parsing address info from url '%s': %w", urlStr, err) + } + + // There should be exactly one address + if len(addrInfo.Addrs) != 1 { + return nil, fmt.Errorf("expected only one address in url '%s'", urlStr) + } + + return &TransportUrl{ + Scheme: Libp2pScheme, + Url: Libp2pScheme + "://" + addrInfo.ID.String(), + PeerID: addrInfo.ID, + Multiaddr: addrInfo.Addrs[0], + }, nil +} + +func (t *Transfer) Host() (string, error) { + if t.Type != "http" && t.Type != "libp2p" { + return "", fmt.Errorf("cannot parse params for unrecognized transfer type '%s'", t.Type) + } + + // de-serialize transport opaque token + tInfo := &HttpRequest{} + if err := json.Unmarshal(t.Params, tInfo); err != nil { + return "", fmt.Errorf("failed to de-serialize transport params bytes '%s': %w", string(t.Params), err) + } + + // Parse http / multiaddr url + u, err := ParseUrl(tInfo.URL) + if err != nil { + return "", fmt.Errorf("cannot parse url '%s': %w", tInfo.URL, err) + } + + // If the url is in libp2p format + if u.Scheme == Libp2pScheme { + // Get the host from the multiaddr + mahttp, err := maurl.ToURL(u.Multiaddr) + if err != nil { + return "", err + } + return mahttp.Host, nil + } + + // Otherwise parse as an http url + httpUrl, err := url.Parse(u.Url) + if err != nil { + return "", fmt.Errorf("cannot parse url '%s' from '%s': %w", u.Url, tInfo.URL, err) + } + + return httpUrl.Host, nil +} + +type DealResponse struct { + Accepted bool + // Message is the reason the deal proposal was rejected. It is empty if + // the deal was accepted. 
+ Message string +} + +type DealPublisher interface { + Publish(ctx context.Context, deal market.ClientDealProposal) (cid.Cid, error) +} + +// PublishDealsWaitResult is the result of a call to wait for publish deals to +// appear on chain +type PublishDealsWaitResult struct { + DealID abi.DealID + FinalCid cid.Cid +} + +// ProviderDealRejectionInfo is the information sent by the Storage Provider +// to the Client when it accepts or rejects a deal. +type ProviderDealRejectionInfo struct { + Accepted bool + Reason string // The rejection reason, if the deal is rejected +} + +// ProviderDealState is the local state tracked for a deal by the StorageProvider. +type ProviderDealState struct { + // DealUuid is an unique uuid generated by client for the deal. + DealUuid uuid.UUID + // CreatedAt is the time at which the deal was stored + CreatedAt time.Time + // SignedProposalCID is cid for proposal and client signature + SignedProposalCID cid.Cid + // ClientDealProposal is the deal proposal sent by the client. + ClientDealProposal market.ClientDealProposal + // IsOffline is true for offline deals i.e. deals where the actual data to be stored by the SP is sent out of band + // and not via an online data transfer. + IsOffline bool + // CleanupData indicates whether to remove the data for a deal after the deal has been added to a sector. + // This is always true for online deals, and can be set as a flag for offline deals. + CleanupData bool + + // ClientPeerID is the Clients libp2p Peer ID. + ClientPeerID peer.ID + + // DealDataRoot is the root of the IPLD DAG that the client wants to store. + DealDataRoot cid.Cid + + // InboundCARPath is the file-path where the storage provider will persist the CAR file sent by the client. 
+ InboundFilePath string + + // Transfer has the parameters for the data transfer + Transfer Transfer + + // Chain Vars + ChainDealID abi.DealID + PublishCID *cid.Cid + + // sector packing info + SectorID abi.SectorNumber + Offset abi.PaddedPieceSize + + // set if there's an error + Err string + + // Keep unsealed copy of the data + FastRetrieval bool + + //Announce deal to the IPNI(Index Provider) + AnnounceToIPNI bool +} + +func (d *ProviderDealState) String() string { + return fmt.Sprintf("%+v", *d) +} + +func (d *ProviderDealState) GetSignedProposalCid() (cid.Cid, error) { + propnd, err := cborutil.AsIpld(&d.ClientDealProposal) + if err != nil { + return cid.Undef, fmt.Errorf("failed to compute signed deal proposal ipld node: %w", err) + } + + return propnd.Cid(), nil +} diff --git a/market/mk12/types_cbor_gen.go b/market/mk12/types_cbor_gen.go new file mode 100644 index 000000000..ec6b1c321 --- /dev/null +++ b/market/mk12/types_cbor_gen.go @@ -0,0 +1,1997 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package mk12 + +import ( + "fmt" + "io" + "math" + "sort" + + abi "github.com/filecoin-project/go-state-types/abi" + verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *DealParamsV120) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{165}); err != nil { + return err + } + + // t.DealUUID (uuid.UUID) (array) + if len("DealUUID") > 8192 { + return xerrors.Errorf("Value in field \"DealUUID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealUUID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealUUID")); err != nil { + return err + } + + if len(t.DealUUID) > 2097152 { + return xerrors.Errorf("Byte array in field t.DealUUID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.DealUUID))); err != nil { + return err + } + + if _, err := cw.Write(t.DealUUID[:]); err != nil { + return err + } + + // t.Transfer (mk12.Transfer) (struct) + if len("Transfer") > 8192 { + return xerrors.Errorf("Value in field \"Transfer\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Transfer"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Transfer")); err != nil { + return err + } + + if err := t.Transfer.MarshalCBOR(cw); err != nil { + return err + } + + // t.IsOffline (bool) (bool) + if len("IsOffline") > 8192 { + return xerrors.Errorf("Value in field \"IsOffline\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("IsOffline"))); err != nil { + return err + } + if _, err := cw.WriteString(string("IsOffline")); err != nil { + return err + } + + if err := 
cbg.WriteBool(w, t.IsOffline); err != nil { + return err + } + + // t.DealDataRoot (cid.Cid) (struct) + if len("DealDataRoot") > 8192 { + return xerrors.Errorf("Value in field \"DealDataRoot\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealDataRoot"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealDataRoot")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.DealDataRoot); err != nil { + return xerrors.Errorf("failed to write cid field t.DealDataRoot: %w", err) + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > 8192 { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ClientDealProposal")); err != nil { + return err + } + + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealParamsV120) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealParamsV120{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealParamsV120: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealUUID (uuid.UUID) (array) + case "DealUUID": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 2097152 { + return fmt.Errorf("t.DealUUID: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return 
fmt.Errorf("expected byte array") + } + if extra != 16 { + return fmt.Errorf("expected array to have 16 elements") + } + + t.DealUUID = [16]uint8{} + if _, err := io.ReadFull(cr, t.DealUUID[:]); err != nil { + return err + } + // t.Transfer (mk12.Transfer) (struct) + case "Transfer": + + { + + if err := t.Transfer.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Transfer: %w", err) + } + + } + // t.IsOffline (bool) (bool) + case "IsOffline": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.IsOffline = false + case 21: + t.IsOffline = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.DealDataRoot (cid.Cid) (struct) + case "DealDataRoot": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.DealDataRoot: %w", err) + } + + t.DealDataRoot = c + + } + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{167}); err != nil { + return err + } + + // t.DealUUID (uuid.UUID) (array) + if len("DealUUID") > 8192 { + return xerrors.Errorf("Value in field \"DealUUID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealUUID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealUUID")); err != nil { + return err + } + + if len(t.DealUUID) > 
2097152 { + return xerrors.Errorf("Byte array in field t.DealUUID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.DealUUID))); err != nil { + return err + } + + if _, err := cw.Write(t.DealUUID[:]); err != nil { + return err + } + + // t.Transfer (mk12.Transfer) (struct) + if len("Transfer") > 8192 { + return xerrors.Errorf("Value in field \"Transfer\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Transfer"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Transfer")); err != nil { + return err + } + + if err := t.Transfer.MarshalCBOR(cw); err != nil { + return err + } + + // t.IsOffline (bool) (bool) + if len("IsOffline") > 8192 { + return xerrors.Errorf("Value in field \"IsOffline\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("IsOffline"))); err != nil { + return err + } + if _, err := cw.WriteString(string("IsOffline")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.IsOffline); err != nil { + return err + } + + // t.DealDataRoot (cid.Cid) (struct) + if len("DealDataRoot") > 8192 { + return xerrors.Errorf("Value in field \"DealDataRoot\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealDataRoot"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealDataRoot")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.DealDataRoot); err != nil { + return xerrors.Errorf("failed to write cid field t.DealDataRoot: %w", err) + } + + // t.SkipIPNIAnnounce (bool) (bool) + if len("SkipIPNIAnnounce") > 8192 { + return xerrors.Errorf("Value in field \"SkipIPNIAnnounce\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SkipIPNIAnnounce"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SkipIPNIAnnounce")); err != nil { + return err + } + + if err := cbg.WriteBool(w, 
t.SkipIPNIAnnounce); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > 8192 { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ClientDealProposal")); err != nil { + return err + } + + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.RemoveUnsealedCopy (bool) (bool) + if len("RemoveUnsealedCopy") > 8192 { + return xerrors.Errorf("Value in field \"RemoveUnsealedCopy\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RemoveUnsealedCopy"))); err != nil { + return err + } + if _, err := cw.WriteString(string("RemoveUnsealedCopy")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.RemoveUnsealedCopy); err != nil { + return err + } + return nil +} + +func (t *DealParams) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealParams{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealParams: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealUUID (uuid.UUID) (array) + case "DealUUID": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 2097152 { + return fmt.Errorf("t.DealUUID: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + if extra != 16 { + 
return fmt.Errorf("expected array to have 16 elements") + } + + t.DealUUID = [16]uint8{} + if _, err := io.ReadFull(cr, t.DealUUID[:]); err != nil { + return err + } + // t.Transfer (mk12.Transfer) (struct) + case "Transfer": + + { + + if err := t.Transfer.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Transfer: %w", err) + } + + } + // t.IsOffline (bool) (bool) + case "IsOffline": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.IsOffline = false + case 21: + t.IsOffline = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.DealDataRoot (cid.Cid) (struct) + case "DealDataRoot": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.DealDataRoot: %w", err) + } + + t.DealDataRoot = c + + } + // t.SkipIPNIAnnounce (bool) (bool) + case "SkipIPNIAnnounce": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.SkipIPNIAnnounce = false + case 21: + t.SkipIPNIAnnounce = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.RemoveUnsealedCopy (bool) (bool) + case "RemoveUnsealedCopy": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.RemoveUnsealedCopy = false + case 21: + t.RemoveUnsealedCopy = true + default: + return 
fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DirectDealParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{170}); err != nil { + return err + } + + // t.DealUUID (uuid.UUID) (array) + if len("DealUUID") > 8192 { + return xerrors.Errorf("Value in field \"DealUUID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealUUID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealUUID")); err != nil { + return err + } + + if len(t.DealUUID) > 2097152 { + return xerrors.Errorf("Byte array in field t.DealUUID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.DealUUID))); err != nil { + return err + } + + if _, err := cw.Write(t.DealUUID[:]); err != nil { + return err + } + + // t.EndEpoch (abi.ChainEpoch) (int64) + if len("EndEpoch") > 8192 { + return xerrors.Errorf("Value in field \"EndEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("EndEpoch")); err != nil { + return err + } + + if t.EndEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { + return err + } + } + + // t.FilePath (string) (string) + if len("FilePath") > 8192 { + return xerrors.Errorf("Value in field \"FilePath\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FilePath"))); err != nil { + return err + } + if _, err := cw.WriteString(string("FilePath")); err != nil { + return err + } 
+ + if len(t.FilePath) > 8192 { + return xerrors.Errorf("Value in field t.FilePath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.FilePath))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.FilePath)); err != nil { + return err + } + + // t.PieceCid (cid.Cid) (struct) + if len("PieceCid") > 8192 { + return xerrors.Errorf("Value in field \"PieceCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceCid")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.PieceCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCid: %w", err) + } + + // t.ClientAddr (address.Address) (struct) + if len("ClientAddr") > 8192 { + return xerrors.Errorf("Value in field \"ClientAddr\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientAddr"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ClientAddr")); err != nil { + return err + } + + if err := t.ClientAddr.MarshalCBOR(cw); err != nil { + return err + } + + // t.StartEpoch (abi.ChainEpoch) (int64) + if len("StartEpoch") > 8192 { + return xerrors.Errorf("Value in field \"StartEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("StartEpoch")); err != nil { + return err + } + + if t.StartEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { + return err + } + } + + // t.AllocationID (verifreg.AllocationId) (uint64) + if len("AllocationID") > 8192 { + return xerrors.Errorf("Value in field \"AllocationID\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AllocationID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("AllocationID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.AllocationID)); err != nil { + return err + } + + // t.SkipIPNIAnnounce (bool) (bool) + if len("SkipIPNIAnnounce") > 8192 { + return xerrors.Errorf("Value in field \"SkipIPNIAnnounce\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SkipIPNIAnnounce"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SkipIPNIAnnounce")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.SkipIPNIAnnounce); err != nil { + return err + } + + // t.DeleteAfterImport (bool) (bool) + if len("DeleteAfterImport") > 8192 { + return xerrors.Errorf("Value in field \"DeleteAfterImport\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DeleteAfterImport"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DeleteAfterImport")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.DeleteAfterImport); err != nil { + return err + } + + // t.RemoveUnsealedCopy (bool) (bool) + if len("RemoveUnsealedCopy") > 8192 { + return xerrors.Errorf("Value in field \"RemoveUnsealedCopy\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RemoveUnsealedCopy"))); err != nil { + return err + } + if _, err := cw.WriteString(string("RemoveUnsealedCopy")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.RemoveUnsealedCopy); err != nil { + return err + } + return nil +} + +func (t *DirectDealParams) UnmarshalCBOR(r io.Reader) (err error) { + *t = DirectDealParams{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return 
fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DirectDealParams: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealUUID (uuid.UUID) (array) + case "DealUUID": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 2097152 { + return fmt.Errorf("t.DealUUID: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + if extra != 16 { + return fmt.Errorf("expected array to have 16 elements") + } + + t.DealUUID = [16]uint8{} + if _, err := io.ReadFull(cr, t.DealUUID[:]); err != nil { + return err + } + // t.EndEpoch (abi.ChainEpoch) (int64) + case "EndEpoch": + { + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + var extraI int64 + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EndEpoch = abi.ChainEpoch(extraI) + } + // t.FilePath (string) (string) + case "FilePath": + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.FilePath = string(sval) + } + // t.PieceCid (cid.Cid) (struct) + case "PieceCid": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCid: %w", err) + } + + t.PieceCid = c + + } + // t.ClientAddr (address.Address) (struct) + case "ClientAddr": + + { + + if err := t.ClientAddr.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientAddr: %w", err) + } + + } + // 
t.StartEpoch (abi.ChainEpoch) (int64) + case "StartEpoch": + { + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + var extraI int64 + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartEpoch = abi.ChainEpoch(extraI) + } + // t.AllocationID (verifreg.AllocationId) (uint64) + case "AllocationID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.AllocationID = verifreg.AllocationId(extra) + + } + // t.SkipIPNIAnnounce (bool) (bool) + case "SkipIPNIAnnounce": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.SkipIPNIAnnounce = false + case 21: + t.SkipIPNIAnnounce = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.DeleteAfterImport (bool) (bool) + case "DeleteAfterImport": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.DeleteAfterImport = false + case 21: + t.DeleteAfterImport = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.RemoveUnsealedCopy (bool) (bool) + case "RemoveUnsealedCopy": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.RemoveUnsealedCopy = false + case 21: + 
t.RemoveUnsealedCopy = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Transfer) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{164}); err != nil { + return err + } + + // t.Size (uint64) (uint64) + if len("Size") > 8192 { + return xerrors.Errorf("Value in field \"Size\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Size")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { + return err + } + + // t.Type (string) (string) + if len("Type") > 8192 { + return xerrors.Errorf("Value in field \"Type\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Type"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Type")); err != nil { + return err + } + + if len(t.Type) > 8192 { + return xerrors.Errorf("Value in field t.Type was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Type))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Type)); err != nil { + return err + } + + // t.Params ([]uint8) (slice) + if len("Params") > 8192 { + return xerrors.Errorf("Value in field \"Params\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Params"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Params")); err != nil { + return err + } + + if len(t.Params) > 2097152 { + return xerrors.Errorf("Byte array in field t.Params was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, 
uint64(len(t.Params))); err != nil { + return err + } + + if _, err := cw.Write(t.Params); err != nil { + return err + } + + // t.ClientID (string) (string) + if len("ClientID") > 8192 { + return xerrors.Errorf("Value in field \"ClientID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ClientID")); err != nil { + return err + } + + if len(t.ClientID) > 8192 { + return xerrors.Errorf("Value in field t.ClientID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ClientID))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.ClientID)); err != nil { + return err + } + return nil +} + +func (t *Transfer) UnmarshalCBOR(r io.Reader) (err error) { + *t = Transfer{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Transfer: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Size (uint64) (uint64) + case "Size": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Size = uint64(extra) + + } + // t.Type (string) (string) + case "Type": + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.Type = string(sval) + } + // t.Params ([]uint8) (slice) + case "Params": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 2097152 { + return 
fmt.Errorf("t.Params: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Params = make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.Params); err != nil { + return err + } + + // t.ClientID (string) (string) + case "ClientID": + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.ClientID = string(sval) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > 8192 { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Message")); err != nil { + return err + } + + if len(t.Message) > 8192 { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Message)); err != nil { + return err + } + + // t.Accepted (bool) (bool) + if len("Accepted") > 8192 { + return xerrors.Errorf("Value in field \"Accepted\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Accepted"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Accepted")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.Accepted); err != nil { + return err + } + return nil +} + +func (t *DealResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() 
+ if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Accepted (bool) (bool) + case "Accepted": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Accepted = false + case 21: + t.Accepted = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealStatusRequest) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.DealUUID (uuid.UUID) (array) + if len("DealUUID") > 8192 { + return xerrors.Errorf("Value in field \"DealUUID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealUUID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealUUID")); err != nil { + return err + } + + if len(t.DealUUID) > 2097152 { + return xerrors.Errorf("Byte array in field t.DealUUID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.DealUUID))); err != nil { + return err + } + + if _, err 
:= cw.Write(t.DealUUID[:]); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > 8192 { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Signature")); err != nil { + return err + } + + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealStatusRequest) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatusRequest{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStatusRequest: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealUUID (uuid.UUID) (array) + case "DealUUID": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 2097152 { + return fmt.Errorf("t.DealUUID: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + if extra != 16 { + return fmt.Errorf("expected array to have 16 elements") + } + + t.DealUUID = [16]uint8{} + if _, err := io.ReadFull(cr, t.DealUUID[:]); err != nil { + return err + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} 
+func (t *DealStatusResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{166}); err != nil { + return err + } + + // t.Error (string) (string) + if len("Error") > 8192 { + return xerrors.Errorf("Value in field \"Error\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Error"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Error")); err != nil { + return err + } + + if len(t.Error) > 8192 { + return xerrors.Errorf("Value in field t.Error was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Error))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Error)); err != nil { + return err + } + + // t.DealUUID (uuid.UUID) (array) + if len("DealUUID") > 8192 { + return xerrors.Errorf("Value in field \"DealUUID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealUUID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealUUID")); err != nil { + return err + } + + if len(t.DealUUID) > 2097152 { + return xerrors.Errorf("Byte array in field t.DealUUID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.DealUUID))); err != nil { + return err + } + + if _, err := cw.Write(t.DealUUID[:]); err != nil { + return err + } + + // t.IsOffline (bool) (bool) + if len("IsOffline") > 8192 { + return xerrors.Errorf("Value in field \"IsOffline\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("IsOffline"))); err != nil { + return err + } + if _, err := cw.WriteString(string("IsOffline")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.IsOffline); err != nil { + return err + } + + // t.DealStatus (mk12.DealStatus) (struct) + if len("DealStatus") > 8192 { + return xerrors.Errorf("Value in field 
\"DealStatus\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealStatus"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealStatus")); err != nil { + return err + } + + if err := t.DealStatus.MarshalCBOR(cw); err != nil { + return err + } + + // t.TransferSize (uint64) (uint64) + if len("TransferSize") > 8192 { + return xerrors.Errorf("Value in field \"TransferSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TransferSize"))); err != nil { + return err + } + if _, err := cw.WriteString(string("TransferSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TransferSize)); err != nil { + return err + } + + // t.NBytesReceived (uint64) (uint64) + if len("NBytesReceived") > 8192 { + return xerrors.Errorf("Value in field \"NBytesReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("NBytesReceived"))); err != nil { + return err + } + if _, err := cw.WriteString(string("NBytesReceived")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.NBytesReceived)); err != nil { + return err + } + + return nil +} + +func (t *DealStatusResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatusResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStatusResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Error (string) (string) + case "Error": + 
+ { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.Error = string(sval) + } + // t.DealUUID (uuid.UUID) (array) + case "DealUUID": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 2097152 { + return fmt.Errorf("t.DealUUID: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + if extra != 16 { + return fmt.Errorf("expected array to have 16 elements") + } + + t.DealUUID = [16]uint8{} + if _, err := io.ReadFull(cr, t.DealUUID[:]); err != nil { + return err + } + // t.IsOffline (bool) (bool) + case "IsOffline": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.IsOffline = false + case 21: + t.IsOffline = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.DealStatus (mk12.DealStatus) (struct) + case "DealStatus": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealStatus = new(DealStatus) + if err := t.DealStatus.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealStatus pointer: %w", err) + } + } + + } + // t.TransferSize (uint64) (uint64) + case "TransferSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TransferSize = uint64(extra) + + } + // t.NBytesReceived (uint64) (uint64) + case "NBytesReceived": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.NBytesReceived = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore 
it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealStatus) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{167}); err != nil { + return err + } + + // t.Error (string) (string) + if len("Error") > 8192 { + return xerrors.Errorf("Value in field \"Error\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Error"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Error")); err != nil { + return err + } + + if len(t.Error) > 8192 { + return xerrors.Errorf("Value in field t.Error was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Error))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Error)); err != nil { + return err + } + + // t.Status (string) (string) + if len("Status") > 8192 { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Status")); err != nil { + return err + } + + if len(t.Status) > 8192 { + return xerrors.Errorf("Value in field t.Status was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Status))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.Status)); err != nil { + return err + } + + // t.Proposal (market.DealProposal) (struct) + if len("Proposal") > 8192 { + return xerrors.Errorf("Value in field \"Proposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Proposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("Proposal")); err != nil { + return err + } + + if err := t.Proposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > 8192 { + return 
xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.ChainDealID (abi.DealID) (uint64) + if len("ChainDealID") > 8192 { + return xerrors.Errorf("Value in field \"ChainDealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChainDealID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("ChainDealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ChainDealID)); err != nil { + return err + } + + // t.SealingStatus (string) (string) + if len("SealingStatus") > 8192 { + return xerrors.Errorf("Value in field \"SealingStatus\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SealingStatus"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SealingStatus")); err != nil { + return err + } + + if len(t.SealingStatus) > 8192 { + return xerrors.Errorf("Value in field t.SealingStatus was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.SealingStatus))); err != nil { + return err + } + if _, err := cw.WriteString(string(t.SealingStatus)); err != nil { + return err + } + + // t.SignedProposalCid (cid.Cid) (struct) + if len("SignedProposalCid") > 8192 { + return xerrors.Errorf("Value in field \"SignedProposalCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SignedProposalCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("SignedProposalCid")); 
err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.SignedProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.SignedProposalCid: %w", err) + } + + return nil +} + +func (t *DealStatus) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatus{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStatus: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Error (string) (string) + case "Error": + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.Error = string(sval) + } + // t.Status (string) (string) + case "Status": + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.Status = string(sval) + } + // t.Proposal (market.DealProposal) (struct) + case "Proposal": + + { + + if err := t.Proposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Proposal: %w", err) + } + + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.ChainDealID (abi.DealID) (uint64) + case "ChainDealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + 
t.ChainDealID = abi.DealID(extra) + + } + // t.SealingStatus (string) (string) + case "SealingStatus": + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + t.SealingStatus = string(sval) + } + // t.SignedProposalCid (cid.Cid) (struct) + case "SignedProposalCid": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.SignedProposalCid: %w", err) + } + + t.SignedProposalCid = c + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/market/deal_ingest_seal.go b/market/storageingest/deal_ingest_seal.go similarity index 56% rename from market/deal_ingest_seal.go rename to market/storageingest/deal_ingest_seal.go index e5dada0f6..7a54c712e 100644 --- a/market/deal_ingest_seal.go +++ b/market/storageingest/deal_ingest_seal.go @@ -1,9 +1,10 @@ -package market +package storageingest import ( "context" "encoding/json" "fmt" + "math" "net/http" "net/url" "time" @@ -19,6 +20,8 @@ import ( "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/curio/build" + "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/tasks/seal" @@ -28,13 +31,14 @@ import ( lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece" ) -var log = logging.Logger("piece-ingestor") - const loopFrequency = 10 * time.Second +var log = logging.Logger("storage-ingest") + type Ingester interface { - AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) - SectorStartSealing(context.Context, abi.SectorNumber) error + AllocatePieceToSector(ctx context.Context, tx *harmonydb.Tx, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source 
url.URL, header http.Header) (api.SectorOffset, error) + SectorStartSealing(ctx context.Context, maddr address.Address, sector abi.SectorNumber) error + GetExpectedSealDuration() abi.ChainEpoch } type PieceIngesterApi interface { @@ -54,6 +58,7 @@ type PieceIngesterApi interface { } type openSector struct { + miner abi.ActorID number abi.SectorNumber currentSize abi.PaddedPieceSize earliestStartEpoch abi.ChainEpoch @@ -62,17 +67,22 @@ type openSector struct { latestEndEpoch abi.ChainEpoch } +type mdetails struct { + sealProof abi.RegisteredSealProof + updateProof abi.RegisteredUpdateProof + sectorSize abi.SectorSize +} + type PieceIngester struct { - ctx context.Context - db *harmonydb.DB - api PieceIngesterApi - miner address.Address - mid uint64 // miner ID - windowPoStProofType abi.RegisteredPoStProof - synth bool - sectorSize abi.SectorSize - sealRightNow bool // Should be true only for CurioAPI AllocatePieceToSector method - maxWaitTime time.Duration + ctx context.Context + db *harmonydb.DB + api PieceIngesterApi + addToID map[address.Address]int64 + idToAddr map[abi.ActorID]address.Address + minerDetails map[int64]*mdetails + sealRightNow bool // Should be true only for CurioAPI AllocatePieceToSector method + maxWaitTime time.Duration + expectedSealDuration abi.ChainEpoch } type verifiedDeal struct { @@ -81,28 +91,63 @@ type verifiedDeal struct { tmax abi.ChainEpoch } -func NewPieceIngester(ctx context.Context, db *harmonydb.DB, api PieceIngesterApi, maddr address.Address, sealRightNow bool, maxWaitTime time.Duration, synth bool) (*PieceIngester, error) { - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return nil, err +func NewPieceIngester(ctx context.Context, db *harmonydb.DB, api PieceIngesterApi, miners []address.Address, sealRightNow bool, cfg *config.CurioConfig) (*PieceIngester, error) { + if len(miners) == 0 { + return nil, xerrors.Errorf("no miners provided") } - mid, err := address.IDFromAddress(maddr) - if err != nil 
{ - return nil, xerrors.Errorf("getting miner ID: %w", err) + addToID := make(map[address.Address]int64) + minerDetails := make(map[int64]*mdetails) + idToAddr := make(map[abi.ActorID]address.Address) + + for _, maddr := range miners { + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return nil, err + } + + mid, err := address.IDFromAddress(maddr) + if err != nil { + return nil, xerrors.Errorf("getting miner ID: %w", err) + } + + nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("getting network version: %w", err) + } + + proof, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, mi.WindowPoStProofType, cfg.Subsystems.UseSyntheticPoRep) + if err != nil { + return nil, xerrors.Errorf("getting preferred seal proof type: %w", err) + } + + proofInfo, ok := abi.SealProofInfos[proof] + if !ok { + return nil, xerrors.Errorf("getting seal proof type: %w", err) + } + + addToID[maddr] = int64(mid) + minerDetails[int64(mid)] = &mdetails{ + sealProof: proof, + sectorSize: mi.SectorSize, + updateProof: proofInfo.UpdateProof, + } + idToAddr[abi.ActorID(mid)] = maddr } + epochs := time.Duration(cfg.Market.StorageMarketConfig.MK12.ExpectedPoRepSealDuration).Seconds() / float64(build.BlockDelaySecs) + expectedEpochs := math.Ceil(epochs) + pi := &PieceIngester{ - ctx: ctx, - db: db, - api: api, - sealRightNow: sealRightNow, - miner: maddr, - maxWaitTime: maxWaitTime, - sectorSize: mi.SectorSize, - windowPoStProofType: mi.WindowPoStProofType, - mid: mid, - synth: synth, + ctx: ctx, + db: db, + api: api, + sealRightNow: sealRightNow, + maxWaitTime: time.Duration(cfg.Ingest.MaxDealWaitTime), + addToID: addToID, + minerDetails: minerDetails, + idToAddr: idToAddr, + expectedSealDuration: abi.ChainEpoch(int64(expectedEpochs)), } go pi.start() @@ -133,55 +178,53 @@ func (p *PieceIngester) Seal() error { return xerrors.Errorf("getting chain head: %w", err) } - spt, err := p.getSealProofType() - if err 
!= nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } - shouldSeal := func(sector *openSector) bool { // Start sealing a sector if // 1. If sector is full // 2. We have been waiting for MaxWaitDuration - // 3. StartEpoch is less than 8 hours // todo: make this config? - if sector.currentSize == abi.PaddedPieceSize(p.sectorSize) { - log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "sector full") + // 3. StartEpoch is currentEpoch + expectedSealDuration + if sector.currentSize == abi.PaddedPieceSize(p.minerDetails[int64(sector.miner)].sectorSize) { + log.Debugf("start sealing sector %d of miner %s: %s", sector.number, p.idToAddr[sector.miner].String(), "sector full") return true } if time.Since(*sector.openedAt) > p.maxWaitTime { - log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "MaxWaitTime reached") + log.Debugf("start sealing sector %d of miner %s: %s", sector.number, p.idToAddr[sector.miner].String(), "MaxWaitTime reached") return true } - if sector.earliestStartEpoch < head.Height()+abi.ChainEpoch(960) { - log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "earliest start epoch") + if sector.earliestStartEpoch < head.Height()+p.expectedSealDuration { + log.Debugf("start sealing sector %d of miner %s: %s", sector.number, p.idToAddr[sector.miner].String(), "earliest start epoch") return true } return false } comm, err := p.db.BeginTransaction(p.ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - openSectors, err := p.getOpenSectors(tx) - if err != nil { - return false, err - } + for _, mid := range p.addToID { + openSectors, err := p.getOpenSectors(tx, mid) + if err != nil { + return false, err + } - for _, sector := range openSectors { - sector := sector - if shouldSeal(sector) { - // Start sealing the sector - cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, 
p.mid, sector.number, spt) + for _, sector := range openSectors { + sector := sector + if shouldSeal(sector) { + // Start sealing the sector + cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, mid, sector.number, p.minerDetails[mid].sealProof) - if err != nil { - return false, xerrors.Errorf("adding sector to pipeline: %w", err) - } + if err != nil { + return false, xerrors.Errorf("adding sector to pipeline: %w", err) + } - if cn != 1 { - return false, xerrors.Errorf("adding sector to pipeline: incorrect number of rows returned") - } + if cn != 1 { + return false, xerrors.Errorf("adding sector to pipeline: incorrect number of rows returned") + } - _, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", p.mid, sector.number) - if err != nil { - return false, xerrors.Errorf("adding sector to pipeline: %w", err) + _, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", mid, sector.number) + if err != nil { + return false, xerrors.Errorf("adding sector to pipeline: %w", err) + } } + } } @@ -199,11 +242,7 @@ func (p *PieceIngester) Seal() error { return nil } -func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) { - if maddr != p.miner { - return api.SectorOffset{}, xerrors.Errorf("miner address doesn't match") - } - +func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, tx *harmonydb.Tx, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) { var psize abi.PaddedPieceSize if piece.PieceActivationManifest != nil { @@ -270,7 +309,7 @@ func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, maddr address if !p.sealRightNow { // Try to allocate the piece to an open sector - allocated, ret, err := p.allocateToExisting(ctx, piece, psize, rawSize, source, 
dataHdrJson, propJson, vd) + allocated, ret, err := p.allocateToExisting(tx, maddr, piece, psize, rawSize, source, dataHdrJson, propJson, vd) if err != nil { return api.SectorOffset{}, err } @@ -280,125 +319,110 @@ func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, maddr address } // Allocation to open sector failed, create a new sector and add the piece to it - num, err := seal.AllocateSectorNumbers(ctx, p.api, p.db, maddr, 1, func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) { - if len(numbers) != 1 { - return false, xerrors.Errorf("expected one sector number") - } - n := numbers[0] - - if piece.DealProposal != nil { - _, err = tx.Exec(`SELECT insert_sector_market_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`, - p.mid, n, 0, - piece.DealProposal.PieceCID, piece.DealProposal.PieceSize, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.PublishCid, piece.DealID, propJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch) - if err != nil { - return false, xerrors.Errorf("adding deal to sector: %w", err) - } - } else { - _, err = tx.Exec(`SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, - p.mid, n, 0, - piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson) - if err != nil { - return false, xerrors.Errorf("adding deal to sector: %w", err) - } - } - return true, nil - }) + num, err := seal.AllocateSectorNumbers(ctx, p.api, tx, maddr, 1) if err != nil { - return api.SectorOffset{}, xerrors.Errorf("allocating sector numbers: %w", err) + return api.SectorOffset{}, xerrors.Errorf("allocating new sector: %w", err) } if len(num) != 1 { return api.SectorOffset{}, xerrors.Errorf("expected one sector number") } + n := num[0] + + // Assign piece to new sector + if piece.DealProposal != nil { + _, err = 
tx.Exec(`SELECT insert_sector_market_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`, + p.addToID[maddr], n, 0, + piece.DealProposal.PieceCID, piece.DealProposal.PieceSize, + source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, + piece.PublishCid, piece.DealID, propJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch) + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("adding deal to sector: %w", err) + } + } else { + _, err = tx.Exec(`SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, + p.addToID[maddr], n, 0, + piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size, + source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, + piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson) + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("adding deal to sector: %w", err) + } + } if p.sealRightNow { - err = p.SectorStartSealing(ctx, num[0]) + err = p.SectorStartSealing(ctx, maddr, num[0]) if err != nil { return api.SectorOffset{}, xerrors.Errorf("SectorStartSealing: %w", err) } } return api.SectorOffset{ - Sector: num[0], + Sector: n, Offset: 0, }, nil } -func (p *PieceIngester) allocateToExisting(ctx context.Context, piece lpiece.PieceDealInfo, psize abi.PaddedPieceSize, rawSize int64, source url.URL, dataHdrJson, propJson []byte, vd verifiedDeal) (bool, api.SectorOffset, error) { +func (p *PieceIngester) allocateToExisting(tx *harmonydb.Tx, maddr address.Address, piece lpiece.PieceDealInfo, psize abi.PaddedPieceSize, rawSize int64, source url.URL, dataHdrJson, propJson []byte, vd verifiedDeal) (bool, api.SectorOffset, error) { var ret api.SectorOffset var allocated bool var rerr error - comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - openSectors, err := p.getOpenSectors(tx) - if err != nil { - return false, err - } + openSectors, err := p.getOpenSectors(tx, p.addToID[maddr]) + if err != nil { + return 
false, api.SectorOffset{}, err + } - for _, sec := range openSectors { - sec := sec - if sec.currentSize+psize <= abi.PaddedPieceSize(p.sectorSize) { - if vd.isVerified { - sectorLifeTime := sec.latestEndEpoch - sec.earliestStartEpoch - // Allocation's TMin must fit in sector and TMax should be at least sector lifetime or more - // Based on https://github.com/filecoin-project/builtin-actors/blob/a0e34d22665ac8c84f02fea8a099216f29ffaeeb/actors/verifreg/src/lib.rs#L1071-L1086 - if sectorLifeTime <= vd.tmin && sectorLifeTime >= vd.tmax { - continue - } + for _, sec := range openSectors { + sec := sec + if sec.currentSize+psize <= abi.PaddedPieceSize(p.minerDetails[p.addToID[maddr]].sectorSize) { + if vd.isVerified { + sectorLifeTime := sec.latestEndEpoch - sec.earliestStartEpoch + // Allocation's TMin must fit in sector and TMax should be at least sector lifetime or more + // Based on https://github.com/filecoin-project/builtin-actors/blob/a0e34d22665ac8c84f02fea8a099216f29ffaeeb/actors/verifreg/src/lib.rs#L1071-L1086 + if sectorLifeTime <= vd.tmin && sectorLifeTime >= vd.tmax { + continue } + } - ret.Sector = sec.number - ret.Offset = sec.currentSize - - // Insert market deal to DB for the sector - if piece.DealProposal != nil { - cn, err := tx.Exec(`SELECT insert_sector_market_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`, - p.mid, sec.number, sec.index+1, - piece.DealProposal.PieceCID, piece.DealProposal.PieceSize, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.PublishCid, piece.DealID, propJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch) + ret.Sector = sec.number + ret.Offset = sec.currentSize - if err != nil { - return false, fmt.Errorf("adding deal to sector: %v", err) - } + // Insert market deal to DB for the sector + if piece.DealProposal != nil { + cn, err := tx.Exec(`SELECT insert_sector_market_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`, + p.addToID[maddr], sec.number, 
sec.index+1, + piece.DealProposal.PieceCID, piece.DealProposal.PieceSize, + source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, + piece.PublishCid, piece.DealID, propJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch) - if cn != 1 { - return false, xerrors.Errorf("expected one piece") - } + if err != nil { + return false, api.SectorOffset{}, fmt.Errorf("adding deal to sector: %v", err) + } - } else { // Insert DDO deal to DB for the sector - cn, err := tx.Exec(`SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, - p.mid, sec.number, sec.index+1, - piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson) + if cn != 1 { + return false, api.SectorOffset{}, xerrors.Errorf("expected one piece") + } - if err != nil { - return false, fmt.Errorf("adding deal to sector: %v", err) - } + } else { // Insert DDO deal to DB for the sector + cn, err := tx.Exec(`SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, + p.addToID[maddr], sec.number, sec.index+1, + piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size, + source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, + piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson) - if cn != 1 { - return false, xerrors.Errorf("expected one piece") - } + if err != nil { + return false, api.SectorOffset{}, fmt.Errorf("adding deal to sector: %v", err) + } + if cn != 1 { + return false, api.SectorOffset{}, xerrors.Errorf("expected one piece") } - allocated = true - break + } + allocated = true + break } - return true, nil - }, harmonydb.OptionRetry()) - - if !comm { - rerr = xerrors.Errorf("allocating piece to a sector: commit failed") - } - - if err != nil { - rerr = xerrors.Errorf("allocating piece to a sector: %w", err) } return allocated, ret, rerr @@ -406,6 +430,7 @@ 
func (p *PieceIngester) allocateToExisting(ctx context.Context, piece lpiece.Pie } type pieceDetails struct { + Miner abi.ActorID `db:"sp_id"` Sector abi.SectorNumber `db:"sector_number"` Size abi.PaddedPieceSize `db:"piece_size"` StartEpoch abi.ChainEpoch `db:"deal_start_epoch"` @@ -414,17 +439,13 @@ type pieceDetails struct { CreatedAt *time.Time `db:"created_at"` } -func (p *PieceIngester) SectorStartSealing(ctx context.Context, sector abi.SectorNumber) error { - spt, err := p.getSealProofType() - if err != nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } - +func (p *PieceIngester) SectorStartSealing(ctx context.Context, maddr address.Address, sector abi.SectorNumber) error { comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { // Get current open sector pieces from DB var pieces []pieceDetails err = tx.Select(&pieces, ` SELECT + sp_id, sector_number, piece_size, piece_index, @@ -436,7 +457,7 @@ func (p *PieceIngester) SectorStartSealing(ctx context.Context, sector abi.Secto WHERE sp_id = $1 AND sector_number = $2 AND is_snap = false ORDER BY - piece_index DESC;`, p.mid, sector) + piece_index DESC;`, p.addToID[maddr], sector) if err != nil { return false, xerrors.Errorf("getting open sectors from DB") } @@ -445,7 +466,7 @@ func (p *PieceIngester) SectorStartSealing(ctx context.Context, sector abi.Secto return false, xerrors.Errorf("sector %d is not waiting to be sealed", sector) } - cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, p.mid, sector, spt) + cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, p.addToID[maddr], sector, p.minerDetails[p.addToID[maddr]].sealProof) if err != nil { return false, xerrors.Errorf("adding sector to pipeline: %w", err) @@ -455,7 +476,7 @@ func (p *PieceIngester) SectorStartSealing(ctx context.Context, sector abi.Secto return false, 
xerrors.Errorf("incorrect number of rows returned") } - _, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", p.mid, sector) + _, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", p.addToID[maddr], sector) if err != nil { return false, xerrors.Errorf("adding sector to pipeline: %w", err) } @@ -475,11 +496,12 @@ func (p *PieceIngester) SectorStartSealing(ctx context.Context, sector abi.Secto return nil } -func (p *PieceIngester) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, error) { +func (p *PieceIngester) getOpenSectors(tx *harmonydb.Tx, mid int64) ([]*openSector, error) { // Get current open sector pieces from DB var pieces []pieceDetails err := tx.Select(&pieces, ` SELECT + sp_id, sector_number, piece_size, piece_index, @@ -491,7 +513,7 @@ func (p *PieceIngester) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, error) WHERE sp_id = $1 AND is_snap = false ORDER BY - piece_index DESC;`, p.mid) + piece_index DESC;`, mid) if err != nil { return nil, xerrors.Errorf("getting open sectors from DB") } @@ -523,6 +545,7 @@ func (p *PieceIngester) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, error) sector, ok := sectorMap[pi.Sector] if !ok { sectorMap[pi.Sector] = &openSector{ + miner: pi.Miner, number: pi.Sector, currentSize: pi.Size, earliestStartEpoch: getStartEpoch(pi.StartEpoch, 0), @@ -551,11 +574,6 @@ func (p *PieceIngester) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, error) return os, nil } -func (p *PieceIngester) getSealProofType() (abi.RegisteredSealProof, error) { - nv, err := p.api.StateNetworkVersion(p.ctx, types.EmptyTSK) - if err != nil { - return 0, xerrors.Errorf("getting network version: %w", err) - } - - return miner.PreferredSealProofTypeFromWindowPoStType(nv, p.windowPoStProofType, p.synth) +func (p *PieceIngester) GetExpectedSealDuration() abi.ChainEpoch { + return p.expectedSealDuration } diff --git a/market/deal_ingest_snap.go b/market/storageingest/deal_ingest_snap.go similarity index 53% rename from 
market/deal_ingest_snap.go rename to market/storageingest/deal_ingest_snap.go index 89766d509..445f6d7aa 100644 --- a/market/deal_ingest_snap.go +++ b/market/storageingest/deal_ingest_snap.go @@ -1,9 +1,10 @@ -package market +package storageingest import ( "context" "encoding/json" "fmt" + "math" "net/http" "net/url" "time" @@ -19,6 +20,8 @@ import ( verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/curio/build" + "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/lotus/api" @@ -37,38 +40,75 @@ const IdealEndEpochBuffer = 2 * builtin.EpochsInDay var SnapImmutableDeadlineEpochsBuffer = abi.ChainEpoch(40) type PieceIngesterSnap struct { - ctx context.Context - db *harmonydb.DB - api PieceIngesterApi - miner address.Address - mid uint64 // miner ID - windowPoStProofType abi.RegisteredPoStProof - sectorSize abi.SectorSize - sealRightNow bool // Should be true only for CurioAPI AllocatePieceToSector method - maxWaitTime time.Duration + ctx context.Context + db *harmonydb.DB + api PieceIngesterApi + addToID map[address.Address]int64 + idToAddr map[abi.ActorID]address.Address + minerDetails map[int64]*mdetails + sectorSize abi.SectorSize + sealRightNow bool // Should be true only for CurioAPI AllocatePieceToSector method + maxWaitTime time.Duration + expectedSnapDuration abi.ChainEpoch } -func NewPieceIngesterSnap(ctx context.Context, db *harmonydb.DB, api PieceIngesterApi, maddr address.Address, sealRightNow bool, maxWaitTime time.Duration) (*PieceIngesterSnap, error) { - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return nil, err +func NewPieceIngesterSnap(ctx context.Context, db *harmonydb.DB, api PieceIngesterApi, miners []address.Address, sealRightNow bool, cfg *config.CurioConfig) (*PieceIngesterSnap, error) { + if len(miners) == 0 { 
+ return nil, xerrors.Errorf("no miners provided") } - mid, err := address.IDFromAddress(maddr) - if err != nil { - return nil, xerrors.Errorf("getting miner ID: %w", err) + addToID := make(map[address.Address]int64) + minerDetails := make(map[int64]*mdetails) + idToAddr := make(map[abi.ActorID]address.Address) + + for _, maddr := range miners { + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return nil, err + } + + mid, err := address.IDFromAddress(maddr) + if err != nil { + return nil, xerrors.Errorf("getting miner ID: %w", err) + } + + nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("getting network version: %w", err) + } + + proof, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, mi.WindowPoStProofType, false) + if err != nil { + return nil, xerrors.Errorf("getting preferred seal proof type: %w", err) + } + + proofInfo, ok := abi.SealProofInfos[proof] + if !ok { + return nil, xerrors.Errorf("getting seal proof type: %w", err) + } + + addToID[maddr] = int64(mid) + minerDetails[int64(mid)] = &mdetails{ + sealProof: proof, + sectorSize: mi.SectorSize, + updateProof: proofInfo.UpdateProof, + } + idToAddr[abi.ActorID(mid)] = maddr } + epochs := time.Duration(cfg.Market.StorageMarketConfig.MK12.ExpectedSnapSealDuration).Seconds() / float64(build.BlockDelaySecs) + expectedEpochs := math.Ceil(epochs) + pi := &PieceIngesterSnap{ - ctx: ctx, - db: db, - api: api, - sealRightNow: sealRightNow, - miner: maddr, - maxWaitTime: maxWaitTime, - sectorSize: mi.SectorSize, - windowPoStProofType: mi.WindowPoStProofType, - mid: mid, + ctx: ctx, + db: db, + api: api, + sealRightNow: sealRightNow, + maxWaitTime: time.Duration(cfg.Ingest.MaxDealWaitTime), + addToID: addToID, + minerDetails: minerDetails, + idToAddr: idToAddr, + expectedSnapDuration: abi.ChainEpoch(int64(expectedEpochs)), } go pi.start() @@ -99,57 +139,54 @@ func (p *PieceIngesterSnap) Seal() error { return 
xerrors.Errorf("getting chain head: %w", err) } - upt, err := p.getUpgradeProofType() - if err != nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } - shouldSeal := func(sector *openSector) bool { // Start sealing a sector if // 1. If sector is full // 2. We have been waiting for MaxWaitDuration - // 3. StartEpoch is less than 8 hours // todo: make this config? + // 3. StartEpoch is currentEpoch + expectedSealDuration if sector.currentSize == abi.PaddedPieceSize(p.sectorSize) { - log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "sector full") + log.Debugf("start sealing sector %d of miner %s: %s", sector.number, p.idToAddr[sector.miner].String(), "sector full") return true } if time.Since(*sector.openedAt) > p.maxWaitTime { - log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "MaxWaitTime reached") + log.Debugf("start sealing sector %d of miner %s: %s", sector.number, p.idToAddr[sector.miner].String(), "MaxWaitTime reached") return true } - if sector.earliestStartEpoch < head.Height()+abi.ChainEpoch(960) { - log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "earliest start epoch") + if sector.earliestStartEpoch < head.Height()+p.expectedSnapDuration { + log.Debugf("start sealing sector %d of miner %s: %s", sector.number, p.idToAddr[sector.miner].String(), "earliest start epoch") return true } return false } comm, err := p.db.BeginTransaction(p.ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - openSectors, err := p.getOpenSectors(tx) - if err != nil { - return false, err - } + for _, mid := range p.addToID { + openSectors, err := p.getOpenSectors(tx, mid) + if err != nil { + return false, err + } - for _, sector := range openSectors { - sector := sector - if shouldSeal(sector) { - // Start sealing the sector - cn, err := tx.Exec(`INSERT INTO sectors_snap_pipeline (sp_id, sector_number, upgrade_proof) VALUES ($1, $2, $3);`, p.mid, 
sector.number, upt) + for _, sector := range openSectors { + sector := sector + if shouldSeal(sector) { + // Start sealing the sector + cn, err := tx.Exec(`INSERT INTO sectors_snap_pipeline (sp_id, sector_number, upgrade_proof) VALUES ($1, $2, $3);`, mid, sector.number, p.minerDetails[mid].updateProof) - if err != nil { - return false, xerrors.Errorf("adding sector to pipeline: %w", err) - } + if err != nil { + return false, xerrors.Errorf("adding sector to pipeline: %w", err) + } - if cn != 1 { - return false, xerrors.Errorf("adding sector to pipeline: incorrect number of rows returned") - } + if cn != 1 { + return false, xerrors.Errorf("adding sector to pipeline: incorrect number of rows returned") + } - _, err = tx.Exec("SELECT transfer_and_delete_open_piece_snap($1, $2)", p.mid, sector.number) - if err != nil { - return false, xerrors.Errorf("adding sector to pipeline: %w", err) + _, err = tx.Exec("SELECT transfer_and_delete_open_piece_snap($1, $2)", mid, sector.number) + if err != nil { + return false, xerrors.Errorf("adding sector to pipeline: %w", err) + } } - } + } } return true, nil }, harmonydb.OptionRetry()) @@ -165,11 +202,7 @@ func (p *PieceIngesterSnap) Seal() error { return nil } -func (p *PieceIngesterSnap) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) { - if maddr != p.miner { - return api.SectorOffset{}, xerrors.Errorf("miner address doesn't match") - } - +func (p *PieceIngesterSnap) AllocatePieceToSector(ctx context.Context, tx *harmonydb.Tx, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) { var psize abi.PaddedPieceSize if piece.PieceActivationManifest != nil { @@ -272,7 +305,7 @@ func (p *PieceIngesterSnap) AllocatePieceToSector(ctx context.Context, maddr add if !p.sealRightNow { // Try to allocate the piece to an open sector - allocated, ret, 
err := p.allocateToExisting(ctx, piece, psize, rawSize, source, dataHdrJson, propJson, vd) + allocated, ret, err := p.allocateToExisting(ctx, tx, maddr, piece, psize, rawSize, source, dataHdrJson, propJson, vd) if err != nil { return api.SectorOffset{}, err } @@ -283,7 +316,7 @@ func (p *PieceIngesterSnap) AllocatePieceToSector(ctx context.Context, maddr add // non-mutable deadline is the current deadline and the next one. Doesn't matter if the current one was proven or not. - curDeadline, err := p.api.StateMinerProvingDeadline(ctx, p.miner, types.EmptyTSK) + curDeadline, err := p.api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) if err != nil { return api.SectorOffset{}, xerrors.Errorf("getting proving deadline: %w", err) } @@ -314,16 +347,15 @@ func (p *PieceIngesterSnap) AllocatePieceToSector(ctx context.Context, maddr add // /TX var num *int64 - _, err = p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - type CandidateSector struct { - Sector int64 `db:"sector_num"` - Expiration int64 `db:"expiration_epoch"` - } + type CandidateSector struct { + Sector int64 `db:"sector_num"` + Expiration int64 `db:"expiration_epoch"` + } - // maxExpiration = maybe(max sector expiration_epoch) - // minExpiration = piece.DealSchedule.EndEpoch - // ideal expiration = minExpiration + 2 days - rows, err := tx.Query(` + // maxExpiration = maybe(max sector expiration_epoch) + // minExpiration = piece.DealSchedule.EndEpoch + // ideal expiration = minExpiration + 2 days + rows, err := tx.Query(` SELECT sm.sector_num, sm.expiration_epoch FROM sectors_meta sm LEFT JOIN sectors_snap_pipeline ssp on sm.sp_id = ssp.sp_id and sm.sector_num = ssp.sector_number @@ -335,132 +367,122 @@ func (p *PieceIngesterSnap) AllocatePieceToSector(ctx context.Context, maddr add AND ($2 = 0 OR sm.expiration_epoch < $2) AND deadline IS NOT NULL AND deadline NOT IN ($5, $6, $7) ORDER BY ABS(sm.expiration_epoch - ($1 + $3)) - `, int64(piece.DealSchedule.EndEpoch), 
maxExpiration, IdealEndEpochBuffer, p.mid, dlIdxImmutableCur, dlIdxImmutableNext, dlIdxImmutableNextNext) + `, int64(piece.DealSchedule.EndEpoch), maxExpiration, IdealEndEpochBuffer, p.addToID[maddr], dlIdxImmutableCur, dlIdxImmutableNext, dlIdxImmutableNextNext) + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("allocating sector numbers: %w", err) + } + defer rows.Close() + + deadlineCache := map[uint64][]api.Partition{} + var tried int + var bestCandidate *CandidateSector + + for rows.Next() { + var candidate CandidateSector + err := rows.Scan(&candidate.Sector, &candidate.Expiration) if err != nil { - return false, xerrors.Errorf("allocating sector numbers: %w", err) + return api.SectorOffset{}, xerrors.Errorf("scanning row: %w", err) } - defer rows.Close() - - deadlineCache := map[uint64][]api.Partition{} - var tried int - var bestCandidate *CandidateSector + tried++ - for rows.Next() { - var candidate CandidateSector - err := rows.Scan(&candidate.Sector, &candidate.Expiration) - if err != nil { - return false, xerrors.Errorf("scanning row: %w", err) - } - tried++ + sloc, err := p.api.StateSectorPartition(ctx, maddr, abi.SectorNumber(candidate.Sector), types.EmptyTSK) + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("getting sector locations: %w", err) + } - sloc, err := p.api.StateSectorPartition(ctx, p.miner, abi.SectorNumber(candidate.Sector), types.EmptyTSK) + if _, ok := deadlineCache[sloc.Deadline]; !ok { + dls, err := p.api.StateMinerPartitions(ctx, maddr, sloc.Deadline, types.EmptyTSK) if err != nil { - return false, xerrors.Errorf("getting sector locations: %w", err) - } - - if _, ok := deadlineCache[sloc.Deadline]; !ok { - dls, err := p.api.StateMinerPartitions(ctx, p.miner, sloc.Deadline, types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting partitions: %w", err) - } - - deadlineCache[sloc.Deadline] = dls + return api.SectorOffset{}, xerrors.Errorf("getting partitions: %w", err) } - dl := 
deadlineCache[sloc.Deadline] - if len(dl) <= int(sloc.Partition) { - return false, xerrors.Errorf("partition %d not found in deadline %d", sloc.Partition, sloc.Deadline) - } - part := dl[sloc.Partition] + deadlineCache[sloc.Deadline] = dls + } - active, err := part.ActiveSectors.IsSet(uint64(candidate.Sector)) - if err != nil { - return false, xerrors.Errorf("checking active sectors: %w", err) - } - if !active { - live, err1 := part.LiveSectors.IsSet(uint64(candidate.Sector)) - faulty, err2 := part.FaultySectors.IsSet(uint64(candidate.Sector)) - recovering, err3 := part.RecoveringSectors.IsSet(uint64(candidate.Sector)) - if err1 != nil || err2 != nil || err3 != nil { - return false, xerrors.Errorf("checking sector status: %w, %w, %w", err1, err2, err3) - } + dl := deadlineCache[sloc.Deadline] + if len(dl) <= int(sloc.Partition) { + return api.SectorOffset{}, xerrors.Errorf("partition %d not found in deadline %d", sloc.Partition, sloc.Deadline) + } + part := dl[sloc.Partition] - log.Debugw("sector not active, skipping", "sector", candidate.Sector, "live", live, "faulty", faulty, "recovering", recovering) - continue + active, err := part.ActiveSectors.IsSet(uint64(candidate.Sector)) + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("checking active sectors: %w", err) + } + if !active { + live, err1 := part.LiveSectors.IsSet(uint64(candidate.Sector)) + faulty, err2 := part.FaultySectors.IsSet(uint64(candidate.Sector)) + recovering, err3 := part.RecoveringSectors.IsSet(uint64(candidate.Sector)) + if err1 != nil || err2 != nil || err3 != nil { + return api.SectorOffset{}, xerrors.Errorf("checking sector status: %w, %w, %w", err1, err2, err3) } - bestCandidate = &candidate - break - } - - if err := rows.Err(); err != nil { - return false, xerrors.Errorf("iterating rows: %w", err) + log.Debugw("sector not active, skipping", "sector", candidate.Sector, "live", live, "faulty", faulty, "recovering", recovering) + continue } - rows.Close() + bestCandidate = 
&candidate + break + } - if bestCandidate == nil { - minEpoch := piece.DealSchedule.EndEpoch - maxEpoch := abi.ChainEpoch(maxExpiration) + if err := rows.Err(); err != nil { + return api.SectorOffset{}, xerrors.Errorf("iterating rows: %w", err) + } - minEpochDays := (minEpoch - head.Height()) / builtin.EpochsInDay - maxEpochDays := (maxEpoch - head.Height()) / builtin.EpochsInDay + rows.Close() - return false, xerrors.Errorf("no suitable sectors found, minEpoch: %d, maxEpoch: %d, minExpirationDays: %d, maxExpirationDays: %d (avoiding deadlines %d,%d,%d)", minEpoch, maxEpoch, minEpochDays, maxEpochDays, dlIdxImmutableCur, dlIdxImmutableNext, dlIdxImmutableNextNext) - } + if bestCandidate == nil { + minEpoch := piece.DealSchedule.EndEpoch + maxEpoch := abi.ChainEpoch(maxExpiration) - candidate := *bestCandidate + minEpochDays := (minEpoch - head.Height()) / builtin.EpochsInDay + maxEpochDays := (maxEpoch - head.Height()) / builtin.EpochsInDay - si, err := p.api.StateSectorGetInfo(ctx, p.miner, abi.SectorNumber(candidate.Sector), types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting sector info: %w", err) - } + return api.SectorOffset{}, xerrors.Errorf("no suitable sectors found, minEpoch: %d, maxEpoch: %d, minExpirationDays: %d, maxExpirationDays: %d (avoiding deadlines %d,%d,%d)", minEpoch, maxEpoch, minEpochDays, maxEpochDays, dlIdxImmutableCur, dlIdxImmutableNext, dlIdxImmutableNextNext) + } - sectorLifeTime := si.Expiration - head.Height() - if sectorLifeTime < 0 { - return false, xerrors.Errorf("sector lifetime is negative!?") - } - if piece.DealSchedule.EndEpoch > si.Expiration { - return false, xerrors.Errorf("sector expiration is too soon: %d < %d", si.Expiration, piece.DealSchedule.EndEpoch) - } - if maxExpiration != 0 && si.Expiration > abi.ChainEpoch(maxExpiration) { - return false, xerrors.Errorf("sector expiration is too late: %d > %d", si.Expiration, maxExpiration) - } + candidate := *bestCandidate - // info log detailing EVERYTHING 
including all the epoch bounds - log.Infow("allocating piece to sector", - "sector", candidate.Sector, - "expiration", si.Expiration, - "sectorLifeTime", sectorLifeTime, - "dealStartEpoch", piece.DealSchedule.StartEpoch, - "dealEndEpoch", piece.DealSchedule.EndEpoch, - "maxExpiration", maxExpiration, - "avoidingDeadlines", []int{int(dlIdxImmutableCur), int(dlIdxImmutableNext), int(dlIdxImmutableNextNext)}, - ) + si, err := p.api.StateSectorGetInfo(ctx, maddr, abi.SectorNumber(candidate.Sector), types.EmptyTSK) + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("getting sector info: %w", err) + } - _, err = tx.Exec(`SELECT insert_snap_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, - p.mid, candidate.Sector, 0, - piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson) - if err != nil { - return false, xerrors.Errorf("adding deal to sector: %w", err) - } + sectorLifeTime := si.Expiration - head.Height() + if sectorLifeTime < 0 { + return api.SectorOffset{}, xerrors.Errorf("sector lifetime is negative!?") + } + if piece.DealSchedule.EndEpoch > si.Expiration { + return api.SectorOffset{}, xerrors.Errorf("sector expiration is too soon: %d < %d", si.Expiration, piece.DealSchedule.EndEpoch) + } + if maxExpiration != 0 && si.Expiration > abi.ChainEpoch(maxExpiration) { + return api.SectorOffset{}, xerrors.Errorf("sector expiration is too late: %d > %d", si.Expiration, maxExpiration) + } - num = &candidate.Sector + // info log detailing EVERYTHING including all the epoch bounds + log.Infow("allocating piece to sector", + "sector", candidate.Sector, + "expiration", si.Expiration, + "sectorLifeTime", sectorLifeTime, + "dealStartEpoch", piece.DealSchedule.StartEpoch, + "dealEndEpoch", piece.DealSchedule.EndEpoch, + "maxExpiration", maxExpiration, + "avoidingDeadlines", []int{int(dlIdxImmutableCur), 
int(dlIdxImmutableNext), int(dlIdxImmutableNextNext)}, + ) - return true, nil - }, harmonydb.OptionRetry()) + _, err = tx.Exec(`SELECT insert_snap_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, + p.addToID[maddr], candidate.Sector, 0, + piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size, + source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, + piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson) if err != nil { - return api.SectorOffset{}, xerrors.Errorf("allocating sector numbers: %w", err) + return api.SectorOffset{}, xerrors.Errorf("adding deal to sector: %w", err) } - if num == nil { - return api.SectorOffset{}, xerrors.Errorf("expected one sector number") - } + num = &candidate.Sector if p.sealRightNow { - err = p.SectorStartSealing(ctx, abi.SectorNumber(*num)) + err = p.SectorStartSealing(ctx, maddr, abi.SectorNumber(*num)) if err != nil { return api.SectorOffset{}, xerrors.Errorf("SectorStartSealing: %w", err) } @@ -472,7 +494,7 @@ func (p *PieceIngesterSnap) AllocatePieceToSector(ctx context.Context, maddr add }, nil } -func (p *PieceIngesterSnap) allocateToExisting(ctx context.Context, piece lpiece.PieceDealInfo, psize abi.PaddedPieceSize, rawSize int64, source url.URL, dataHdrJson, propJson []byte, vd verifiedDeal) (bool, api.SectorOffset, error) { +func (p *PieceIngesterSnap) allocateToExisting(ctx context.Context, tx *harmonydb.Tx, maddr address.Address, piece lpiece.PieceDealInfo, psize abi.PaddedPieceSize, rawSize int64, source url.URL, dataHdrJson, propJson []byte, vd verifiedDeal) (bool, api.SectorOffset, error) { var ret api.SectorOffset var allocated bool var rerr error @@ -482,83 +504,68 @@ func (p *PieceIngesterSnap) allocateToExisting(ctx context.Context, piece lpiece return false, api.SectorOffset{}, xerrors.Errorf("getting chain head: %w", err) } - comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - openSectors, err := p.getOpenSectors(tx) - if 
err != nil { - return false, err - } - - for _, sec := range openSectors { - sec := sec - if sec.currentSize+psize <= abi.PaddedPieceSize(p.sectorSize) { - if vd.isVerified { - si, err := p.api.StateSectorGetInfo(ctx, p.miner, sec.number, types.EmptyTSK) - if err != nil { - log.Errorw("getting sector info", "error", err, "sector", sec.number, "miner", p.miner) - continue - } - - sectorLifeTime := si.Expiration - head.Height() - if sectorLifeTime < 0 { - log.Errorw("sector lifetime is negative", "sector", sec.number, "miner", p.miner, "lifetime", sectorLifeTime) - continue - } + openSectors, err := p.getOpenSectors(tx, p.addToID[maddr]) + if err != nil { + return false, api.SectorOffset{}, err + } - // Allocation's TMin must fit in sector and TMax should be at least sector lifetime or more - // Based on https://github.com/filecoin-project/builtin-actors/blob/a0e34d22665ac8c84f02fea8a099216f29ffaeeb/actors/verifreg/src/lib.rs#L1071-L1086 - if sectorLifeTime <= vd.tmin && sectorLifeTime >= vd.tmax { - continue - } + for _, sec := range openSectors { + sec := sec + if sec.currentSize+psize <= abi.PaddedPieceSize(p.sectorSize) { + if vd.isVerified { + si, err := p.api.StateSectorGetInfo(ctx, maddr, sec.number, types.EmptyTSK) + if err != nil { + log.Errorw("getting sector info", "error", err, "sector", sec.number, "miner", maddr) + continue } - ret.Sector = sec.number - ret.Offset = sec.currentSize - - // Insert DDO deal to DB for the sector - cn, err := tx.Exec(`SELECT insert_snap_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, - p.mid, sec.number, sec.index+1, - piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson) - - if err != nil { - return false, fmt.Errorf("adding deal to sector: %v", err) + sectorLifeTime := si.Expiration - head.Height() + if sectorLifeTime < 0 { + log.Errorw("sector lifetime is 
negative", "sector", sec.number, "miner", maddr, "lifetime", sectorLifeTime) + continue } - if cn != 1 { - return false, xerrors.Errorf("expected one piece") + // Allocation's TMin must fit in sector and TMax should be at least sector lifetime or more + // Based on https://github.com/filecoin-project/builtin-actors/blob/a0e34d22665ac8c84f02fea8a099216f29ffaeeb/actors/verifreg/src/lib.rs#L1071-L1086 + if sectorLifeTime <= vd.tmin && sectorLifeTime >= vd.tmax { + continue } + } - allocated = true - break + ret.Sector = sec.number + ret.Offset = sec.currentSize + + // Insert DDO deal to DB for the sector + cn, err := tx.Exec(`SELECT insert_snap_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, + p.addToID[maddr], sec.number, sec.index+1, + piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size, + source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, + piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson) + + if err != nil { + return false, api.SectorOffset{}, fmt.Errorf("adding deal to sector: %v", err) } - } - return true, nil - }, harmonydb.OptionRetry()) - if !comm { - rerr = xerrors.Errorf("allocating piece to a sector: commit failed") - } + if cn != 1 { + return false, api.SectorOffset{}, xerrors.Errorf("expected one piece") + } - if err != nil { - rerr = xerrors.Errorf("allocating piece to a sector: %w", err) + allocated = true + break + } } return allocated, ret, rerr } -func (p *PieceIngesterSnap) SectorStartSealing(ctx context.Context, sector abi.SectorNumber) error { - upt, err := p.getUpgradeProofType() - if err != nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } - +func (p *PieceIngesterSnap) SectorStartSealing(ctx context.Context, maddr address.Address, sector abi.SectorNumber) error { comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { // Get current open sector pieces from DB var pieces []pieceDetails err = tx.Select(&pieces, ` SELECT + sp_id, 
sector_number, piece_size, piece_index, @@ -570,7 +577,7 @@ func (p *PieceIngesterSnap) SectorStartSealing(ctx context.Context, sector abi.S WHERE sp_id = $1 AND sector_number = $2 AND is_snap = true ORDER BY - piece_index DESC;`, p.mid, sector) + piece_index DESC;`, p.addToID[maddr], sector) if err != nil { return false, xerrors.Errorf("getting open sectors from DB") } @@ -579,7 +586,7 @@ func (p *PieceIngesterSnap) SectorStartSealing(ctx context.Context, sector abi.S return false, xerrors.Errorf("sector %d is not waiting to be sealed", sector) } - cn, err := tx.Exec(`INSERT INTO sectors_snap_pipeline (sp_id, sector_number, upgrade_proof) VALUES ($1, $2, $3);`, p.mid, sector, upt) + cn, err := tx.Exec(`INSERT INTO sectors_snap_pipeline (sp_id, sector_number, upgrade_proof) VALUES ($1, $2, $3);`, p.addToID[maddr], sector, p.minerDetails[p.addToID[maddr]].updateProof) if err != nil { return false, xerrors.Errorf("adding sector to pipeline: %w", err) @@ -589,7 +596,7 @@ func (p *PieceIngesterSnap) SectorStartSealing(ctx context.Context, sector abi.S return false, xerrors.Errorf("incorrect number of rows returned") } - _, err = tx.Exec("SELECT transfer_and_delete_open_piece_snap($1, $2)", p.mid, sector) + _, err = tx.Exec("SELECT transfer_and_delete_open_piece_snap($1, $2)", p.addToID[maddr], sector) if err != nil { return false, xerrors.Errorf("adding sector to pipeline: %w", err) } @@ -609,11 +616,12 @@ func (p *PieceIngesterSnap) SectorStartSealing(ctx context.Context, sector abi.S return nil } -func (p *PieceIngesterSnap) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, error) { +func (p *PieceIngesterSnap) getOpenSectors(tx *harmonydb.Tx, mid int64) ([]*openSector, error) { // Get current open sector pieces from DB var pieces []pieceDetails err := tx.Select(&pieces, ` SELECT + sp_id, sector_number, piece_size, piece_index, @@ -625,7 +633,7 @@ func (p *PieceIngesterSnap) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, err WHERE sp_id = $1 AND is_snap = true ORDER 
BY - piece_index DESC;`, p.mid) + piece_index DESC;`, mid) if err != nil { return nil, xerrors.Errorf("getting open sectors from DB") } @@ -657,6 +665,7 @@ func (p *PieceIngesterSnap) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, err sector, ok := sectorMap[pi.Sector] if !ok { sectorMap[pi.Sector] = &openSector{ + miner: pi.Miner, number: pi.Sector, currentSize: pi.Size, earliestStartEpoch: getStartEpoch(pi.StartEpoch, 0), @@ -685,21 +694,6 @@ func (p *PieceIngesterSnap) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, err return os, nil } -func (p *PieceIngesterSnap) getUpgradeProofType() (abi.RegisteredUpdateProof, error) { - nv, err := p.api.StateNetworkVersion(p.ctx, types.EmptyTSK) - if err != nil { - return 0, xerrors.Errorf("getting network version: %w", err) - } - - spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, p.windowPoStProofType, false) - if err != nil { - return 0, xerrors.Errorf("getting seal proof type: %w", err) - } - - proofInfo, ok := abi.SealProofInfos[spt] - if !ok { - return 0, xerrors.Errorf("getting seal proof type: %w", err) - } - - return proofInfo.UpdateProof, nil +func (p *PieceIngesterSnap) GetExpectedSealDuration() abi.ChainEpoch { + return p.expectedSnapDuration } diff --git a/market/seal_now.go b/market/storageingest/seal_now.go similarity index 99% rename from market/seal_now.go rename to market/storageingest/seal_now.go index 66a50632d..5b6b83dd7 100644 --- a/market/seal_now.go +++ b/market/storageingest/seal_now.go @@ -1,4 +1,4 @@ -package market +package storageingest import ( "context" diff --git a/scripts/docgen/docgen.go b/scripts/docgen/docgen.go index 73ad48c1c..8781c1466 100644 --- a/scripts/docgen/docgen.go +++ b/scripts/docgen/docgen.go @@ -9,10 +9,8 @@ import ( "path/filepath" "reflect" "strings" - "time" "unicode" - "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/filecoin-project/go-address" @@ -21,11 +19,10 @@ import ( "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" 
"github.com/filecoin-project/curio/api" + storiface "github.com/filecoin-project/curio/lib/storiface" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/sealer/sealtasks" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var ExampleValues = map[reflect.Type]interface{}{ @@ -213,21 +210,6 @@ func init() { addExample(map[storiface.ID]string{ "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path", }) - addExample(map[uuid.UUID][]storiface.WorkerJob{ - uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): { - { - ID: storiface.CallID{ - Sector: abi.SectorID{Miner: 1000, Number: 100}, - ID: uuid.MustParse("76081ba0-61bd-45a5-bc08-af05f1c26e5d"), - }, - Sector: abi.SectorID{Miner: 1000, Number: 100}, - Task: sealtasks.TTPreCommit2, - RunWait: 0, - Start: time.Unix(1605172927, 0).UTC(), - Hostname: "host", - }, - }, - }) //addExample(map[uuid.UUID]storiface.WorkerStats{ // uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): { // Info: storiface.WorkerInfo{ diff --git a/tasks/gc/pipeline_meta_gc.go b/tasks/gc/pipeline_meta_gc.go index 2b0fecb9d..39d02af54 100644 --- a/tasks/gc/pipeline_meta_gc.go +++ b/tasks/gc/pipeline_meta_gc.go @@ -30,6 +30,9 @@ func (s *PipelineGC) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done if err := s.cleanupUpgrade(); err != nil { return false, xerrors.Errorf("cleanupUpgrade: %w", err) } + if err := s.cleanupMK12DealPipeline(); err != nil { + return false, xerrors.Errorf("cleanupMK12DealPipeline: %w", err) + } return true, nil } @@ -149,5 +152,19 @@ func (s *PipelineGC) cleanupUpgrade() error { return nil } +func (s *PipelineGC) cleanupMK12DealPipeline() error { + // Remove market_mk12_deal_pipeline entries where: + // sealed is true and indexed is true + ctx := context.Background() + + // Execute the query + _, err := s.db.Exec(ctx, `DELETE FROM market_mk12_deal_pipeline WHERE complete = TRUE;`) + if err != nil { + return 
xerrors.Errorf("failed to clean up sealed deals: %w", err) + } + + return nil +} + var _ harmonytask.TaskInterface = &PipelineGC{} var _ = harmonytask.Reg(&PipelineGC{}) diff --git a/tasks/gc/storage_endpoint_gc.go b/tasks/gc/storage_endpoint_gc.go index f923bb541..f6bb00b1d 100644 --- a/tasks/gc/storage_endpoint_gc.go +++ b/tasks/gc/storage_endpoint_gc.go @@ -14,10 +14,10 @@ import ( "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/lib/paths" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var log = logging.Logger("curiogc") diff --git a/tasks/gc/storage_gc_mark.go b/tasks/gc/storage_gc_mark.go index 91cecf0ed..9a184bb84 100644 --- a/tasks/gc/storage_gc_mark.go +++ b/tasks/gc/storage_gc_mark.go @@ -16,11 +16,11 @@ import ( "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/lib/curiochain" "github.com/filecoin-project/curio/lib/paths" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) const StorageGCInterval = 9 * time.Minute diff --git a/tasks/gc/storage_gc_sweep.go b/tasks/gc/storage_gc_sweep.go index c7b0230a0..1666fdf21 100644 --- a/tasks/gc/storage_gc_sweep.go +++ b/tasks/gc/storage_gc_sweep.go @@ -13,8 +13,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/lib/paths" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) type StorageGCSweep struct { 
diff --git a/tasks/indexing/task_indexing.go b/tasks/indexing/task_indexing.go new file mode 100644 index 000000000..872cec8ea --- /dev/null +++ b/tasks/indexing/task_indexing.go @@ -0,0 +1,347 @@ +package indexing + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + carv2 "github.com/ipld/go-car/v2" + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/lib/pieceprovider" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/market/indexstore" +) + +var log = logging.Logger("indexing") + +type IndexingTask struct { + db *harmonydb.DB + indexStore *indexstore.IndexStore + pieceProvider *pieceprovider.PieceProvider + sc *ffi.SealCalls + cfg *config.CurioConfig + insertConcurrency int + insertBatchSize int +} + +func NewIndexingTask(db *harmonydb.DB, sc *ffi.SealCalls, indexStore *indexstore.IndexStore, pieceProvider *pieceprovider.PieceProvider, cfg *config.CurioConfig) *IndexingTask { + + return &IndexingTask{ + db: db, + indexStore: indexStore, + pieceProvider: pieceProvider, + sc: sc, + cfg: cfg, + insertConcurrency: cfg.Market.StorageMarketConfig.Indexing.InsertConcurrency, + insertBatchSize: cfg.Market.StorageMarketConfig.Indexing.InsertBatchSize, + } +} + +type itask struct { + UUID string `db:"uuid"` + SpID int64 `db:"sp_id"` + Sector abi.SectorNumber `db:"sector_number"` + Proof abi.RegisteredSealProof `db:"reg_seal_proof"` + PieceCid string `db:"piece_cid"` + Size abi.PaddedPieceSize `db:"piece_size"` + Offset int64 `db:"piece_offset"` + ChainID 
abi.DealID `db:"chain_deal_id"` + RawSize int64 `db:"raw_size"` + ShouldIndex bool `db:"should_index"` +} + +func (i *IndexingTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + + var tasks []itask + + ctx := context.Background() + + err = i.db.Select(ctx, &tasks, `SELECT + uuid, + sp_id, + sector_number, + piece_cid, + piece_size, + piece_offset, + reg_seal_proof, + chain_deal_id, + raw_size, + should_index + FROM + market_mk12_deal_pipeline + WHERE + indexing_task_id = $1;`, taskID) + if err != nil { + return false, xerrors.Errorf("getting indexing params: %w", err) + } + + if len(tasks) != 1 { + return false, xerrors.Errorf("expected 1 sector params, got %d", len(tasks)) + } + + task := tasks[0] + + // Check if piece is already indexed + var indexed bool + err = i.db.Select(ctx, &indexed, `SELECT indexed FROM market_piece_metadata WHERE piece_cid = $1`, task.PieceCid) + if err != nil { + return false, xerrors.Errorf("checking if piece is already indexed: %w", err) + } + + // Return early if already indexed or should not be indexed + if indexed || !task.ShouldIndex { + err = i.recordCompletion(ctx, task, taskID, false) + if err != nil { + return false, err + } + return true, nil + } + + unsealed, err := i.pieceProvider.IsUnsealed(ctx, storiface.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(task.SpID), + Number: task.Sector, + }, + ProofType: task.Proof, + }, storiface.UnpaddedByteIndex(task.Offset), task.Size.Unpadded()) + if err != nil { + return false, xerrors.Errorf("checking if sector is unsealed :%w", err) + } + + if !unsealed { + return false, xerrors.Errorf("sector %d for miner %d is not unsealed", task.Sector, task.SpID) + } + + pieceCid, err := cid.Parse(task.PieceCid) + if err != nil { + return false, xerrors.Errorf("parsing piece CID: %w", err) + } + + reader, err := i.pieceProvider.ReadPiece(ctx, storiface.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(task.SpID), + Number: task.Sector, + }, + ProofType: 
task.Proof, + }, storiface.UnpaddedByteIndex(task.Offset), abi.UnpaddedPieceSize(task.Size), pieceCid) + + if err != nil { + return false, xerrors.Errorf("getting piece reader: %w", err) + } + + defer reader.Close() + + startTime := time.Now() + + dealCfg := i.cfg.Market.StorageMarketConfig + chanSize := dealCfg.Indexing.InsertConcurrency * dealCfg.Indexing.InsertBatchSize + + recs := make(chan indexstore.Record, chanSize) + + //recs := make([]indexstore.Record, 0, chanSize) + opts := []carv2.Option{carv2.ZeroLengthSectionAsEOF(true)} + blockReader, err := carv2.NewBlockReader(reader, opts...) + if err != nil { + return false, fmt.Errorf("getting block reader over piece: %w", err) + } + + var eg errgroup.Group + + eg.Go(func() error { + serr := i.indexStore.AddIndex(ctx, pieceCid, recs) + if serr != nil { + return xerrors.Errorf("adding index to DB: %w", err) + } + return nil + }) + + blockMetadata, err := blockReader.SkipNext() + for err == nil { + recs <- indexstore.Record{ + Cid: blockMetadata.Cid, + OffsetSize: indexstore.OffsetSize{ + Offset: blockMetadata.SourceOffset, + Size: blockMetadata.Size, + }, + } + blockMetadata, err = blockReader.SkipNext() + } + if !errors.Is(err, io.EOF) { + return false, fmt.Errorf("generating index for piece: %w", err) + } + + // Close the channel + close(recs) + + // Wait till AddIndex is finished + err = eg.Wait() + if err != nil { + return false, xerrors.Errorf("adding index to DB: %w", err) + } + + log.Infof("Indexing deal %s took %d seconds", task.UUID, time.Since(startTime).Seconds()) + + err = i.recordCompletion(ctx, task, taskID, true) + if err != nil { + return false, err + } + + return true, nil +} + +// recordCompletion add the piece metadata and piece deal to the DB and +// records the completion of an indexing task in the database +func (i *IndexingTask) recordCompletion(ctx context.Context, task itask, taskID harmonytask.TaskID, indexed bool) error { + _, err := i.db.Exec(ctx, `SELECT process_piece_deal($1, $2, $3, 
$4, $5, $6, $7, $8, $9)`, + task.UUID, task.PieceCid, true, task.SpID, task.Sector, task.Offset, task.Size, task.RawSize, indexed) + if err != nil { + return xerrors.Errorf("failed to update piece metadata and piece deal for deal %s: %w", task.UUID, err) + } + + n, err := i.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET indexed = TRUE, complete = TRUE WHERE uuid = $1`, task.UUID) + if err != nil { + return xerrors.Errorf("store indexing success: updating pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("store indexing success: updated %d rows", n) + } + return nil +} + +func (i *IndexingTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + var tasks []struct { + TaskID harmonytask.TaskID `db:"task_id"` + SpID int64 `db:"sp_id"` + SectorNumber int64 `db:"sector_number"` + StorageID string `db:"storage_id"` + } + + if storiface.FTUnsealed != 1 { + panic("storiface.FTUnsealed != 1") + } + + ctx := context.Background() + + indIDs := make([]int64, len(ids)) + for i, id := range ids { + indIDs[i] = int64(id) + } + + err := i.db.Select(ctx, &tasks, ` + SELECT dp.indexing_task_id, dp.sp_id, dp.sector_number, l.storage_id FROM market_mk12_deal_pipeline dp + INNER JOIN sector_location l ON dp.sp_id = l.miner_id AND dp.sector_number = l.sector_num + WHERE dp.indexing_task_id = ANY ($1) AND l.sector_filetype = 1 +`, indIDs) + if err != nil { + return nil, xerrors.Errorf("getting tasks: %w", err) + } + + ls, err := i.sc.LocalStorage(ctx) + if err != nil { + return nil, xerrors.Errorf("getting local storage: %w", err) + } + + acceptables := map[harmonytask.TaskID]bool{} + + for _, t := range ids { + acceptables[t] = true + } + + for _, t := range tasks { + if _, ok := acceptables[t.TaskID]; !ok { + continue + } + + for _, l := range ls { + if string(l.ID) == t.StorageID { + return &t.TaskID, nil + } + } + } + + return nil, nil +} + +func (i *IndexingTask) TypeDetails() harmonytask.TaskTypeDetails { + + return 
harmonytask.TaskTypeDetails{ + Name: "Indexing", + Cost: resources.Resources{ + Cpu: 1, + Ram: uint64(i.insertBatchSize * i.insertConcurrency * 56 * 2), + }, + MaxFailures: 3, + IAmBored: passcall.Every(10*time.Second, func(taskFunc harmonytask.AddTaskFunc) error { + return i.schedule(context.Background(), taskFunc) + }), + } +} + +func (i *IndexingTask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + // schedule submits + var stop bool + for !stop { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + stop = true // assume we're done until we find a task to schedule + + var pendings []struct { + UUID string `db:"uuid"` + } + + err := i.db.Select(ctx, &pendings, `SELECT uuid FROM market_mk12_deal_pipeline + WHERE sealed = TRUE + AND indexing_task_id IS NULL + ORDER BY indexing_created_at ASC;`) + if err != nil { + return false, xerrors.Errorf("getting pending indexing tasks: %w", err) + } + + if len(pendings) == 0 { + return false, nil + } + + pending := pendings[0] + + _, err = tx.Exec(`UPDATE market_mk12_deal_pipeline SET indexing_task_id = $1 + WHERE indexing_task_id IS NULL AND uuid = $2`, id, pending.UUID) + if err != nil { + return false, xerrors.Errorf("updating indexing task id: %w", err) + } + + stop = false // we found a task to schedule, keep going + return true, nil + }) + } + + return nil +} + +func (i *IndexingTask) Adder(taskFunc harmonytask.AddTaskFunc) { +} + +func (i *IndexingTask) GetSpid(db *harmonydb.DB, taskID int64) string { + var spid string + err := db.QueryRow(context.Background(), `SELECT sp_id FROM market_mk12_deal_pipeline WHERE indexing_task_id = $1`, taskID).Scan(&spid) + if err != nil { + log.Errorf("getting spid: %s", err) + return "" + } + return spid +} + +var _ = harmonytask.Reg(&IndexingTask{}) +var _ harmonytask.TaskInterface = &IndexingTask{} diff --git a/tasks/metadata/task_sector_expirations.go b/tasks/metadata/task_sector_expirations.go index 
c50e21e5b..9761b73fd 100644 --- a/tasks/metadata/task_sector_expirations.go +++ b/tasks/metadata/task_sector_expirations.go @@ -5,6 +5,8 @@ import ( "time" cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -20,6 +22,8 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) +var log = logging.Logger("metadata") + const SectorMetadataRefreshInterval = 191 * time.Minute type SectorMetadataNodeAPI interface { @@ -62,6 +66,47 @@ func (s *SectorMetadata) Do(taskID harmonytask.TaskID, stillOwned func() bool) ( astor := adt.WrapStore(ctx, cbor.NewCborStore(s.bstore)) minerStates := map[abi.ActorID]miner.State{} + type partitionUpdate struct { + SpID uint64 + SectorNum uint64 + Partition uint64 + Deadline uint64 + } + + const batchSize = 1000 + updateBatch := make([]partitionUpdate, 0, batchSize) + total := 0 + + flushBatch := func() error { + if len(updateBatch) == 0 { + return nil + } + + total += len(updateBatch) + log.Infow("updating sector partitions", "total", total) + + _, err := s.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + batch := &pgx.Batch{} + for _, update := range updateBatch { + batch.Queue("UPDATE sectors_meta SET partition = $1, deadline = $2 WHERE sp_id = $3 AND sector_num = $4", + update.Partition, update.Deadline, update.SpID, update.SectorNum) + } + + br := tx.SendBatch(ctx, batch) + defer br.Close() + + for i := 0; i < batch.Len(); i++ { + _, err := br.Exec() + if err != nil { + return false, xerrors.Errorf("executing batch update %d: %w", i, err) + } + } + + return true, nil + }, harmonydb.OptionRetry()) + return err + } + for _, sector := range sectors { maddr, err := address.NewIDAddress(sector.SpID) if err != nil { @@ -70,7 +115,6 @@ func (s *SectorMetadata) Do(taskID harmonytask.TaskID, stillOwned func() bool) ( mstate, ok := minerStates[abi.ActorID(sector.SpID)] if !ok { - act, err := 
s.api.StateGetActor(ctx, maddr, types.EmptyTSK) if err != nil { return false, xerrors.Errorf("getting miner actor: %w", err) @@ -106,14 +150,28 @@ func (s *SectorMetadata) Do(taskID harmonytask.TaskID, stillOwned func() bool) ( } if loc != nil { - _, err := s.db.Exec(ctx, "update sectors_meta set partition = $1, deadline = $2 where sp_id = $3 and sector_num = $4", loc.Partition, loc.Deadline, sector.SpID, sector.SectorNum) - if err != nil { - return false, xerrors.Errorf("updating sector partition: %w", err) + updateBatch = append(updateBatch, partitionUpdate{ + SpID: sector.SpID, + SectorNum: sector.SectorNum, + Partition: loc.Partition, + Deadline: loc.Deadline, + }) + + if len(updateBatch) >= batchSize { + if err := flushBatch(); err != nil { + return false, xerrors.Errorf("flushing batch: %w", err) + } + updateBatch = updateBatch[:0] } } } } + // Flush any remaining updates + if err := flushBatch(); err != nil { + return false, xerrors.Errorf("flushing final batch: %w", err) + } + return true, nil } diff --git a/tasks/piece/task_cleanup_piece.go b/tasks/piece/task_cleanup_piece.go index c650b4a0c..205221ee8 100644 --- a/tasks/piece/task_cleanup_piece.go +++ b/tasks/piece/task_cleanup_piece.go @@ -11,8 +11,7 @@ import ( "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/promise" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + "github.com/filecoin-project/curio/lib/storiface" ) type CleanupPieceTask struct { diff --git a/tasks/piece/task_park_piece.go b/tasks/piece/task_park_piece.go index 4775c670d..d94e1dbc5 100644 --- a/tasks/piece/task_park_piece.go +++ b/tasks/piece/task_park_piece.go @@ -3,6 +3,7 @@ package piece import ( "context" "encoding/json" + "net/http" "strconv" "time" @@ -17,8 +18,7 @@ import ( ffi2 "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/promise" - - 
"github.com/filecoin-project/lotus/storage/sealer/storiface" + "github.com/filecoin-project/curio/lib/storiface" ) var log = logging.Logger("cu-piece") @@ -159,7 +159,13 @@ func (p *ParkPieceTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (d for i := range refData { if refData[i].DataURL != "" { - upr := dealdata.NewUrlReader(refData[i].DataURL, pieceRawSize) + hdrs := make(http.Header) + err = json.Unmarshal(refData[i].DataHeaders, &hdrs) + if err != nil { + return false, xerrors.Errorf("unmarshaling reference data headers: %w", err) + } + upr := dealdata.NewUrlReader(refData[i].DataURL, hdrs, pieceRawSize) + defer func() { _ = upr.Close() }() diff --git a/tasks/seal/sector_num_alloc.go b/tasks/seal/sector_num_alloc.go index 813e673ae..64014694a 100644 --- a/tasks/seal/sector_num_alloc.go +++ b/tasks/seal/sector_num_alloc.go @@ -19,7 +19,7 @@ type AllocAPI interface { StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error) } -func AllocateSectorNumbers(ctx context.Context, a AllocAPI, db *harmonydb.DB, maddr address.Address, count int, txcb ...func(*harmonydb.Tx, []abi.SectorNumber) (bool, error)) ([]abi.SectorNumber, error) { +func AllocateSectorNumbers(ctx context.Context, a AllocAPI, tx *harmonydb.Tx, maddr address.Address, count int) ([]abi.SectorNumber, error) { chainAlloc, err := a.StateMinerAllocated(ctx, maddr, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("getting on-chain allocated sector numbers: %w", err) @@ -32,96 +32,72 @@ func AllocateSectorNumbers(ctx context.Context, a AllocAPI, db *harmonydb.DB, ma var res []abi.SectorNumber - comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - res = nil // reset result in case of retry + // query from db, if exists unmarsal to bitfield + var dbAllocated bitfield.BitField + var rawJson []byte - // query from db, if exists unmarsal to bitfield - var dbAllocated bitfield.BitField - var rawJson []byte - - err = 
tx.QueryRow("SELECT COALESCE(allocated, '[0]') from sectors_allocated_numbers sa FULL OUTER JOIN (SELECT 1) AS d ON FALSE WHERE sp_id = $1 OR sp_id IS NULL ORDER BY sp_id LIMIT 1", mid).Scan(&rawJson) - if err != nil { - return false, xerrors.Errorf("querying allocated sector numbers: %w", err) - } - - if rawJson != nil { - err = dbAllocated.UnmarshalJSON(rawJson) - if err != nil { - return false, xerrors.Errorf("unmarshaling allocated sector numbers: %w", err) - } - } - - if err := dbAllocated.UnmarshalJSON(rawJson); err != nil { - return false, xerrors.Errorf("unmarshaling allocated sector numbers: %w", err) - } - - merged, err := bitfield.MergeBitFields(*chainAlloc, dbAllocated) - if err != nil { - return false, xerrors.Errorf("merging allocated sector numbers: %w", err) - } - - allAssignable, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{Runs: []rlepluslazy.Run{ - { - Val: true, - Len: abi.MaxSectorNumber, - }, - }}) - if err != nil { - return false, xerrors.Errorf("creating assignable sector numbers: %w", err) - } - - inverted, err := bitfield.SubtractBitField(allAssignable, merged) - if err != nil { - return false, xerrors.Errorf("subtracting allocated sector numbers: %w", err) - } + err = tx.QueryRow("SELECT COALESCE(allocated, '[0]') from sectors_allocated_numbers sa FULL OUTER JOIN (SELECT 1) AS d ON FALSE WHERE sp_id = $1 OR sp_id IS NULL ORDER BY sp_id LIMIT 1", mid).Scan(&rawJson) + if err != nil { + return res, xerrors.Errorf("querying allocated sector numbers: %w", err) + } - toAlloc, err := inverted.Slice(0, uint64(count)) + if rawJson != nil { + err = dbAllocated.UnmarshalJSON(rawJson) if err != nil { - return false, xerrors.Errorf("getting slice of allocated sector numbers: %w", err) + return res, xerrors.Errorf("unmarshaling allocated sector numbers: %w", err) } + } - err = toAlloc.ForEach(func(u uint64) error { - res = append(res, abi.SectorNumber(u)) - return nil - }) - if err != nil { - return false, xerrors.Errorf("iterating 
allocated sector numbers: %w", err) - } + if err := dbAllocated.UnmarshalJSON(rawJson); err != nil { + return res, xerrors.Errorf("unmarshaling allocated sector numbers: %w", err) + } - toPersist, err := bitfield.MergeBitFields(merged, toAlloc) - if err != nil { - return false, xerrors.Errorf("merging allocated sector numbers: %w", err) - } + merged, err := bitfield.MergeBitFields(*chainAlloc, dbAllocated) + if err != nil { + return res, xerrors.Errorf("merging allocated sector numbers: %w", err) + } - rawJson, err = toPersist.MarshalJSON() - if err != nil { - return false, xerrors.Errorf("marshaling allocated sector numbers: %w", err) - } + allAssignable, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{Runs: []rlepluslazy.Run{ + { + Val: true, + Len: abi.MaxSectorNumber, + }, + }}) + if err != nil { + return res, xerrors.Errorf("creating assignable sector numbers: %w", err) + } - _, err = tx.Exec("INSERT INTO sectors_allocated_numbers(sp_id, allocated) VALUES($1, $2) ON CONFLICT(sp_id) DO UPDATE SET allocated = $2", mid, rawJson) - if err != nil { - return false, xerrors.Errorf("persisting allocated sector numbers: %w", err) - } + inverted, err := bitfield.SubtractBitField(allAssignable, merged) + if err != nil { + return res, xerrors.Errorf("subtracting allocated sector numbers: %w", err) + } - for i, f := range txcb { - commit, err = f(tx, res) - if err != nil { - return false, xerrors.Errorf("executing tx callback %d: %w", i, err) - } + toAlloc, err := inverted.Slice(0, uint64(count)) + if err != nil { + return res, xerrors.Errorf("getting slice of allocated sector numbers: %w", err) + } - if !commit { - return false, nil - } - } + err = toAlloc.ForEach(func(u uint64) error { + res = append(res, abi.SectorNumber(u)) + return nil + }) + if err != nil { + return res, xerrors.Errorf("iterating allocated sector numbers: %w", err) + } - return true, nil - }, harmonydb.OptionRetry()) + toPersist, err := bitfield.MergeBitFields(merged, toAlloc) + if err != 
nil { + return res, xerrors.Errorf("merging allocated sector numbers: %w", err) + } + rawJson, err = toPersist.MarshalJSON() if err != nil { - return nil, xerrors.Errorf("allocating sector numbers: %w", err) + return res, xerrors.Errorf("marshaling allocated sector numbers: %w", err) } - if !comm { - return nil, xerrors.Errorf("allocating sector numbers: commit failed") + + _, err = tx.Exec("INSERT INTO sectors_allocated_numbers(sp_id, allocated) VALUES($1, $2) ON CONFLICT(sp_id) DO UPDATE SET allocated = $2", mid, rawJson) + if err != nil { + return res, xerrors.Errorf("persisting allocated sector numbers: %w", err) } return res, nil diff --git a/tasks/seal/task_finalize.go b/tasks/seal/task_finalize.go index 72cd3c71b..15277c75a 100644 --- a/tasks/seal/task_finalize.go +++ b/tasks/seal/task_finalize.go @@ -12,8 +12,7 @@ import ( "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/slotmgr" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) type FinalizeTask struct { diff --git a/tasks/seal/task_movestorage.go b/tasks/seal/task_movestorage.go index 63add8184..4e8dd9b49 100644 --- a/tasks/seal/task_movestorage.go +++ b/tasks/seal/task_movestorage.go @@ -2,6 +2,7 @@ package seal import ( "context" + "fmt" "golang.org/x/xerrors" @@ -12,8 +13,7 @@ import ( "github.com/filecoin-project/curio/harmony/resources" ffi2 "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/paths" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) type MoveStorageTask struct { @@ -70,6 +70,12 @@ func (m *MoveStorageTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) return false, xerrors.Errorf("updating task: %w", err) } + // Create a indexing task + _, err = m.db.Exec(ctx, `SELECT create_indexing_task($1, $2)`, 
taskID, "sectors_sdr_pipeline") + if err != nil { + return false, fmt.Errorf("error creating indexing task: %w", err) + } + return true, nil } diff --git a/tasks/seal/task_porep.go b/tasks/seal/task_porep.go index 7326c8236..0a2f352da 100644 --- a/tasks/seal/task_porep.go +++ b/tasks/seal/task_porep.go @@ -15,9 +15,9 @@ import ( "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) type PoRepAPI interface { diff --git a/tasks/seal/task_sdr.go b/tasks/seal/task_sdr.go index c088bccb8..f990f1b8c 100644 --- a/tasks/seal/task_sdr.go +++ b/tasks/seal/task_sdr.go @@ -17,10 +17,10 @@ import ( "github.com/filecoin-project/curio/lib/dealdata" ffi2 "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/paths" + storiface "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var IsDevnet = build.BlockDelaySecs < 30 diff --git a/tasks/seal/task_submit_commit.go b/tasks/seal/task_submit_commit.go index e62bf4a7f..a8581614d 100644 --- a/tasks/seal/task_submit_commit.go +++ b/tasks/seal/task_submit_commit.go @@ -278,6 +278,8 @@ func (s *SubmitCommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) return false, xerrors.Errorf("getting address for precommit: %w", err) } + log.Errorf("THE FAILED COMMIT MESSAGE PARAMS ARE %x", enc) + msg := &types.Message{ To: maddr, From: a, diff --git a/tasks/seal/task_synth_proofs.go b/tasks/seal/task_synth_proofs.go index 0eceafae9..70f2d3416 100644 --- a/tasks/seal/task_synth_proofs.go +++ b/tasks/seal/task_synth_proofs.go @@ -15,8 +15,7 @@ import ( 
"github.com/filecoin-project/curio/lib/dealdata" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/paths" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) type SyntheticProofTask struct { diff --git a/tasks/seal/task_treed.go b/tasks/seal/task_treed.go index dec8bf3c5..19dd598e2 100644 --- a/tasks/seal/task_treed.go +++ b/tasks/seal/task_treed.go @@ -12,8 +12,7 @@ import ( "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/lib/dealdata" ffi2 "github.com/filecoin-project/curio/lib/ffi" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) type TreeDTask struct { diff --git a/tasks/seal/task_treerc.go b/tasks/seal/task_treerc.go index 93ec7211e..120441ef7 100644 --- a/tasks/seal/task_treerc.go +++ b/tasks/seal/task_treerc.go @@ -14,8 +14,7 @@ import ( "github.com/filecoin-project/curio/lib/dealdata" ffi2 "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/paths" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + storiface "github.com/filecoin-project/curio/lib/storiface" ) type TreeRCTask struct { diff --git a/tasks/sealsupra/task_supraseal.go b/tasks/sealsupra/task_supraseal.go index 9bd6997af..36dc235aa 100644 --- a/tasks/sealsupra/task_supraseal.go +++ b/tasks/sealsupra/task_supraseal.go @@ -26,11 +26,11 @@ import ( "github.com/filecoin-project/curio/lib/passcall" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/slotmgr" + storiface "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/lib/supraffi" "github.com/filecoin-project/curio/tasks/seal" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) const suprasealConfigEnv = "SUPRASEAL_CONFIG" diff --git 
a/tasks/snap/task_encode.go b/tasks/snap/task_encode.go index dcc4b6239..42ab18e25 100644 --- a/tasks/snap/task_encode.go +++ b/tasks/snap/task_encode.go @@ -16,9 +16,8 @@ import ( "github.com/filecoin-project/curio/lib/dealdata" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/passcall" + storiface "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/tasks/seal" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) const MinSnapSchedInterval = 10 * time.Second diff --git a/tasks/snap/task_movestorage.go b/tasks/snap/task_movestorage.go index a269e365b..45d2c6103 100644 --- a/tasks/snap/task_movestorage.go +++ b/tasks/snap/task_movestorage.go @@ -2,6 +2,7 @@ package snap import ( "context" + "fmt" "math/rand/v2" "golang.org/x/xerrors" @@ -14,9 +15,8 @@ import ( "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/passcall" "github.com/filecoin-project/curio/lib/paths" + storiface "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/tasks/seal" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) type MoveStorageTask struct { @@ -74,6 +74,12 @@ func (m *MoveStorageTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) return false, xerrors.Errorf("updating task: %w", err) } + // Create a indexing task + _, err = m.db.Exec(ctx, `SELECT create_indexing_task($1, $2)`, taskID, "sectors_snap_pipeline") + if err != nil { + return false, fmt.Errorf("error creating indexing task: %w", err) + } + return true, nil } diff --git a/tasks/snap/task_prove.go b/tasks/snap/task_prove.go index 0fea1079a..90467ef13 100644 --- a/tasks/snap/task_prove.go +++ b/tasks/snap/task_prove.go @@ -14,9 +14,8 @@ import ( "github.com/filecoin-project/curio/harmony/resources" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/lib/storiface" 
"github.com/filecoin-project/curio/tasks/seal" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) type ProveTask struct { diff --git a/tasks/storage-market/storage_market.go b/tasks/storage-market/storage_market.go new file mode 100644 index 000000000..cf0e71229 --- /dev/null +++ b/tasks/storage-market/storage_market.go @@ -0,0 +1,625 @@ +package storage_market + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/promise" + "github.com/filecoin-project/curio/market/mk12" + "github.com/filecoin-project/curio/market/storageingest" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/storage/pipeline/piece" +) + +var log = logging.Logger("storage-market") + +const ( + mk12Str = "mk12" + mk20Str = "mk20" +) + +const ( + pollerCommP = iota + pollerPSD + pollerFindDeal + + numPollers +) + +const dealPollerInterval = 30 * time.Second + +type storageMarketAPI interface { + mk12.MK12API + storageingest.PieceIngesterApi +} + +type CurioStorageDealMarket struct { + cfg *config.CurioConfig + db *harmonydb.DB + pin storageingest.Ingester + miners map[string][]address.Address + api storageMarketAPI + MK12Handler *mk12.MK12 + sc *ffi.SealCalls + urls map[string]http.Header + adders [numPollers]promise.Promise[harmonytask.AddTaskFunc] +} + +type MK12Pipeline struct { + UUID string `db:"uuid"` + SpID int64 `db:"sp_id"` + Started bool `db:"started"` + PieceCid string `db:"piece_cid"` + Offline bool 
`db:"offline"` + Downloaded bool `db:"downloaded"` + RawSize int64 `db:"raw_size"` + URL string `db:"url"` + Headers json.RawMessage `db:"headers"` + CommTaskID *int64 `db:"commp_task_id"` + AfterCommp bool `db:"after_commp"` + PSDWaitTime time.Time `db:"psd_wait_time"` + PSDTaskID *int64 `db:"psd_task_id"` + AfterPSD bool `db:"after_psd"` + FindDealTaskID *int64 `db:"find_deal_task_id"` + AfterFindDeal bool `db:"after_find_deal"` + Sector *int64 `db:"sector"` + Offset *int64 `Db:"sector_offset"` +} + +func NewCurioStorageDealMarket(miners []address.Address, db *harmonydb.DB, cfg *config.CurioConfig, sc *ffi.SealCalls, mapi storageMarketAPI) *CurioStorageDealMarket { + + moduleMap := make(map[string][]address.Address) + moduleMap[mk12Str] = append(moduleMap[mk12Str], miners...) + + urls := make(map[string]http.Header) + for _, curl := range cfg.Market.StorageMarketConfig.PieceLocator { + urls[curl.URL] = curl.Headers + } + + return &CurioStorageDealMarket{ + cfg: cfg, + db: db, + api: mapi, + miners: moduleMap, + sc: sc, + urls: urls, + } +} + +func (d *CurioStorageDealMarket) StartMarket(ctx context.Context) error { + var err error + + for module, miners := range d.miners { + if module == mk12Str { + if len(miners) == 0 { + // Do not start the poller if no minerID present + return nil + } + d.MK12Handler, err = mk12.NewMK12Handler(miners, d.db, d.sc, d.api) + if err != nil { + return err + } + + if d.cfg.Ingest.DoSnap { + d.pin, err = storageingest.NewPieceIngesterSnap(ctx, d.db, d.api, miners, false, d.cfg) + } else { + d.pin, err = storageingest.NewPieceIngester(ctx, d.db, d.api, miners, false, d.cfg) + } + } + } + + if err != nil { + return err + } + go d.runPoller(ctx) + + return nil + +} + +func (d *CurioStorageDealMarket) runPoller(ctx context.Context) { + ticker := time.NewTicker(dealPollerInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + d.poll(ctx) + } + } +} + +func (d *CurioStorageDealMarket) 
poll(ctx context.Context) { + + /* + FULL DEAL FLOW: + Online: + 1. Make an entry for each online deal in market_mk12_deal_pipeline + 2. For online deals - keep checking if piecePark is complete + 4. Create commP task for online deal + 5. Once commP is complete, add the deal using pieceIngest + + Offline: + 1. Make an entry for each online deal in market_mk12_deal_pipeline + 2. Offline deal would not be started. It will have 2 triggers + A. We find a pieceCID <> URL binding + B. User manually imports the data using a file (will need piecePark) + 3. Check if piece is parked for offline deal triggered manually + 4. Create commP task for offline deals + A. If we have piecePark then do local commP + B. Do streaming commP if we have URL + 5. Once commP is complete, add the deal using pieceIngest + */ + for module, miners := range d.miners { + if module == mk12Str { + if len(miners) > 0 { + d.processMK12Deals(ctx) + } + } + } +} + +func (d *CurioStorageDealMarket) processMK12Deals(ctx context.Context) { + // Catch any panics if encountered as we are working with user provided data + defer func() { + if r := recover(); r != nil { + log.Errorf("panic occurred: %v", r) + } + }() + + // Get all deal sorted by start_epoch + var deals []MK12Pipeline + + err := d.db.Select(ctx, &deals, `SELECT + p.uuid as uuid, + p.sp_id as sp_id, + p.started as started, + p.piece_cid as piece_cid, + p.offline as offline, + p.raw_size as raw_size, + p.url as url, + p.headers as headers, + p.commp_task_id as commp_task_id, + p.after_commp as after_commp, + p.psd_task_id as psd_task_id, + p.after_psd as after_psd, + p.find_deal_task_id as find_deal_task_id, + p.after_find_deal as after_find_deal, + p.psd_wait_time as psd_wait_time, + b.start_epoch as start_epoch + FROM + market_mk12_deal_pipeline p + LEFT JOIN + market_mk12_deals b ON p.uuid = b.uuid + WHERE p.started = TRUE + ORDER BY b.start_epoch ASC;`) + + if err != nil { + log.Errorf("failed to get deal pipeline status from DB: %w", err) + } 
+ + // Add PSD task - PSD is an exception which is processed for multiple deals at once to save + // gas cost for PSD messages + err = d.addPSDTask(ctx, deals) + if err != nil { + log.Errorf("%w", err) + } + + // Process deals + for _, deal := range deals { + deal := deal + err := d.processMk12Deal(ctx, deal) + if err != nil { + log.Errorf("%w", err) + } + } +} + +func (d *CurioStorageDealMarket) processMk12Deal(ctx context.Context, deal MK12Pipeline) error { + + // Try to mark the deal as started + if !deal.Started { + // Check if download is finished and update the deal state in DB + if deal.URL != "" { + goUrl, err := url.Parse(deal.URL) + if err != nil { + return xerrors.Errorf("UUID: %s parsing data URL: %w", deal.UUID, err) + } + + // If park piece ref URL + if goUrl.Scheme == "pieceref" { + refNum, err := strconv.ParseInt(goUrl.Opaque, 10, 64) + if err != nil { + return xerrors.Errorf("UUID: %s parsing piece reference number: %w", deal.UUID, err) + } + + var complete bool + err = d.db.QueryRow(ctx, `SELECT pp.complete + FROM parked_pieces pp + JOIN parked_piece_refs ppr ON pp.id = ppr.piece_id + WHERE ppr.ref_id = $1;`, refNum).Scan(&complete) + if err != nil { + return xerrors.Errorf("UUID: %s getting piece park status: %w", deal.UUID, err) + } + + if complete { + deal.Started = true + _, err = d.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET started = TRUE WHERE uuid = $1`, deal.UUID) + if err != nil { + return xerrors.Errorf("failed to mark deal %s as started: %w", deal.UUID, err) + } + log.Infof("UUID: %s deal started successfully", deal.UUID) + return nil + } + } + } else { + // If no URL found for offline deal then we should try to find one + if deal.Offline { + err := d.findURLForOfflineDeals(ctx, deal.UUID, deal.PieceCid) + if err != nil { + return err + } + } + } + } + + // Create commP task + if deal.Started && !deal.AfterCommp && deal.CommTaskID == nil { + // Skip commP is configured to do so + if 
d.cfg.Market.StorageMarketConfig.MK12.SkipCommP { + _, err := d.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET after_commp = TRUE, commp_task_id = NULL WHERE uuid = $1`, deal.UUID) + if err != nil { + return xerrors.Errorf("UUID: %s: updating deal pipeline: %w", deal.UUID, err) + } + log.Infof("UUID: %s: commP skipped successfully", deal.UUID) + return nil + } + + if d.adders[pollerCommP].IsSet() { + d.adders[pollerCommP].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { + // update + n, err := tx.Exec(`UPDATE market_mk12_deal_pipeline SET commp_task_id = $1 + WHERE uuid = $2 AND started = TRUE AND commp_task_id IS NULL AND after_commp = FALSE`, id, deal.UUID) + if err != nil { + return false, xerrors.Errorf("UUID: %s: updating deal pipeline: %w", deal.UUID, err) + } + + // commit only if we updated the piece + return n > 0, nil + }) + log.Infof("UUID: %s: commP task created successfully", deal.UUID) + } + + return nil + } + + // Create Find Deal task + if deal.Started && deal.AfterCommp && deal.AfterPSD && !deal.AfterFindDeal && deal.FindDealTaskID == nil { + d.adders[pollerFindDeal].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { + // update + n, err := tx.Exec(`UPDATE market_mk12_deal_pipeline SET find_deal_task_id = $1 + WHERE uuid = $2 AND started = TRUE AND find_deal_task_id IS NULL + AND after_commp = TRUE AND after_psd = TRUE AND after_find_deal = FALSE`, id, deal.UUID) + if err != nil { + return false, xerrors.Errorf("UUID: %s: updating deal pipeline: %w", deal.UUID, err) + } + + // commit only if we updated the piece + return n > 0, nil + }) + log.Infof("UUID: %s: FindDeal task created successfully", deal.UUID) + return nil + } + + // If on chain deal ID is present, we should add the deal to a sector + if deal.AfterFindDeal && deal.Sector == nil && deal.Offset == nil { + err := d.ingestDeal(ctx, deal) + if err != nil { + return err + } + } + return nil +} + +type 
MarketMK12Deal struct { + UUID string `db:"uuid"` + CreatedAt time.Time `db:"created_at"` + SignedProposalCid string `db:"signed_proposal_cid"` + ProposalSignature []byte `db:"proposal_signature"` + Proposal []byte `db:"proposal"` + PieceCid string `db:"piece_cid"` + PieceSize int64 `db:"piece_size"` + Offline bool `db:"offline"` + Verified bool `db:"verified"` + SpID int64 `db:"sp_id"` + StartEpoch int64 `db:"start_epoch"` + EndEpoch int64 `db:"end_epoch"` + ClientPeerID string `db:"client_peer_id"` + ChainDealID int64 `db:"chain_deal_id"` + PublishCid string `db:"publish_cid"` + FastRetrieval bool `db:"fast_retrieval"` + AnnounceToIpni bool `db:"announce_to_ipni"` + Error string `db:"error"` +} + +func (d *CurioStorageDealMarket) findURLForOfflineDeals(ctx context.Context, deal string, pcid string) error { + + comm, err := d.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var updated bool + err = tx.QueryRow(` + WITH selected_data AS ( + SELECT url, headers, raw_size + FROM market_offline_urls + WHERE uuid = $1 + ) + UPDATE market_mk12_deal_pipeline + SET url = selected_data.url, + headers = selected_data.headers, + raw_size = selected_data.raw_size + FROM selected_data + WHERE market_mk12_deal_pipeline.uuid = $1 + RETURNING CASE + WHEN EXISTS (SELECT 1 FROM selected_data) THEN TRUE + ELSE FALSE + END;`, deal).Scan(&updated) + if err != nil { + return false, xerrors.Errorf("failed to update the pipeline for deal %s: %w", deal, err) + } + + if updated { + return true, nil + } + + // Check if We can find the URL for this piece on remote servers + for rUrl, headers := range d.urls { + // Create a new HTTP request + urlString := fmt.Sprintf("%s/pieces?id=%s", rUrl, pcid) + req, err := http.NewRequest(http.MethodGet, urlString, nil) + if err != nil { + return false, xerrors.Errorf("error creating request: %w", err) + } + + req.Header = headers + + // Create a client and make the request + client := &http.Client{} + resp, err := 
client.Do(req) + if err != nil { + return false, xerrors.Errorf("error making GET request: %w", err) + } + + // Check the response code for 404 + if resp.StatusCode != http.StatusOK { + if resp.StatusCode != 404 { + return false, xerrors.Errorf("not ok response from HTTP server: %s", resp.Status) + } + continue + } + + hdrs, err := json.Marshal(headers) + if err != nil { + return false, xerrors.Errorf("marshaling headers: %w", err) + } + + rawSizeStr := resp.Header.Get("Content-Length") + if rawSizeStr == "" { + continue + } + rawSize, err := strconv.ParseInt(rawSizeStr, 10, 64) + if err != nil { + return false, xerrors.Errorf("failed to parse the raw size: %w", err) + } + + _, err = tx.Exec(`UPDATE market_mk12_deal_pipeline SET url = $1, headers = $2, raw_size = $3, started = TRUE + WHERE uuid = $4 AND started = FALSE`, urlString, hdrs, rawSize, deal) + if err != nil { + return false, xerrors.Errorf("store url for piece %s: updating pipeline: %w", pcid, err) + } + + return true, nil + } + return false, nil + + }, harmonydb.OptionRetry()) + if err != nil { + return xerrors.Errorf("deal %s: %w", deal, err) + } + if !comm { + return xerrors.Errorf("faile to commit the transaction for deal %s", deal) + } + return nil +} + +func (d *CurioStorageDealMarket) addPSDTask(ctx context.Context, deals []MK12Pipeline) error { + type queue struct { + deals []string + t time.Time + } + + dm := make(map[int64]queue) + + for _, deal := range deals { + if deal.Started && deal.AfterCommp && !deal.AfterPSD && deal.PSDTaskID == nil { + // Check if the spID is already in the map + if q, exists := dm[deal.SpID]; exists { + // Append the UUID to the deals list + q.deals = append(q.deals, deal.UUID) + + // Update the time if the current deal's time is older + if deal.PSDWaitTime.Before(q.t) { + q.t = deal.PSDWaitTime + } + + // Update the map with the new queue + dm[deal.SpID] = q + } else { + // Add a new entry to the map if spID is not present + dm[deal.SpID] = queue{ + deals: 
[]string{deal.UUID}, + t: deal.PSDWaitTime, + } + } + } + } + + publishPeriod := d.cfg.Market.StorageMarketConfig.MK12.PublishMsgPeriod + maxDeals := d.cfg.Market.StorageMarketConfig.MK12.MaxDealsPerPublishMsg + + for _, q := range dm { + if q.t.Add(time.Duration(publishPeriod)).After(time.Now()) || uint64(len(q.deals)) > maxDeals { + d.adders[pollerPSD].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { + // update + n, err := tx.Exec(`UPDATE market_mk12_deal_pipeline SET psd_task_id = $1 + WHERE uuid = ANY($2) AND started = TRUE AND after_commp = TRUE + AND psd_task_id IS NULL`, id, q.deals) + if err != nil { + return false, xerrors.Errorf("updating deal pipeline: %w", err) + } + return n > 0, nil + }) + } + log.Infof("PSD task created successfully for deals %s", q.deals) + } + return nil +} + +func (d *CurioStorageDealMarket) ingestDeal(ctx context.Context, deal MK12Pipeline) error { + var info api.SectorOffset + + comm, err := d.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + // Prepare a variable to hold the result + var dbdeals []MarketMK12Deal + + err = tx.Select(&dbdeals, `SELECT + uuid, + created_at, + signed_proposal_cid, + proposal_signature, + proposal, + piece_cid, + piece_size, + offline, + verified, + sp_id, + start_epoch, + end_epoch, + client_peer_id, + chain_deal_id, + publish_cid, + fast_retrieval, + announce_to_ipni, + url, + url_headers, + error + FROM market_mk12_deals + WHERE uuid = $1;`, deal.UUID) + if err != nil { + return false, xerrors.Errorf("failed to get MK12 deals from DB") + } + + if len(dbdeals) != 1 { + return false, xerrors.Errorf("expected 1 deal, got %d for UUID %s", len(dbdeals), deal.UUID) + } + + dbdeal := dbdeals[0] + + maddr, err := address.NewIDAddress(uint64(dbdeal.SpID)) + if err != nil { + return false, xerrors.Errorf("UUID: %s: %w", deal.UUID, err) + } + + var prop market.DealProposal + err = json.Unmarshal(dbdeal.Proposal, &prop) + if err != nil { + 
return false, xerrors.Errorf("UUID: %s: %w", deal.UUID, err) + } + + pcid, err := cid.Parse(dbdeal.PublishCid) + if err != nil { + return false, xerrors.Errorf("UUID: %s: %w", deal.UUID, err) + } + + pi := piece.PieceDealInfo{ + PublishCid: &pcid, + DealID: abi.DealID(dbdeal.ChainDealID), + DealProposal: &prop, + DealSchedule: piece.DealSchedule{ + StartEpoch: abi.ChainEpoch(dbdeal.StartEpoch), + EndEpoch: abi.ChainEpoch(dbdeal.EndEpoch), + }, + PieceActivationManifest: nil, + KeepUnsealed: true, + } + + dealUrl, err := url.Parse(deal.URL) + if err != nil { + return false, xerrors.Errorf("UUID: %s: %w", deal.UUID, err) + } + + headers := make(http.Header) + err = json.Unmarshal(deal.Headers, &headers) + if err != nil { + return false, xerrors.Errorf("UUID: %s: %w", deal.UUID, err) + } + + var shouldProceed bool + + err = tx.QueryRow(`SELECT EXISTS(SELECT TRUE FROM market_mk12_deal_pipeline WHERE uuid = $1 AND sector IS NULL)`, deal.UUID).Scan(&shouldProceed) + if err != nil { + return false, xerrors.Errorf("failed to check status in DB before adding to sector: %w", err) + } + + if !shouldProceed { + // Exit early + return false, xerrors.Errorf("deal %s already added to sector by another process", deal.UUID) + } + + info, err = d.pin.AllocatePieceToSector(ctx, tx, maddr, pi, deal.RawSize, *dealUrl, headers) + if err != nil { + return false, xerrors.Errorf("UUID: %s: failed to add deal to a sector: %w", deal.UUID, err) + } + + n, err := tx.Exec(`UPDATE market_mk12_deal_pipeline SET sector = $1, sector_offset = $2 + WHERE uuid = $3 AND sector = NULL AND sector_offset = NULL`, info.Sector, info.Offset, deal.UUID) + if err != nil { + return false, xerrors.Errorf("UUID: %s: failed to add sector %d and offset %d details to DB: %w", deal.UUID, info.Sector, info.Offset, err) + } + if n != 1 { + if err != nil { + return false, xerrors.Errorf("UUID: %s: expected 1 deal update for add sector %d and offset %d details to DB but found %d", deal.UUID, info.Sector, info.Offset, n) 
+ } + } + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return xerrors.Errorf("UUID: %s: failed to add deal to a sector: %w", deal.UUID, err) + } + + if !comm { + return xerrors.Errorf("UUID: %s: failed to commit transaction: %w", deal.UUID, err) + } + + log.Infof("Added deal %s to sector %d at %d", deal.UUID, info.Sector, info.Offset) + return nil +} diff --git a/tasks/storage-market/task_commp.go b/tasks/storage-market/task_commp.go new file mode 100644 index 000000000..36d637b07 --- /dev/null +++ b/tasks/storage-market/task_commp.go @@ -0,0 +1,391 @@ +package storage_market + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-commp-utils/writer" + commcid "github.com/filecoin-project/go-fil-commcid" + commpl "github.com/filecoin-project/go-fil-commp-hashhash" + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/storiface" + + "github.com/filecoin-project/lotus/chain/types" +) + +type CommpTask struct { + sm *CurioStorageDealMarket + db *harmonydb.DB + sc *ffi.SealCalls + api headAPI + max int +} + +func NewCommpTask(sm *CurioStorageDealMarket, db *harmonydb.DB, sc *ffi.SealCalls, api headAPI, max int) *CommpTask { + return &CommpTask{ + sm: sm, + db: db, + sc: sc, + api: api, + max: max, + } +} + +func (c *CommpTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var pieces []struct { + Pcid string `db:"piece_cid"` + Psize int64 `db:"piece_size"` + UUID string `db:"uuid"` + URL *string `db:"url"` + Headers json.RawMessage `db:"headers"` + Size *int64 
`db:"raw_size"` + } + + err = c.db.Select(ctx, &pieces, `SELECT uuid, url, headers, raw_size, piece_cid + FROM market_mk12_deal_pipeline WHERE commp_task_id = $1`, taskID) + + if err != nil { + return false, xerrors.Errorf("getting piece details: %w", err) + } + + if len(pieces) != 1 { + return false, xerrors.Errorf("expected 1 piece, got %d", len(pieces)) + } + piece := pieces[0] + + expired, err := checkExpiry(ctx, c.db, c.api, piece.UUID, c.sm.pin.GetExpectedSealDuration()) + if err != nil { + return false, xerrors.Errorf("deal %s expired: %w", piece.UUID, err) + } + if expired { + return true, nil + } + + if piece.URL != nil { + dataUrl := *piece.URL + + goUrl, err := url.Parse(dataUrl) + if err != nil { + return false, xerrors.Errorf("parsing data URL: %w", err) + } + + var reader io.Reader // io.ReadCloser is not supported by padreader + var closer io.Closer + + if goUrl.Scheme == "pieceref" { + // url is to a piece reference + + refNum, err := strconv.ParseInt(goUrl.Opaque, 10, 64) + if err != nil { + return false, xerrors.Errorf("parsing piece reference number: %w", err) + } + + // get pieceID + var pieceID []struct { + PieceID storiface.PieceNumber `db:"piece_id"` + } + err = c.db.Select(ctx, &pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, refNum) + if err != nil { + return false, xerrors.Errorf("getting pieceID: %w", err) + } + + if len(pieceID) != 1 { + return false, xerrors.Errorf("expected 1 pieceID, got %d", len(pieceID)) + } + + pr, err := c.sc.PieceReader(ctx, pieceID[0].PieceID) + if err != nil { + return false, xerrors.Errorf("getting piece reader: %w", err) + } + + closer = pr + reader = pr + + } else { + // Create a new HTTP request + req, err := http.NewRequest(http.MethodGet, goUrl.String()+fmt.Sprintf("/data?id=%s", piece.Pcid), nil) + if err != nil { + return false, xerrors.Errorf("error creating request: %w", err) + } + + hdrs := make(http.Header) + + err = json.Unmarshal(piece.Headers, &hdrs) + + if err != nil { + 
return false, xerrors.Errorf("error unmarshaling headers: %w", err) + } + + // Add custom headers for security and authentication + req.Header = hdrs + + // Create a client and make the request + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return false, xerrors.Errorf("error making GET request: %w", err) + } + + // Check if the file is found + if resp.StatusCode != http.StatusOK { + return false, xerrors.Errorf("not ok response from HTTP server: %s", resp.Status) + } + + closer = resp.Body + reader = resp.Body + } + + pReader, _ := padreader.New(reader, uint64(*piece.Size)) + + defer func() { + _ = closer.Close() + }() + + w := &writer.Writer{} + written, err := io.CopyBuffer(w, pReader, make([]byte, writer.CommPBuf)) + if err != nil { + return false, xerrors.Errorf("copy into commp writer: %w", err) + } + + if written != *piece.Size { + return false, xerrors.Errorf("number of bytes written to CommP writer %d not equal to the file size %d", written, piece.Size) + } + + calculatedCommp, err := w.Sum() + if err != nil { + return false, xerrors.Errorf("computing commP failed: %w", err) + } + + if calculatedCommp.PieceSize < abi.PaddedPieceSize(piece.Psize) { + // pad the data so that it fills the piece + rawPaddedCommp, err := commpl.PadCommP( + // we know how long a pieceCid "hash" is, just blindly extract the trailing 32 bytes + calculatedCommp.PieceCID.Hash()[len(calculatedCommp.PieceCID.Hash())-32:], + uint64(calculatedCommp.PieceSize), + uint64(piece.Psize), + ) + if err != nil { + return false, xerrors.Errorf("failed to pad commp: %w", err) + } + calculatedCommp.PieceCID, _ = commcid.DataCommitmentV1ToCID(rawPaddedCommp) + } + + pcid, err := cid.Parse(piece.Pcid) + if err != nil { + return false, xerrors.Errorf("parsing piece cid: %w", err) + } + + if !pcid.Equals(calculatedCommp.PieceCID) { + return false, xerrors.Errorf("commP mismatch calculated %s and supplied %s", pcid, calculatedCommp.PieceCID) + } + + n, err := 
c.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET after_commp = TRUE, commp_task_id = NULL WHERE commp_task_id = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("store commp success: updating deal pipeline: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("store commp success: updated %d rows", n) + } + + _, err = c.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET psd_wait_time = NOW() AT TIME ZONE 'UTC' WHERE uuid = $1`, piece.UUID) + if err != nil { + return false, xerrors.Errorf("store psd time: updating deal pipeline: %w", err) + } + + return true, nil + } + + return false, xerrors.Errorf("failed to find URL for the piece %s in the db", piece.Pcid) + +} + +func (c *CommpTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + // CommP task can be of 2 types + // 1. Using ParkPiece pieceRef + // 2. Using remote HTTP reader + // ParkPiece should be scheduled on same node which has the piece + // Remote HTTP ones can be scheduled on any node + + ctx := context.Background() + + var tasks []struct { + TaskID harmonytask.TaskID `db:"commp_task_id"` + SpID int64 `db:"sp_id"` + SectorNumber int64 `db:"sector_number"` + StorageID string `db:"storage_id"` + Url *string `db:"url"` + } + + indIDs := make([]int64, len(ids)) + for i, id := range ids { + indIDs[i] = int64(id) + } + + comm, err := c.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + err = tx.Select(&tasks, ` + SELECT commp_task_id, sp_id, sector_number, url FROM market_mk12_deal_pipeline + WHERE commp_task_id = ANY ($1)`, indIDs) + if err != nil { + return false, xerrors.Errorf("failed to get deal details from DB: %w", err) + } + + if storiface.FTPiece != 32 { + panic("storiface.FTPiece != 32") + } + + for _, task := range tasks { + if task.Url != nil { + goUrl, err := url.Parse(*task.Url) + if err != nil { + return false, xerrors.Errorf("parsing data URL: %w", err) + } + if goUrl.Scheme == "pieceref" { + 
refNum, err := strconv.ParseInt(goUrl.Opaque, 10, 64) + if err != nil { + return false, xerrors.Errorf("parsing piece reference number: %w", err) + } + + // get pieceID + var pieceID []struct { + PieceID storiface.PieceNumber `db:"piece_id"` + } + err = tx.Select(&pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, refNum) + if err != nil { + return false, xerrors.Errorf("getting pieceID: %w", err) + } + + var sLocation string + + err = tx.QueryRow(` + SELECT storage_id FROM sector_location + WHERE miner_id = $1 AND sector_num = $2 AND l.sector_filetype = 32`, task.SpID, pieceID[0].PieceID).Scan(&sLocation) + + if err != nil { + return false, xerrors.Errorf("failed to get storage location from DB: %w", err) + } + + task.StorageID = sLocation + } + } + } + return true, nil + }, harmonydb.OptionRetry()) + + if err != nil { + return nil, err + } + + if !comm { + return nil, xerrors.Errorf("failed to commit the transaction") + } + + ls, err := c.sc.LocalStorage(ctx) + if err != nil { + return nil, xerrors.Errorf("getting local storage: %w", err) + } + + acceptables := map[harmonytask.TaskID]bool{} + + for _, t := range ids { + acceptables[t] = true + } + + for _, t := range tasks { + if _, ok := acceptables[t.TaskID]; !ok { + continue + } + + for _, l := range ls { + if string(l.ID) == t.StorageID { + return &t.TaskID, nil + } + } + } + + // If no local pieceRef was found then just return first TaskID + return &ids[0], nil +} + +func (c *CommpTask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: c.max, + Name: "CommP", + Cost: resources.Resources{ + Cpu: 1, + Ram: 1 << 30, + }, + MaxFailures: 3, + } +} + +func (c *CommpTask) Adder(taskFunc harmonytask.AddTaskFunc) { + c.sm.adders[pollerCommP].Set(taskFunc) +} + +var _ = harmonytask.Reg(&CommpTask{}) +var _ harmonytask.TaskInterface = &CommpTask{} + +func failDeal(ctx context.Context, db *harmonydb.DB, deal string, updatePipeline bool, reason string) error { + n, 
err := db.Exec(ctx, `UPDATE market_mk12_deals SET error = $1 WHERE uuid = $2`, reason, deal) + if err != nil { + return xerrors.Errorf("store deal failure: updating deal pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("store deal failure: updated %d rows", n) + } + if updatePipeline { + n, err := db.Exec(ctx, `DELETE FROM market_mk12_deal_pipeline WHERE uuid = $1`, deal) + if err != nil { + return xerrors.Errorf("store deal pipeline cleanup: updating deal pipeline: %w", err) + } + if n != 1 { + return xerrors.Errorf("store deal pipeline cleanup: updated %d rows", n) + } + } + return nil +} + +type headAPI interface { + ChainHead(context.Context) (*types.TipSet, error) +} + +func checkExpiry(ctx context.Context, db *harmonydb.DB, api headAPI, deal string, sealDuration abi.ChainEpoch) (bool, error) { + var starts []struct { + StartEpoch int64 `db:"start_epoch"` + } + err := db.Select(ctx, &starts, `SELECT start_epoch FROM market_mk12_deals WHERE uuid = $1`, deal) + if err != nil { + return false, xerrors.Errorf("failed to get start epoch from DB: %w", err) + } + if len(starts) != 1 { + return false, xerrors.Errorf("expected 1 row but got %d", len(starts)) + } + startEPoch := abi.ChainEpoch(starts[0].StartEpoch) + head, err := api.ChainHead(ctx) + if err != nil { + return false, err + } + + if head.Height()+sealDuration > startEPoch { + err = failDeal(ctx, db, deal, true, fmt.Sprintf("deal proposal must be proven on chain by deal proposal start epoch %d, but it has expired: current chain height: %d", + startEPoch, head.Height())) + return true, err + } + return false, nil +} diff --git a/tasks/storage-market/task_find_deal.go b/tasks/storage-market/task_find_deal.go new file mode 100644 index 000000000..6ae9979fc --- /dev/null +++ b/tasks/storage-market/task_find_deal.go @@ -0,0 +1,320 @@ +package storage_market + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + 
"golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + market9 "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/lib/promise" + + "github.com/filecoin-project/lotus/api" + apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" +) + +var fdLog = logging.Logger("Post-PSD") + +type fdealApi interface { + headAPI + ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) + StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) + StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) + StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) +} + +// FindDealTask represents a task for finding and identifying on chain deals once deal have been published. +// Once PublishStorageDeal message has been successfully executed, each proposal is assigned a deal. +// These deal ID must be matched to the original sent proposals so a local deal can be identified with an +// on chain deal ID. 
+type FindDealTask struct { + sm *CurioStorageDealMarket + db *harmonydb.DB + api fdealApi + TF promise.Promise[harmonytask.AddTaskFunc] + cfg *config.MK12Config +} + +func NewFindDealTask(sm *CurioStorageDealMarket, db *harmonydb.DB, api fdealApi, cfg *config.MK12Config) *FindDealTask { + return &FindDealTask{ + sm: sm, + db: db, + api: api, + cfg: cfg, + } +} + +func (f *FindDealTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var bdeals []struct { + UUID string `db:"uuid"` + PublishCid string `db:"publish_cid"` + Proposal json.RawMessage `db:"proposal"` + } + + err = f.db.Select(ctx, &bdeals, `SELECT + p.uuid, + b.publish_cid, + b.proposal + FROM + market_mk12_deal_pipeline p + JOIN + market_mk12_deals b ON p.uuid = b.uuid + WHERE + p.find_deal_task_id = $1;`, taskID) + + if err != nil { + return false, xerrors.Errorf("getting deals from db: %w", err) + } + + if len(bdeals) != 1 { + return false, xerrors.Errorf("expected 1 deal, got %d", len(bdeals)) + } + bd := bdeals[0] + + expired, err := checkExpiry(ctx, f.db, f.api, bd.UUID, f.sm.pin.GetExpectedSealDuration()) + if err != nil { + return false, xerrors.Errorf("deal %s expired: %w", bd.UUID, err) + } + if expired { + return true, nil + } + + pcd, err := cid.Parse(bd.PublishCid) + if err != nil { + return false, xerrors.Errorf("parsing publishCid: %w", err) + } + + var prop market.DealProposal + err = json.Unmarshal(bd.Proposal, &prop) + if err != nil { + return false, xerrors.Errorf("unmarshalling proposal: %w", err) + } + + var execResult []struct { + ExecutedTskCID string `db:"executed_tsk_cid"` + ExecutedTskEpoch int64 `db:"executed_tsk_epoch"` + ExecutedMsgCID string `db:"executed_msg_cid"` + + ExecutedRcptExitCode int64 `db:"executed_rcpt_exitcode"` + ExecutedRcptGasUsed int64 `db:"executed_rcpt_gas_used"` + } + + err = f.db.Select(ctx, &execResult, `SELECT executed_tsk_cid, executed_tsk_epoch, executed_msg_cid, + 
executed_rcpt_exitcode, executed_rcpt_gas_used + FROM message_waits + WHERE signed_message_cid = $1 AND executed_tsk_epoch IS NOT NULL`, bd.PublishCid) + if err != nil { + fdLog.Errorw("failed to query message_waits", "error", err) + } + if len(execResult) != 1 { + return false, xerrors.Errorf("expected 1 result, got %d", len(execResult)) + } + + res := execResult[0] + if exitcode.ExitCode(res.ExecutedRcptExitCode) != exitcode.Ok { + // Reset the deal to after_commp state + err = f.resendPSD(ctx, bd.UUID, taskID) + if err != nil { + return false, xerrors.Errorf("Storing failure FindDeal: %w", err) + } + return true, nil + } + + // Get the return value of the publish deals message + wmsg, err := f.api.StateSearchMsg(ctx, types.EmptyTSK, pcd, api.LookbackNoLimit, true) + if err != nil { + return false, xerrors.Errorf("getting publish deals message return value: %w", err) + } + + if wmsg == nil { + return false, xerrors.Errorf("looking for publish deal message %s: not found", pcd.String()) + } + + nv, err := f.api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return false, xerrors.Errorf("getting network version: %w", err) + } + + retval, err := market.DecodePublishStorageDealsReturn(wmsg.Receipt.Return, nv) + if err != nil { + return false, xerrors.Errorf("looking for publish deal message %s: decoding message return: %w", pcd, err) + } + + dealIDs, err := retval.DealIDs() + if err != nil { + return false, xerrors.Errorf("looking for publish deal message %s: getting dealIDs: %w", pcd, err) + } + + // Get the parameters to the publish deals message + pubmsg, err := f.api.ChainGetMessage(ctx, pcd) + if err != nil { + return false, xerrors.Errorf("getting publish deal message %s: %w", pcd, err) + } + + var pubDealsParams market9.PublishStorageDealsParams + if err := pubDealsParams.UnmarshalCBOR(bytes.NewReader(pubmsg.Params)); err != nil { + return false, xerrors.Errorf("unmarshalling publish deal message params for message %s: %w", pcd, err) + } + + // Scan
through the deal proposals in the message parameters to find the + // index of the target deal proposal + dealIdx := -1 + for i, paramDeal := range pubDealsParams.Deals { + eq, err := f.checkDealEquality(ctx, prop, paramDeal.Proposal) + if err != nil { + return false, xerrors.Errorf("comparing publish deal message %s proposal to deal proposal: %w", pcd, err) + } + if eq { + dealIdx = i + break + } + } + + if dealIdx == -1 { + return false, xerrors.Errorf("could not find deal in publish deals message %s", pcd) + } + + if dealIdx >= len(pubDealsParams.Deals) { + return false, xerrors.Errorf( + "deal index %d out of bounds of deal proposals (len %d) in publish deals message %s", + dealIdx, len(pubDealsParams.Deals), pcd) + } + + valid, outIdx, err := retval.IsDealValid(uint64(dealIdx)) + if err != nil { + return false, xerrors.Errorf("determining deal validity: %w", err) + } + + if !valid { + return false, xerrors.Errorf("deal was invalid at publication") + } + + // final check against for invalid return value output + // should not be reachable from onchain output, only pathological test cases + if outIdx >= len(dealIDs) { + return false, fmt.Errorf("invalid publish storage deals ret marking %d as valid while only returning %d valid deals in publish deal message %s", outIdx, len(dealIDs), pcd) + } + + onChainID := dealIDs[outIdx] + + // Lookup the deal state by deal ID + marketDeal, err := f.api.StateMarketStorageDeal(ctx, onChainID, types.EmptyTSK) + if err == nil { + // Make sure the retrieved deal proposal matches the target proposal + equal, err := f.checkDealEquality(ctx, prop, marketDeal.Proposal) + if err != nil { + return false, xerrors.Errorf("verifying proposal") + } + if !equal { + return false, xerrors.Errorf("Deal proposals for publish message %s did not match", pcd) + } + } + + comm, err := f.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + n, err := tx.Exec(`UPDATE market_mk12_deals SET chain_deal_id = $1 WHERE uuid = $2`, 
onChainID, bd.UUID) + if err != nil { + return false, xerrors.Errorf("failed to update on chain deal ID in DB: %w", err) + } + if n != 1 { + return false, xerrors.Errorf("failed to update on chain deal ID in DB: expected 1 rows affected, got %d", n) + } + n, err = tx.Exec(`UPDATE market_mk12_deal_pipeline SET after_find_deal = TRUE, find_deal_task_id = NULL WHERE find_deal_task_id = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("DealFind store success: %w", err) + } + return n == 1, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("updating DB: %w", err) + } + if !comm { + return false, xerrors.Errorf("failed to commit the PSD success to DB") + } + return true, nil + +} + +func (f *FindDealTask) checkDealEquality(ctx context.Context, p1, p2 market.DealProposal) (bool, error) { + p1ClientID, err := f.api.StateLookupID(ctx, p1.Client, types.EmptyTSK) + if err != nil { + return false, err + } + p2ClientID, err := f.api.StateLookupID(ctx, p2.Client, types.EmptyTSK) + if err != nil { + return false, err + } + res := p1.PieceCID.Equals(p2.PieceCID) && + p1.PieceSize == p2.PieceSize && + p1.VerifiedDeal == p2.VerifiedDeal && + p1.Label.Equals(p2.Label) && + p1.StartEpoch == p2.StartEpoch && + p1.EndEpoch == p2.EndEpoch && + p1.StoragePricePerEpoch.Equals(p2.StoragePricePerEpoch) && + p1.ProviderCollateral.Equals(p2.ProviderCollateral) && + p1.ClientCollateral.Equals(p2.ClientCollateral) && + p1.Provider == p2.Provider && + p1ClientID == p2ClientID + + fdLog.Debugw("check deal quality", "result", res, "p1clientid", p1ClientID, "p2clientid", p2ClientID, "label_equality", p1.Label.Equals(p2.Label), "provider_equality", p1.Provider == p2.Provider) + + return res, nil +} + +func (f *FindDealTask) resendPSD(ctx context.Context, deal string, taskID harmonytask.TaskID) error { + n, err := f.db.Exec(ctx, `UPDATE market_mk12_deals SET publish_cid = NULL WHERE uuid = $1`, deal) + if err != nil { + return err + } + if n != 1 { + 
return xerrors.Errorf("expected 1 rows but got %d", n) + } + n, err = f.db.Exec(ctx, `UPDATE market_mk12_deal_pipeline SET after_find_deal = FALSE, find_deal_task_id = NULL WHERE find_deal_task_id = $1`, taskID) + if err != nil { + return err + } + if n != 1 { + return xerrors.Errorf("expected 1 rows but got %d", n) + } + return nil +} + +func (f *FindDealTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + return &ids[0], nil +} + +func (f *FindDealTask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: 128, + Name: "FindDeal", + Cost: resources.Resources{ + Cpu: 0, + Gpu: 0, + Ram: 1 << 20, + }, + MaxFailures: 16, + } +} + +func (f *FindDealTask) Adder(taskFunc harmonytask.AddTaskFunc) { + f.sm.adders[pollerFindDeal].Set(taskFunc) +} + +var _ = harmonytask.Reg(&FindDealTask{}) +var _ harmonytask.TaskInterface = &FindDealTask{} diff --git a/tasks/storage-market/task_psd.go b/tasks/storage-market/task_psd.go new file mode 100644 index 000000000..fcc97a108 --- /dev/null +++ b/tasks/storage-market/task_psd.go @@ -0,0 +1,290 @@ +package storage_market + +import ( + "context" + "encoding/json" + "fmt" + + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/curio/deps/config" + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/lib/multictladdr" + "github.com/filecoin-project/curio/lib/promise" + "github.com/filecoin-project/curio/tasks/message" + + 
"github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/storage/ctladdr" +) + +var psdlog = logging.Logger("PSD") + +type psdApi interface { + ChainHead(context.Context) (*types.TipSet, error) + GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) + StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) + ctladdr.NodeApi +} + +type PSDTask struct { + sm *CurioStorageDealMarket + db *harmonydb.DB + sender *message.Sender + as *multictladdr.MultiAddressSelector + cfg *config.MK12Config + api psdApi + + TF promise.Promise[harmonytask.AddTaskFunc] +} + +func NewPSDTask(sm *CurioStorageDealMarket, db *harmonydb.DB, sender *message.Sender, as *multictladdr.MultiAddressSelector, cfg *config.MK12Config, api psdApi) *PSDTask { + return &PSDTask{ + sm: sm, + db: db, + sender: sender, + as: as, + cfg: cfg, + api: api, + } +} + +func (p *PSDTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var bdeals []struct { + Prop json.RawMessage `db:"proposal"` + Sig []byte `db:"proposal_signature"` + UUID string `db:"uuid"` + } + + err = p.db.Select(ctx, &bdeals, `SELECT + p.uuid, + b.proposal, + b.proposal_signature + FROM + market_mk12_deal_pipeline p + JOIN + market_mk12_deals b ON p.uuid = b.uuid + WHERE + p.psd_task_id = $1;`, taskID) + + if err != nil { + return false, xerrors.Errorf("getting deals from db: %w", err) + } + + type deal struct { + uuid string + sprop market.ClientDealProposal + } + + var deals []deal + + for _, d := range bdeals { + d := d + + var prop market.DealProposal + err = json.Unmarshal(d.Prop, &prop) + if err != nil { + return false, xerrors.Errorf("unmarshal proposal: %w", err) + } + + 
sig := new(crypto.Signature) + err = sig.UnmarshalBinary(d.Sig) + if err != nil { + return false, xerrors.Errorf("unmarshal signature: %w", err) + } + + deals = append(deals, deal{ + uuid: d.UUID, + sprop: market.ClientDealProposal{ + Proposal: prop, + ClientSignature: *sig, + }, + }) + } + + // Validate each deal and skip(fail) the ones which fail validation + var validDeals []deal + mi, err := p.api.StateMinerInfo(ctx, deals[0].sprop.Proposal.Provider, types.EmptyTSK) + if err != nil { + return false, xerrors.Errorf("getting provider info: %w", err) + } + for _, d := range deals { + pcid, err := d.sprop.Proposal.Cid() + if err != nil { + return false, xerrors.Errorf("computing proposal cid: %w", err) + } + + head, err := p.api.ChainHead(ctx) + if err != nil { + return false, err + } + if head.Height()+p.sm.pin.GetExpectedSealDuration() > d.sprop.Proposal.StartEpoch { + psdlog.Errorf( + "cannot publish deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d", + d.sprop.Proposal.PieceCID, head.Height(), d.sprop.Proposal.StartEpoch) + // Store error in main MK12 deal Table and Eject the deal from pipeline + err = failDeal(ctx, p.db, d.uuid, true, fmt.Sprintf("deal proposal must be proven on chain by deal proposal start epoch %d, but it has expired: current chain height: %d", + d.sprop.Proposal.StartEpoch, head.Height())) + if err != nil { + return false, err + } + continue + } + + params, err := actors.SerializeParams(&market.PublishStorageDealsParams{ + Deals: []market.ClientDealProposal{d.sprop}, + }) + if err != nil { + return false, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err) + } + + addr, _, err := p.as.AddressFor(ctx, p.api, d.sprop.Proposal.Provider, mi, api.DealPublishAddr, big.Zero(), big.Zero()) + if err != nil { + return false, xerrors.Errorf("selecting address for publishing deals: %w", err) + } + + mss := &api.MessageSendSpec{ + MaxFee: abi.TokenAmount(p.cfg.MaxPublishDealFee), + } + + _, err = 
p.api.GasEstimateMessageGas(ctx, &types.Message{ + To: builtin.StorageMarketActorAddr, + From: addr, + Value: types.NewInt(0), + Method: builtin.MethodsMarket.PublishStorageDeals, + Params: params, + }, mss, head.Key()) + + if err != nil { + psdlog.Errorf("simulating deal publish message: %w", err) + continue + } + psdlog.Debugf("validated deal proposal %s successfully", pcid) + validDeals = append(validDeals, d) + } + + // Send PSD for valid deals + var vdeals []market.ClientDealProposal + for _, p := range validDeals { + vdeals = append(vdeals, p.sprop) + } + params, err := actors.SerializeParams(&market.PublishStorageDealsParams{ + Deals: vdeals, + }) + + if err != nil { + return false, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err) + } + + addr, _, err := p.as.AddressFor(ctx, p.api, vdeals[0].Proposal.Provider, mi, api.DealPublishAddr, big.Zero(), big.Zero()) + if err != nil { + return false, xerrors.Errorf("selecting address for publishing deals: %w", err) + } + + msg := &types.Message{ + To: builtin.StorageMarketActorAddr, + From: addr, + Method: builtin.MethodsMarket.PublishStorageDeals, + Params: params, + Value: types.NewInt(0), + } + + mss := &api.MessageSendSpec{ + MaxFee: big.Mul(abi.TokenAmount(p.cfg.MaxPublishDealFee), big.NewInt(int64(len(vdeals)))), + } + + mcid, err := p.sender.Send(ctx, msg, mss, "psd") + + if err != nil { + return false, xerrors.Errorf("pushing deal publish message: %w", err) + } + + psdlog.Infof("published %d deals with message CID %s", len(vdeals), mcid) + + comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var uuids []string + // Update Boost Deal table with publish CID + for _, s := range validDeals { + uuids = append(uuids, s.uuid) + } + n, err := tx.Exec(`UPDATE market_mk12_deals SET publish_cid = $1 WHERE uuid = ANY($2)`, mcid.String(), uuids) + if err != nil { + return false, xerrors.Errorf("failed to update publish CID in DB: %w", err) + } + if n != 
len(validDeals) { + return false, xerrors.Errorf("failed to update publish CID in DB: expected %d rows affected, got %d", len(validDeals), n) + } + + // Update deal pipeline for successful deal published + n, err = tx.Exec(`UPDATE market_mk12_deal_pipeline SET after_psd = TRUE WHERE uuid = ANY($1)`, uuids) + if err != nil { + return false, xerrors.Errorf("PSD store success: %w", err) + } + if n != len(validDeals) { + return false, xerrors.Errorf("PSD store success: expected %d rows affected, got %d", len(validDeals), n) + } + + // Update deal pipeline for valid+invalid deals + n, err = tx.Exec(`UPDATE market_mk12_deal_pipeline SET psd_task_id = NULL WHERE psd_task_id = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("PSD store success: %w", err) + } + if n != len(bdeals) { + return false, xerrors.Errorf("PSD store success: expected %d rows affected, got %d", len(bdeals), n) + } + + // Update message wait + _, err = tx.Exec(`INSERT INTO message_waits (signed_message_cid) VALUES ($1)`, mcid) + if err != nil { + return false, xerrors.Errorf("inserting into message_waits: %w", err) + } + + return true, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("updating DB: %w", err) + } + if !comm { + return false, xerrors.Errorf("failed to commit the PSD success to DB") + } + + return true, nil +} + +func (p *PSDTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + return &ids[0], nil +} + +func (p *PSDTask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Max: 128, + Name: "PSD", + Cost: resources.Resources{ + Cpu: 0, + Gpu: 0, + Ram: 1 << 20, + }, + MaxFailures: 16, + } +} + +func (p *PSDTask) Adder(taskFunc harmonytask.AddTaskFunc) { + p.sm.adders[pollerPSD].Set(taskFunc) +} + +var _ = harmonytask.Reg(&PSDTask{}) +var _ harmonytask.TaskInterface = &PSDTask{} diff --git a/tasks/window/compute_do.go b/tasks/window/compute_do.go index 
bf4ffe135..fae4f886a 100644 --- a/tasks/window/compute_do.go +++ b/tasks/window/compute_do.go @@ -25,11 +25,10 @@ import ( "github.com/filecoin-project/curio/build" "github.com/filecoin-project/curio/lib/ffiselect" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) const disablePreChecks = false // todo config @@ -261,7 +260,11 @@ type CheckSectorsAPI interface { StateMinerSectors(ctx context.Context, addr address.Address, bf *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) } -func checkSectors(ctx context.Context, api CheckSectorsAPI, ft sealer.FaultTracker, +type FaultTracker interface { + CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) +} + +func checkSectors(ctx context.Context, api CheckSectorsAPI, ft FaultTracker, maddr address.Address, check bitfield.BitField, tsk types.TipSetKey) (bitfield.BitField, error) { mid, err := address.IDFromAddress(maddr) if err != nil { diff --git a/tasks/window/compute_task.go b/tasks/window/compute_task.go index 6f375d2dd..2d7cf246a 100644 --- a/tasks/window/compute_task.go +++ b/tasks/window/compute_task.go @@ -25,14 +25,13 @@ import ( "github.com/filecoin-project/curio/lib/chainsched" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/promise" + storiface "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/curio/tasks/seal" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/sealer" - 
"github.com/filecoin-project/lotus/storage/sealer/sealtasks" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var log = logging.Logger("curio/window") @@ -64,7 +63,7 @@ type WdPostTask struct { api WDPoStAPI db *harmonydb.DB - faultTracker sealer.FaultTracker + faultTracker FaultTracker storage paths.Store verifier storiface.Verifier paramsReady func() (bool, error) @@ -86,7 +85,7 @@ type wdTaskIdentity struct { func NewWdPostTask(db *harmonydb.DB, api WDPoStAPI, - faultTracker sealer.FaultTracker, + faultTracker FaultTracker, storage paths.Store, verifier storiface.Verifier, paramck func() (bool, error), @@ -357,9 +356,14 @@ func (t *WdPostTask) CanAccept(ids []harmonytask.TaskID, te *harmonytask.TaskEng return &tasks[0].TaskID, nil } -var res = storiface.ResourceTable[sealtasks.TTGenerateWindowPoSt] - func (t *WdPostTask) TypeDetails() harmonytask.TaskTypeDetails { + gpu := 1.0 + ram := uint64(25 << 30) + if seal.IsDevnet { + gpu = 0 + ram = 1 << 30 + } + return harmonytask.TaskTypeDetails{ Name: "WdPost", Max: t.max, @@ -368,17 +372,10 @@ func (t *WdPostTask) TypeDetails() harmonytask.TaskTypeDetails { Cost: resources.Resources{ Cpu: 1, - // todo set to something for 32/64G sector sizes? 
Technically windowPoSt is happy on a CPU - // but it will use a GPU if available - Gpu: 0, + Gpu: gpu, // RAM of smallest proof's max is listed here - Ram: lo.Reduce(lo.Keys(res), func(i uint64, k abi.RegisteredSealProof, _ int) uint64 { - if res[k].MaxMemory < i { - return res[k].MaxMemory - } - return i - }, 1<<63), + Ram: ram, }, } } diff --git a/tasks/window/faults_simple.go b/tasks/window/faults_simple.go index bda1be236..9e38e5e5c 100644 --- a/tasks/window/faults_simple.go +++ b/tasks/window/faults_simple.go @@ -13,8 +13,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/curio/lib/paths" - - "github.com/filecoin-project/lotus/storage/sealer/storiface" + "github.com/filecoin-project/curio/lib/storiface" ) type SimpleFaultTracker struct { diff --git a/tasks/window/recover_task.go b/tasks/window/recover_task.go index 673e7a79f..722a08001 100644 --- a/tasks/window/recover_task.go +++ b/tasks/window/recover_task.go @@ -24,14 +24,13 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/sealer" ) type WdPostRecoverDeclareTask struct { sender *message.Sender db *harmonydb.DB api WdPostRecoverDeclareTaskApi - faultTracker sealer.FaultTracker + faultTracker FaultTracker maxDeclareRecoveriesGasFee types.FIL as *multictladdr.MultiAddressSelector @@ -60,7 +59,7 @@ type WdPostRecoverDeclareTaskApi interface { func NewWdPostRecoverDeclareTask(sender *message.Sender, db *harmonydb.DB, api WdPostRecoverDeclareTaskApi, - faultTracker sealer.FaultTracker, + faultTracker FaultTracker, as *multictladdr.MultiAddressSelector, pcs *chainsched.CurioChainSched, diff --git a/tasks/winning/winning_task.go b/tasks/winning/winning_task.go index d738a9e67..c1c3bbe36 100644 --- a/tasks/winning/winning_task.go +++ b/tasks/winning/winning_task.go @@ -28,6 +28,7 @@ import ( 
"github.com/filecoin-project/curio/lib/ffiselect" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/promise" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/policy" @@ -35,7 +36,6 @@ import ( lrand "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var log = logging.Logger("curio/winning") diff --git a/tools/cbor-gen.go b/tools/cbor-gen.go new file mode 100644 index 000000000..43a7071f3 --- /dev/null +++ b/tools/cbor-gen.go @@ -0,0 +1,8 @@ +//go:build tools +// +build tools + +package tools + +import ( + _ "github.com/hannahhoward/cbor-gen-for" +) diff --git a/web/api/sector/sector.go b/web/api/sector/sector.go index 12a2a8a34..16771715d 100644 --- a/web/api/sector/sector.go +++ b/web/api/sector/sector.go @@ -22,6 +22,7 @@ import ( "github.com/filecoin-project/go-state-types/builtin/v9/market" "github.com/filecoin-project/curio/deps" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/curio/web/api/apihelper" "github.com/filecoin-project/lotus/blockstore" @@ -29,7 +30,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/cli/spcli" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) const verifiedPowerGainMul = 9 diff --git a/web/api/webrpc/deals.go b/web/api/webrpc/deals.go index efb1b8aad..9573e9744 100644 --- a/web/api/webrpc/deals.go +++ b/web/api/webrpc/deals.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/curio/market" + "github.com/filecoin-project/curio/market/storageingest" "github.com/filecoin-project/lotus/chain/types" ) @@ 
-45,5 +45,5 @@ func (a *WebRPC) DealsSealNow(ctx context.Context, spId, sectorNumber uint64) er return err } - return market.SealNow(ctx, a.deps.Chain, a.deps.DB, maddr, abi.SectorNumber(sectorNumber), false) + return storageingest.SealNow(ctx, a.deps.Chain, a.deps.DB, maddr, abi.SectorNumber(sectorNumber), false) } diff --git a/web/api/webrpc/sector.go b/web/api/webrpc/sector.go index 2c4e192db..ea2e70fba 100644 --- a/web/api/webrpc/sector.go +++ b/web/api/webrpc/sector.go @@ -13,9 +13,9 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/curio/lib/paths" + "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) type SectorInfo struct { diff --git a/web/api/webrpc/storage_deals.go b/web/api/webrpc/storage_deals.go new file mode 100644 index 000000000..8e3e983b6 --- /dev/null +++ b/web/api/webrpc/storage_deals.go @@ -0,0 +1,333 @@ +package webrpc + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/google/uuid" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" +) + +type StorageDealSummary struct { + ID string `db:"uuid"` + MinerID int64 `db:"sp_id"` + Sector int64 `db:"sector_num"` + CreatedAt time.Time `db:"created_at"` + SignedProposalCid string `db:"signed_proposal_cid"` + Offline bool `db:"offline"` + Verified bool `db:"verified"` + StartEpoch int64 `db:"start_epoch"` + EndEpoch int64 `db:"end_epoch"` + ClientPeerId string `db:"client_peer_id"` + ChainDealId int64 `db:"chain_deal_id"` + PublishCid string `db:"publish_cid"` + PieceCid string `db:"piece_cid"` + PieceSize int64 `db:"piece_size"` + FastRetrieval bool `db:"fast_retrieval"` + AnnounceToIpni bool `db:"announce_to_ipni"` + Url string `db:"url"` + UrlHeaders http.Header `db:"url_headers"` + Error string `db:"error"` + Miner string +} + +type 
MarketMk12DealPipeline struct { + UUID string `db:"uuid"` + SpID int64 `db:"sp_id"` + Started bool `db:"started"` + PieceCID string `db:"piece_cid"` + Offline bool `db:"offline"` + Url string `db:"url"` + CommpTaskID *int64 `db:"commp_task_id"` // NULLable field, use pointer + AfterCommp bool `db:"after_commp"` + PsdTaskID *int64 `db:"psd_task_id"` // NULLable field, use pointer + AfterPsd bool `db:"after_psd"` + FindDealTaskID *int64 `db:"find_deal_task_id"` // NULLable field, use pointer + AfterFindDeal bool `db:"after_find_deal"` + Sector *int64 `db:"sector"` // NULLable field, use pointer + Sealed bool `db:"sealed"` + IndexingTaskID *int64 `db:"indexing_task_id"` // NULLable field, use pointer + Indexed bool `db:"indexed"` + Complete bool `db:"complete"` + Miner string + ParkPieceTaskID *int64 + AfterParkPiece bool +} + +func (a *WebRPC) PendingStorageDeals(ctx context.Context) ([]MarketMk12DealPipeline, error) { + var pipeline []MarketMk12DealPipeline + err := a.deps.DB.Select(ctx, &pipeline, `SELECT + uuid, + sp_id, + started, + piece_cid, + offline, + url, + commp_task_id, + after_commp, + psd_task_id, + after_psd, + find_deal_task_id, + after_find_deal, + sector, + sealed, + indexing_task_id, + indexed, + complete + FROM + market_mk12_deal_pipeline + ORDER BY sp_id`) + + if err != nil { + return nil, xerrors.Errorf("failed to get the deal pipeline from DB: %w", err) + } + + minerMap := make(map[int64]address.Address) + for i, s := range pipeline { + if addr, ok := minerMap[s.SpID]; ok { + pipeline[i].Miner = addr.String() + continue + } + + addr, err := address.NewIDAddress(uint64(s.SpID)) + if err != nil { + return nil, err + } + pipeline[i].Miner = addr.String() + + if !s.Started { + if s.Url != "" { + goUrl, err := url.Parse(s.Url) + if err != nil { + return nil, err + } + + if goUrl.Scheme == "pieceref" { + refNum, err := strconv.ParseInt(goUrl.Opaque, 10, 64) + if err != nil { + return nil, xerrors.Errorf("parsing piece reference number: %w", err) + } + + var ParkedPieceInfo []struct { + TaskID *int64 `db:"task_id"` // TaskID may be NULL, so use a pointer + Complete bool `db:"complete"` + } + + err = a.deps.DB.Select(ctx, &ParkedPieceInfo, `SELECT + pp.task_id, + pp.complete + FROM + parked_piece_refs ppr + JOIN + parked_pieces pp ON ppr.piece_id = pp.id + WHERE + ppr.ref_id = $1;`, refNum) + + if err != nil { + return nil, err + } + + if len(ParkedPieceInfo) != 1 { + return nil, xerrors.Errorf("expected one park piece row for deal %s but got %d", s.UUID, len(ParkedPieceInfo)) + } + + pipeline[i].AfterParkPiece = ParkedPieceInfo[0].Complete + pipeline[i].ParkPieceTaskID = ParkedPieceInfo[0].TaskID + } + } + } + } + + return pipeline, nil +} + +func (a *WebRPC) StorageDealInfo(ctx context.Context, deal string) (*StorageDealSummary, error) { + + var isLegacy bool + var pcid cid.Cid + + id, err := uuid.Parse(deal) + if err != nil { + pcid, err = cid.Parse(deal) + if err != nil { + return &StorageDealSummary{}, err + } + isLegacy = true + } + + if !isLegacy { + var summaries []StorageDealSummary + err = a.deps.DB.Select(ctx, &summaries, `SELECT + md.uuid, + md.sp_id, + md.created_at, + md.signed_proposal_cid, + md.offline, + md.verified, + md.start_epoch, + md.end_epoch, + md.client_peer_id, + md.chain_deal_id, + md.publish_cid, + md.piece_cid, + md.piece_size, + md.fast_retrieval, + md.announce_to_ipni, + md.url, + md.url_headers, + md.error, + mpd.sector_num + FROM market_mk12_deals md + LEFT JOIN market_piece_deal mpd ON mpd.id = md.uuid AND mpd.sp_id = md.sp_id + WHERE md.uuid = $1 AND mpd.boost_deal = TRUE AND mpd.legacy_deal = FALSE;`, id.String()) + + if err != nil { + return &StorageDealSummary{}, err + } + + d := summaries[0] + + addr, err := address.NewIDAddress(uint64(d.MinerID)) + if err != nil { + return &StorageDealSummary{}, err + } + + d.Miner = addr.String() + + return &d, nil + } + + var summaries []StorageDealSummary + err = a.deps.DB.Select(ctx, &summaries, `SELECT + signed_proposal_cid, + sp_id, + piece_cid, + piece_size, 
+ offline, + verified, + start_epoch, + end_epoch, + publish_cid, + chain_deal_id, + piece_cid, + piece_size, + fast_retrieval, + created_at, + sector_num, + client_peer_id, + '' AS error, + '' AS url, + '' AS uuid, + NULL AS url_headers + FALSE AS announce_to_ipni + FROM market_legacy_deals + WHERE signed_proposal_cid = $1`, pcid.String()) + + if err != nil { + return &StorageDealSummary{}, err + } + + d := summaries[0] + + addr, err := address.NewIDAddress(uint64(d.MinerID)) + if err != nil { + return &StorageDealSummary{}, err + } + + d.Miner = addr.String() + + return &d, nil +} + +type StorageDealList struct { + ID string `db:"uuid"` + MinerID int64 `db:"sp_id"` + CreatedAt time.Time `db:"created_at"` + ChainDealId int64 `db:"chain_deal_id"` + Sector int64 `db:"sector_num"` + Miner string +} + +func (a *WebRPC) MK12StorageDealList(ctx context.Context) ([]StorageDealList, error) { + var mk12Summaries []StorageDealList + + err := a.deps.DB.Select(ctx, &mk12Summaries, `SELECT + md.uuid, + md.sp_id, + md.created_at, + md.chain_deal_id, + mid.sector_num + FROM market_mk12_deals md + LEFT JOIN market_piece_deal mpd ON mpd.id = md.uuid AND mpd.sp_id = md.sp_id + WHERE mpd.boost_deal = TRUE AND mpd.legacy_deal = FALSE;`) + + if err != nil { + return nil, err + } + + minerMap := make(map[int64]address.Address) + for _, s := range mk12Summaries { + if addr, ok := minerMap[s.MinerID]; ok { + s.Miner = addr.String() + continue + } + + addr, err := address.NewIDAddress(uint64(s.MinerID)) + if err != nil { + return nil, err + } + s.Miner = addr.String() + } + + return mk12Summaries, nil + +} + +func (a *WebRPC) LegacyStorageDealList(ctx context.Context) (string, error) { + var mk12Summaries []StorageDealList + + err := a.deps.DB.Select(ctx, &mk12Summaries, `SELECT + signed_proposal_cid AS uuid, + sp_id, + created_at, + chain_deal_id, + sector_num, + FROM market_legacy_deals;`) + + if err != nil { + return "", err + } + + minerMap := make(map[int64]address.Address) + for _, 
s := range mk12Summaries { + if addr, ok := minerMap[s.MinerID]; ok { + s.Miner = addr.String() + continue + } + + addr, err := address.NewIDAddress(uint64(s.MinerID)) + if err != nil { + return "", err + } + s.Miner = addr.String() + } + + x := new(bytes.Buffer) + //x := bufio.NewWriter() + + err = json.NewEncoder(x).Encode(map[string]any{"data": mk12Summaries}) + if err != nil { + return "", err + } + + return x.String(), nil + +} diff --git a/web/api/webrpc/storage_stats.go b/web/api/webrpc/storage_stats.go index e87cbb863..18e20cd67 100644 --- a/web/api/webrpc/storage_stats.go +++ b/web/api/webrpc/storage_stats.go @@ -4,8 +4,9 @@ import ( "context" "time" + "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/sealer/storiface" ) type StorageGCStats struct { diff --git a/web/static/pages/pipeline_porep/index.html b/web/static/pages/pipeline_porep/index.html index ccbf8916b..a006f1dba 100644 --- a/web/static/pages/pipeline_porep/index.html +++ b/web/static/pages/pipeline_porep/index.html @@ -10,13 +10,13 @@ -
-
-

Curio PoRep Pipeline

-
-
-
+
+
+

Curio PoRep Pipeline

+
+
+
@@ -33,6 +33,7 @@

Sectors

-
+
+
\ No newline at end of file diff --git a/web/static/storagemarket/deal-pipeline.mjs b/web/static/storagemarket/deal-pipeline.mjs new file mode 100644 index 000000000..1120e0254 --- /dev/null +++ b/web/static/storagemarket/deal-pipeline.mjs @@ -0,0 +1,144 @@ +import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; +import RPCCall from '/lib/jsonrpc.mjs'; +customElements.define('deal-pipeline',class DealPipeline extends LitElement { + constructor() { + super(); + this.data = []; + this.loadData(); + } + async loadData() { + this.data = await RPCCall('PendingStorageDeals'); + setTimeout(() => this.loadData(), 3000); + this.requestUpdate(); + }; + + static styles = css` + .porep-pipeline-table, + .porep-state { + color: #d0d0d0; + } + + .porep-pipeline-table td, + .porep-pipeline-table th { + border-left: none; + border-collapse: collapse; + vertical-align: middle; + } + + .porep-pipeline-table tr:nth-child(odd) { + border-top: 6px solid #999999; + } + + .porep-pipeline-table tr:first-child, + .porep-pipeline-table tr:first-child { + border-top: none; + } + .porep-state { + border-collapse: collapse; + } + + .porep-state td, + .porep-state th { + border-left: 1px solid #f0f0f0; + border-right: 1px solid #f0f0f0; + + padding: 1px 5px; + + text-align: center; + font-size: 0.7em; + } + + .porep-state tr { + border-top: 1px solid #f0f0f0; + } + + .porep-state tr:first-child { + border-top: none; + } + + .pipeline-active { + background-color: #303060; + } + + .pipeline-success { + background-color: #306030; + } + + .pipeline-failed { + background-color: #603030; + }` + properties = { + sector: Object, + } + + render() { + return html` + + + + + + ${this.renderDeals()} + +
+ `; + } + + renderDeals() { + return this.data.map((deal) => html` + + ${deal.Miner} + ${deal.CreateTime} + ${this.renderDeal(deal)} + + DETAILS + + + + ${deal.UUID} + + `); + } + + renderDeal(deal) { + return html` + + + + ${this.renderDealState('PiecePark', 2, deal.ParkPieceTaskID, deal.AfterParkPiece)} + ${this.renderDealStateNoTask('Started', 2, !deal.Started, deal.Started)} + ${this.renderDealState('CommP', 2, deal.CommpTaskID, deal.AfterCommp)} + ${this.renderSectorState('PSD', 1, deal.PsdTaskID, deal.AfterPsd)} + + ${this.renderDealStateNoTask('Sealing', 1, !deal.Sealed, deal.Sealed)} + + + + ${this.renderSectorState('PSD Wait', 1, deal.FindDealTaskID, deal.AfterFindDeal)} + ${this.renderSectorState('Indexing', 1, deal.IndexingTaskID, deal.Indexed)} + + +
+
Add To Sector
+
${deal.Sector}
+
+ `; + } + renderDealStateNoTask(name, rowspan, active, after) { + return html` + +
${name}
+
${after?'done':'--'}
+ + `; + } + renderDealState(name, rowspan, task, after) { + return html` + +
${name}
+
${after?'done':task?'T:'+task:'--'}
+ + `; + } + +} ); diff --git a/web/static/storagemarket/deal/deal.mjs b/web/static/storagemarket/deal/deal.mjs new file mode 100644 index 000000000..e4d3e07d5 --- /dev/null +++ b/web/static/storagemarket/deal/deal.mjs @@ -0,0 +1,60 @@ +import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; +import RPCCall from '/lib/jsonrpc.mjs'; + +class DealDetails extends LitElement { + constructor() { + super(); + this.loadData(); + } + + async loadData() { + const params = new URLSearchParams(window.location.search); + this.data = await RPCCall('StorageDealInfo', [params.get('id')]); + setTimeout(() => this.loadData(), 5000); + this.requestUpdate(); + } + + render() { + return html` + + + + + + + + + + + ${this.data.flatMap(entry => [ + {property: 'ID', value: entry.UUID}, + {property: 'Provider', value: entry.Miner}, + {property: 'Sector Number', value: entry.Sector}, + {property: 'Created At', value: entry.CreatedAt}, + {property: 'Signed Proposal Cid', value: entry.SignedProposalCid}, + {property: 'Offline', value: entry.Offline}, + {property: 'Verified', value: entry.Verified}, + {property: 'Start Epoch', value: entry.StartEpoch}, + {property: 'End Epoch', value: entry.EndEpoch}, + {property: 'Client Peer ID', value: entry.ClientPeerId}, + {property: 'Chain Deal ID', value: entry.ChainDealId}, + {property: 'Publish CID', value: entry.PublishCid}, + {property: 'Piece CID', value: entry.PieceCid}, + {property: 'Piece Size', value: entry.PieceSize}, + {property: 'Fast Retrieval', value: entry.FastRetrieval}, + {property: 'Announce To IPNI', value: entry.AnnounceToIpni}, + {property: 'Url', value: entry.Url}, + {property: 'Url Headers', value: JSON.stringify(entry.UrlHeaders, null, 2)}, + {property: 'Error', value: entry.Error}, + ]).map(item => html` + + + + + `)} + +
PropertyValue
${item.property}${item.value}
+ `; + } +} +customElements.define('deal-details', DealDetails); diff --git a/web/static/storagemarket/deal/index.html b/web/static/storagemarket/deal/index.html new file mode 100644 index 000000000..fb720b060 --- /dev/null +++ b/web/static/storagemarket/deal/index.html @@ -0,0 +1,28 @@ + + + + + Deals + + + + + + +
+
+

Deal Info

+
+
+
+
+
+ +
+
+ +
+
+ + + \ No newline at end of file diff --git a/web/static/storagemarket/index.html b/web/static/storagemarket/index.html new file mode 100644 index 000000000..e7f339c4e --- /dev/null +++ b/web/static/storagemarket/index.html @@ -0,0 +1,24 @@ + + + + Node Info + + + + +
+
+

Deal Pipeline

+
+
+
+
+
+ +
+
+ +
+
+ + diff --git a/web/static/storagemarket/legacyList/index.html b/web/static/storagemarket/legacyList/index.html new file mode 100644 index 000000000..e57860b90 --- /dev/null +++ b/web/static/storagemarket/legacyList/index.html @@ -0,0 +1,24 @@ + + + + Node Info + + + + +
+
+

Sector Info

+
+
+
+
+
+ +
+
+ +
+
+ + \ No newline at end of file diff --git a/web/static/storagemarket/legacyList/legacy-list.mjs b/web/static/storagemarket/legacyList/legacy-list.mjs new file mode 100644 index 000000000..f974913d8 --- /dev/null +++ b/web/static/storagemarket/legacyList/legacy-list.mjs @@ -0,0 +1,142 @@ +import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; +import RPCCall from '/lib/jsonrpc.mjs'; + +class DealDetails extends LitElement { + constructor() { + super(); + this.loadData(); + } + + async loadData() { + const params = new URLSearchParams(window.location.search); + this.data = await RPCCall('LegacyStorageDealList', [params.get('id')]); + setTimeout(() => this.loadData(), 5000); + this.requestUpdate(); + } + + // TODO: Fix DATAtable for legacy deals and test with dummy data + // TODO: Combine lists maybe? + + render() { + return html` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MinerIDCreated AtChain IDSector
Loading...
+ + `; + + } +} +customElements.define('deal-details', DealDetails); diff --git a/web/static/storagemarket/storagemarket.mjs b/web/static/storagemarket/storagemarket.mjs new file mode 100644 index 000000000..a6b439f90 --- /dev/null +++ b/web/static/storagemarket/storagemarket.mjs @@ -0,0 +1,86 @@ +import {LitElement, css, html} from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; + +//import 'https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.esm.js'; + + +class MarketUX extends LitElement { + static styles = css` +\ .market-slot { + } + :host { + display: block; + margin: 2px 3px; + } + + `; + connectedCallback() { + super.connectedCallback(); + //"https://unpkg.com/@cds/core/global.min.css", + //"https://unpkg.com/@cds/city/css/bundles/default.min.css", + //"https://unpkg.com/@cds/core/styles/theme.dark.min.css", + //"https://unpkg.com/@clr/ui/clr-ui.min.css", + + document.head.innerHTML += ` + + + + + +` + + document.documentElement.lang = 'en'; + + // how Bootstrap & DataTables expect dark mode declared. + document.documentElement.classList.add('dark'); + + this.messsage = this.getCookieMessage(); + } + + render() { + return html` +
+ + + ${this.message ? html` + ` : html``} + +
+ + `; + } + + getCookieMessage() { + const name = 'message'; + const cookies = document.cookie.split(';'); + for (let i = 0; i < cookies.length; i++) { + const cookie = cookies[i].trim(); + if (cookie.startsWith(name + '=')) { + var val = cookie.substring(name.length + 1); + document.cookie = name + '=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;'; + return val; + } + } + return null; + } + +}; + +customElements.define('market-ux', MarketUX); \ No newline at end of file diff --git a/web/static/ux/curio-ux.mjs b/web/static/ux/curio-ux.mjs index ea98e86e7..4432820b8 100644 --- a/web/static/ux/curio-ux.mjs +++ b/web/static/ux/curio-ux.mjs @@ -86,6 +86,35 @@ class CurioUX extends LitElement { + ` document.documentElement.lang = 'en'; @@ -126,11 +155,12 @@ class CurioUX extends LitElement { - - ${this.message ? html`` : html``} `; diff --git a/web/static/ux/fonts/JetBrainsMono-Bold.woff2 b/web/static/ux/fonts/JetBrainsMono-Bold.woff2 new file mode 100644 index 000000000..4917f4341 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-Bold.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-BoldItalic.woff2 b/web/static/ux/fonts/JetBrainsMono-BoldItalic.woff2 new file mode 100644 index 000000000..536d3f715 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-BoldItalic.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-ExtraBold.woff2 b/web/static/ux/fonts/JetBrainsMono-ExtraBold.woff2 new file mode 100644 index 000000000..8f88c5464 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-ExtraBold.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-ExtraBoldItalic.woff2 b/web/static/ux/fonts/JetBrainsMono-ExtraBoldItalic.woff2 new file mode 100644 index 000000000..d1478bacc Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-ExtraBoldItalic.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-ExtraLight.woff2 b/web/static/ux/fonts/JetBrainsMono-ExtraLight.woff2 new file mode 100644 index 
000000000..b97239f32 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-ExtraLight.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-ExtraLightItalic.woff2 b/web/static/ux/fonts/JetBrainsMono-ExtraLightItalic.woff2 new file mode 100644 index 000000000..be01aaca5 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-ExtraLightItalic.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-Italic.woff2 b/web/static/ux/fonts/JetBrainsMono-Italic.woff2 new file mode 100644 index 000000000..d60c270e8 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-Italic.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-Light.woff2 b/web/static/ux/fonts/JetBrainsMono-Light.woff2 new file mode 100644 index 000000000..653849873 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-Light.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-LightItalic.woff2 b/web/static/ux/fonts/JetBrainsMono-LightItalic.woff2 new file mode 100644 index 000000000..66ca3d2b9 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-LightItalic.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-Medium.woff2 b/web/static/ux/fonts/JetBrainsMono-Medium.woff2 new file mode 100644 index 000000000..669d04cdf Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-Medium.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-MediumItalic.woff2 b/web/static/ux/fonts/JetBrainsMono-MediumItalic.woff2 new file mode 100644 index 000000000..80cfd15e0 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-MediumItalic.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-Regular.woff2 b/web/static/ux/fonts/JetBrainsMono-Regular.woff2 new file mode 100644 index 000000000..40da42765 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-Regular.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-SemiBold.woff2 b/web/static/ux/fonts/JetBrainsMono-SemiBold.woff2 new file mode 100644 index 
000000000..5ead7b0d6 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-SemiBold.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-SemiBoldItalic.woff2 b/web/static/ux/fonts/JetBrainsMono-SemiBoldItalic.woff2 new file mode 100644 index 000000000..c5dd294b4 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-SemiBoldItalic.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-Thin.woff2 b/web/static/ux/fonts/JetBrainsMono-Thin.woff2 new file mode 100644 index 000000000..17270e459 Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-Thin.woff2 differ diff --git a/web/static/ux/fonts/JetBrainsMono-ThinItalic.woff2 b/web/static/ux/fonts/JetBrainsMono-ThinItalic.woff2 new file mode 100644 index 000000000..a6432151c Binary files /dev/null and b/web/static/ux/fonts/JetBrainsMono-ThinItalic.woff2 differ diff --git a/web/static/ux/main.css b/web/static/ux/main.css index 4af5e407e..94a00ca4b 100644 --- a/web/static/ux/main.css +++ b/web/static/ux/main.css @@ -1,4 +1,3 @@ - html { min-height: 100vh; background: rgb(11, 22, 34); @@ -8,13 +7,14 @@ html { body { margin: 0; background: rgb(11, 22, 34); - font-family: Metropolis, monospace; - font-weight: 400; + font-family: 'JetBrains Mono', monospace; + font-weight: 300; } * { - font-weight: 400; + font-weight: 200; } + h1,h2,h3,h4,h5,h6 { font-weight: 400; } @@ -22,12 +22,11 @@ h1,h2,h3,h4,h5,h6 { curio-ux { /* To resemble Clarity Design */ color: rgb(227, 234, 237); - font-family: Metropolis, monospace; - font-weight: 400; + font-family: 'JetBrains Mono', monospace; + font-weight: 200; background: RGB(54, 57, 63); } - .app-head { width: 100%; } @@ -67,50 +66,57 @@ a:hover { } @font-face { - font-family: metropolis; + font-family: 'JetBrains Mono'; font-style: normal; font-weight: 400; - src: local('Metropolis'), url(/ux/fonts/Metropolis-Regular.woff) format('woff') + src: url('/ux/fonts/JetBrainsMono-Regular.woff2') format('woff2'); } @font-face { - font-family: metropolis; - 
font-style: normal; + font-family: 'JetBrains Mono'; + font-style: italic; font-weight: 400; - src: local('Metropolis'), url(/ux/fonts/Metropolis-ExtraLight.woff) format('woff') + src: url('/ux/fonts/JetBrainsMono-Italic.woff2') format('woff2'); } @font-face { - font-family: metropolis; + font-family: 'JetBrains Mono'; font-style: normal; - font-weight: 400; - src: local('Metropolis'), url(/ux/fonts/Metropolis-Light.woff) format('woff') + font-weight: 200; + src: url('/ux/fonts/JetBrainsMono-ExtraLight.woff2') format('woff2'); } @font-face { - font-family: metropolis; + font-family: 'JetBrains Mono'; font-style: normal; - font-weight: 400; - src: local('Metropolis'), url(/ux/fonts/Metropolis-Thin.woff) format('woff') + font-weight: 300; + src: url('/ux/fonts/JetBrainsMono-Light.woff2') format('woff2'); +} + +@font-face { + font-family: 'JetBrains Mono'; + font-style: normal; + font-weight: 100; + src: url('/ux/fonts/JetBrainsMono-Thin.woff2') format('woff2'); } @font-face { - font-family: metropolis; + font-family: 'JetBrains Mono'; font-style: normal; font-weight: 500; - src: local('Metropolis'), url(/ux/fonts/Metropolis-Medium.woff) format('woff') + src: url('/ux/fonts/JetBrainsMono-Medium.woff2') format('woff2'); } @font-face { - font-family: metropolis; + font-family: 'JetBrains Mono'; font-style: normal; font-weight: 700; - src: local('Metropolis'), url(/ux/fonts/Metropolis-Bold.woff) format('woff') + src: url('/ux/fonts/JetBrainsMono-Bold.woff2') format('woff2'); } @font-face { - font-family: metropolis; + font-family: 'JetBrains Mono'; font-style: normal; font-weight: 800; - src: local('Metropolis'), url(/ux/fonts/Metropolis-ExtraBold.woff) format('woff') + src: url('/ux/fonts/JetBrainsMono-ExtraBold.woff2') format('woff2'); } \ No newline at end of file